hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
6355435e2e084d693734c8f7dcd671de11ffa8c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
//#include <hip/hip_runtime.h>
#include "bspline.h"
#include "common.h"
using std::cout;
int main(int argc, char **argv)
/*
testing bspline_1d to bspline_5d with order 6
you can change order with command line parameter, e.g. ./bspline 4
*/
{
// Managed result slots written by the device kernels (1d/2d tests).
double *value,*d1,*d2,*d3,*d4,*d5;
CHECK(hipMallocManaged((void **)&value, sizeof(double)));
CHECK(hipMallocManaged((void **)&d1, sizeof(double)));
CHECK(hipMallocManaged((void **)&d2, sizeof(double)));
CHECK(hipMallocManaged((void **)&d3, sizeof(double)));
CHECK(hipMallocManaged((void **)&d4, sizeof(double)));
CHECK(hipMallocManaged((void **)&d5, sizeof(double)));
// Host-side result variables used by the *_ex host entry points (3d-5d tests).
double r_value, r_d1, r_d2, r_d3, r_d4, r_d5;
double start, end;
int order = 6;
if(argc > 1 && isdigit(argv[1][0])) order = atol(argv[1]);
int num_x1 = 100+1;
double min_x1 = 0.0, max_x1 = 1.0;
double width_x1 = (max_x1-min_x1) / (num_x1-1);
double x1 = 0.5;
// test bspline_1d for data generated from function x^2
double *data1;
CHECK(hipMallocManaged((void **)&data1, num_x1*sizeof(double)));
for(int i = 0; i < num_x1; i++)
data1[i] = (width_x1*i) * (width_x1*i);
start = seconds();
hipLaunchKernelGGL(( bspline_1d), dim3(1), dim3(1), 0, 0, data1, min_x1, max_x1,
num_x1, x1, order, value,
d1, true, d2, true);
// Check the synchronize result so in-kernel faults are not silently dropped.
CHECK(hipDeviceSynchronize());
end = seconds();
cout << std::fixed;
cout << "bspline_1d test:\n";
cout << "Calculated:\n";
cout << *value << '\t' << *d1 << '\t' << *d2 << '\n';
cout << "Expected:\n";
cout << x1*x1 << '\t' << 2*x1 << '\t' << 2.0 << '\n';
cout << "Time: " << end - start << " sec" << '\n';
cout << "\n\n";
/*
test bspline_2d for data generated from function x1^2*x2
*/
num_x1 = 100;
min_x1 = 0.005;
max_x1 = 0.995;
width_x1 = (max_x1-min_x1) / (num_x1-1);
x1 = 0.3;
int num_x2 = 24+1;
double min_x2 = -10000.0, max_x2 = 10000.0;
double width_x2 = (max_x2-min_x2) / (num_x2-1);
double *data2; // new double[num_x1*num_x2];
double x2 = 4000.0;
CHECK(hipMallocManaged((void **)&data2, num_x1*num_x2*sizeof(double)));
for(int i = 0; i < num_x1; i++)
for(int j = 0; j < num_x2; j++)
data2[i*num_x2+j] = (min_x1+width_x1*i) * (min_x1+width_x1*i) *
(min_x2+width_x2*j);
start = seconds();
hipLaunchKernelGGL(( bspline_2d), dim3(1), dim3(1), 0, 0, data2, min_x1, max_x1, num_x1, x1,
min_x2, max_x2, num_x2, x2,
order, value, d1, d2, true);
CHECK(hipDeviceSynchronize());
end = seconds();
cout << std::fixed;
cout << "bspline_2d test:\n";
cout << "Calculated:\n";
cout << *value << '\t' << *d1 << '\t' << *d2 << '\n';
cout << "Expected:\n";
cout << x1*x1*x2 << '\t' << 2*x1*x2 << '\t' << x1*x1 << '\n';
cout << "Time: " << end - start << " sec" << '\n';
cout << "\n\n";
/*
test bspline_3d for data generated from function x1^2*x2*x3
*/
int num_x3 = 24+1;
double min_x3 = -0.01, max_x3 = 0.01;
double width_x3 = (max_x3-min_x3) / (num_x3-1);
double x3 = 0.005;
double *data3; // new double[num_x1*num_x2*num_x3];
CHECK(hipMallocManaged((void **)&data3, num_x1*num_x2*num_x3*sizeof(double)));
for(int i = 0; i < num_x1; i++)
for(int j = 0; j < num_x2; j++)
for(int k = 0; k < num_x3; k++)
data3[i*num_x2*num_x3+j*num_x3+k] = (min_x1+width_x1*i) * (min_x1+width_x1*i) *
(min_x2+width_x2*j) * (min_x3+width_x3*k);
start = seconds();
/*
bspline_3d<<<1, order*order*order>>>(data3, min_x1, max_x1, num_x1, x1,
min_x2, max_x2, num_x2, x2,
min_x3, max_x3, num_x3, x3,
order, value, d1, d2, d3, true);
hipDeviceSynchronize();
*/
bspline_3d_ex(data3, min_x1, max_x1, num_x1, x1,
min_x2, max_x2, num_x2, x2,
min_x3, max_x3, num_x3, x3,
order, r_value, r_d1, r_d2, r_d3, true);
end = seconds();
cout << std::fixed;
cout << "bspline_3d test:\n";
cout << "Calculated:\n";
cout << r_value << '\t' << r_d1 << '\t' << r_d2 << '\t' << r_d3 << '\n';
cout << "Expected:\n";
cout << x1*x1*x2*x3 << '\t' << 2*x1*x2*x3 << '\t' << x1*x1*x3 << '\t' << x1*x1*x2 << '\n';
cout << "Time: " << end - start << " sec" << '\n';
cout << "\n\n";
/*
test bspline_4d for data generated from function x1^2*x2*x3*x4
*/
int num_x4 = 24+1;
double min_x4 = -10000.0, max_x4 = 10000.0;
double width_x4 = (max_x4-min_x4) / (num_x4-1);
double x4 = 6000.0;
double *data4; // new double[num_x1*num_x2*num_x3*num_x4];
CHECK(hipMallocManaged((void **)&data4, num_x1*num_x2*num_x3*num_x4*sizeof(double)));
for(int i = 0; i < num_x1; i++)
for(int j = 0; j < num_x2; j++)
for(int k = 0; k < num_x3; k++)
for(int m = 0; m < num_x4; m++)
data4[i*num_x2*num_x3*num_x4+j*num_x3*num_x4+k*num_x4+m] = (min_x1+width_x1*i) * (min_x1+width_x1*i) *
(min_x2+width_x2*j) * (min_x3+width_x3*k) * (min_x4+width_x4*m);
start = seconds();
bspline_4d_ex(data4, min_x1, max_x1, num_x1, x1,
min_x2, max_x2, num_x2, x2,
min_x3, max_x3, num_x3, x3,
min_x4, max_x4, num_x4, x4,
order, r_value, r_d1, r_d2, r_d3, r_d4, true);
end = seconds();
cout << std::fixed;
cout << "bspline_4d test:\n";
cout << "Calculated:\n";
cout << r_value << '\t' << r_d1 << '\t' << r_d2 << '\t' << r_d3 << '\t' << r_d4 << '\n';
cout << "Expected:\n";
cout << x1*x1*x2*x3*x4 << '\t' << 2*x1*x2*x3*x4 << '\t' << x1*x1*x3*x4
<< '\t' << x1*x1*x2*x4 << '\t' << x1*x1*x2*x3 << '\n';
cout << "Time: " << end - start << " sec" << '\n';
cout << "\n\n";
/*
test bspline_5d for data generated from function x1^2*x2*x3*x4*x5
*/
int num_x5 = 24+1;
double min_x5 = -0.01, max_x5 = 0.01;
double width_x5 = (max_x5-min_x5) / (num_x5-1);
double x5 = 0.007;
double *data5;
int nBytes = num_x1*num_x2*num_x3*num_x4*num_x5*sizeof(double);
//CHECK(hipMallocManaged((void **)&data5, nBytes));
CHECK(hipMalloc((void **) &data5, nBytes));
// Fix: allocate the element count, not the byte count. The original
// `new double[nBytes]` requested sizeof(double) times more host memory
// than is needed for the staging buffer.
double *h_data5 = new double[nBytes / sizeof(double)];
for(int i = 0; i < num_x1; i++)
for(int j = 0; j < num_x2; j++)
for(int k = 0; k < num_x3; k++)
for(int m = 0; m < num_x4; m++)
for(int n = 0; n < num_x5; n++)
h_data5[i*num_x2*num_x3*num_x4*num_x5+j*num_x3*num_x4*num_x5+k*num_x4*num_x5+m*num_x5+n]
= (min_x1+width_x1*i) * (min_x1+width_x1*i) *
(min_x2+width_x2*j) * (min_x3+width_x3*k) * (min_x4+width_x4*m) * (min_x5+width_x5*n);
CHECK(hipMemcpy(data5, h_data5, nBytes, hipMemcpyHostToDevice));
delete[] h_data5;
start = seconds();
bspline_5d_ex(data5, min_x1, max_x1, num_x1, x1,
min_x2, max_x2, num_x2, x2,
min_x3, max_x3, num_x3, x3,
min_x4, max_x4, num_x4, x4,
min_x5, max_x5, num_x5, x5,
order, r_value, r_d1, r_d2, r_d3, r_d4, r_d5, true);
end = seconds();
cout << std::fixed;
cout << "bspline_5d test:\n";
cout << "Calculated:\n";
cout << r_value << '\t' << r_d1 << '\t' << r_d2 << '\t' << r_d3
<< '\t' << r_d4 << '\t' << r_d5 << '\n';
cout << "Expected:\n";
cout << x1*x1*x2*x3*x4*x5 << '\t' << 2*x1*x2*x3*x4*x5 << '\t' << x1*x1*x3*x4*x5
<< '\t' << x1*x1*x2*x4*x5 << '\t' << x1*x1*x2*x3*x5 << '\t' << x1*x1*x2*x3*x4 << '\n';
cout << "Time: " << end - start << " sec" << '\n';
cout << "\n\n";
// free device global memory
CHECK(hipFree(data1));
CHECK(hipFree(data2));
CHECK(hipFree(data3));
CHECK(hipFree(data4));
CHECK(hipFree(data5));
CHECK(hipFree(value));
CHECK(hipFree(d1));
CHECK(hipFree(d2));
CHECK(hipFree(d3));
CHECK(hipFree(d4));
CHECK(hipFree(d5));
// reset device
CHECK(hipDeviceReset());
return 0;
}
| 6355435e2e084d693734c8f7dcd671de11ffa8c7.cu | #include <iostream>
//#include <cuda_runtime.h>
#include "bspline.h"
#include "common.h"
using std::cout;
int main(int argc, char **argv)
/*
testing bspline_1d to bspline_5d with order 6
you can change order with command line parameter, e.g. ./bspline 4
*/
{
// Managed result slots written by the device kernels (1d/2d tests).
double *value,*d1,*d2,*d3,*d4,*d5;
CHECK(cudaMallocManaged((void **)&value, sizeof(double)));
CHECK(cudaMallocManaged((void **)&d1, sizeof(double)));
CHECK(cudaMallocManaged((void **)&d2, sizeof(double)));
CHECK(cudaMallocManaged((void **)&d3, sizeof(double)));
CHECK(cudaMallocManaged((void **)&d4, sizeof(double)));
CHECK(cudaMallocManaged((void **)&d5, sizeof(double)));
// Host-side result variables used by the *_ex host entry points (3d-5d tests).
double r_value, r_d1, r_d2, r_d3, r_d4, r_d5;
double start, end;
int order = 6;
if(argc > 1 && isdigit(argv[1][0])) order = atol(argv[1]);
int num_x1 = 100+1;
double min_x1 = 0.0, max_x1 = 1.0;
double width_x1 = (max_x1-min_x1) / (num_x1-1);
double x1 = 0.5;
// test bspline_1d for data generated from function x^2
double *data1;
CHECK(cudaMallocManaged((void **)&data1, num_x1*sizeof(double)));
for(int i = 0; i < num_x1; i++)
data1[i] = (width_x1*i) * (width_x1*i);
start = seconds();
bspline_1d<<<1, 1>>>(data1, min_x1, max_x1,
num_x1, x1, order, value,
d1, true, d2, true);
// Check the synchronize result so in-kernel faults are not silently dropped.
CHECK(cudaDeviceSynchronize());
end = seconds();
cout << std::fixed;
cout << "bspline_1d test:\n";
cout << "Calculated:\n";
cout << *value << '\t' << *d1 << '\t' << *d2 << '\n';
cout << "Expected:\n";
cout << x1*x1 << '\t' << 2*x1 << '\t' << 2.0 << '\n';
cout << "Time: " << end - start << " sec" << '\n';
cout << "\n\n";
/*
test bspline_2d for data generated from function x1^2*x2
*/
num_x1 = 100;
min_x1 = 0.005;
max_x1 = 0.995;
width_x1 = (max_x1-min_x1) / (num_x1-1);
x1 = 0.3;
int num_x2 = 24+1;
double min_x2 = -10000.0, max_x2 = 10000.0;
double width_x2 = (max_x2-min_x2) / (num_x2-1);
double *data2; // new double[num_x1*num_x2];
double x2 = 4000.0;
CHECK(cudaMallocManaged((void **)&data2, num_x1*num_x2*sizeof(double)));
for(int i = 0; i < num_x1; i++)
for(int j = 0; j < num_x2; j++)
data2[i*num_x2+j] = (min_x1+width_x1*i) * (min_x1+width_x1*i) *
(min_x2+width_x2*j);
start = seconds();
bspline_2d<<<1, 1>>>(data2, min_x1, max_x1, num_x1, x1,
min_x2, max_x2, num_x2, x2,
order, value, d1, d2, true);
CHECK(cudaDeviceSynchronize());
end = seconds();
cout << std::fixed;
cout << "bspline_2d test:\n";
cout << "Calculated:\n";
cout << *value << '\t' << *d1 << '\t' << *d2 << '\n';
cout << "Expected:\n";
cout << x1*x1*x2 << '\t' << 2*x1*x2 << '\t' << x1*x1 << '\n';
cout << "Time: " << end - start << " sec" << '\n';
cout << "\n\n";
/*
test bspline_3d for data generated from function x1^2*x2*x3
*/
int num_x3 = 24+1;
double min_x3 = -0.01, max_x3 = 0.01;
double width_x3 = (max_x3-min_x3) / (num_x3-1);
double x3 = 0.005;
double *data3; // new double[num_x1*num_x2*num_x3];
CHECK(cudaMallocManaged((void **)&data3, num_x1*num_x2*num_x3*sizeof(double)));
for(int i = 0; i < num_x1; i++)
for(int j = 0; j < num_x2; j++)
for(int k = 0; k < num_x3; k++)
data3[i*num_x2*num_x3+j*num_x3+k] = (min_x1+width_x1*i) * (min_x1+width_x1*i) *
(min_x2+width_x2*j) * (min_x3+width_x3*k);
start = seconds();
/*
bspline_3d<<<1, order*order*order>>>(data3, min_x1, max_x1, num_x1, x1,
min_x2, max_x2, num_x2, x2,
min_x3, max_x3, num_x3, x3,
order, value, d1, d2, d3, true);
cudaDeviceSynchronize();
*/
bspline_3d_ex(data3, min_x1, max_x1, num_x1, x1,
min_x2, max_x2, num_x2, x2,
min_x3, max_x3, num_x3, x3,
order, r_value, r_d1, r_d2, r_d3, true);
end = seconds();
cout << std::fixed;
cout << "bspline_3d test:\n";
cout << "Calculated:\n";
cout << r_value << '\t' << r_d1 << '\t' << r_d2 << '\t' << r_d3 << '\n';
cout << "Expected:\n";
cout << x1*x1*x2*x3 << '\t' << 2*x1*x2*x3 << '\t' << x1*x1*x3 << '\t' << x1*x1*x2 << '\n';
cout << "Time: " << end - start << " sec" << '\n';
cout << "\n\n";
/*
test bspline_4d for data generated from function x1^2*x2*x3*x4
*/
int num_x4 = 24+1;
double min_x4 = -10000.0, max_x4 = 10000.0;
double width_x4 = (max_x4-min_x4) / (num_x4-1);
double x4 = 6000.0;
double *data4; // new double[num_x1*num_x2*num_x3*num_x4];
CHECK(cudaMallocManaged((void **)&data4, num_x1*num_x2*num_x3*num_x4*sizeof(double)));
for(int i = 0; i < num_x1; i++)
for(int j = 0; j < num_x2; j++)
for(int k = 0; k < num_x3; k++)
for(int m = 0; m < num_x4; m++)
data4[i*num_x2*num_x3*num_x4+j*num_x3*num_x4+k*num_x4+m] = (min_x1+width_x1*i) * (min_x1+width_x1*i) *
(min_x2+width_x2*j) * (min_x3+width_x3*k) * (min_x4+width_x4*m);
start = seconds();
bspline_4d_ex(data4, min_x1, max_x1, num_x1, x1,
min_x2, max_x2, num_x2, x2,
min_x3, max_x3, num_x3, x3,
min_x4, max_x4, num_x4, x4,
order, r_value, r_d1, r_d2, r_d3, r_d4, true);
end = seconds();
cout << std::fixed;
cout << "bspline_4d test:\n";
cout << "Calculated:\n";
cout << r_value << '\t' << r_d1 << '\t' << r_d2 << '\t' << r_d3 << '\t' << r_d4 << '\n';
cout << "Expected:\n";
cout << x1*x1*x2*x3*x4 << '\t' << 2*x1*x2*x3*x4 << '\t' << x1*x1*x3*x4
<< '\t' << x1*x1*x2*x4 << '\t' << x1*x1*x2*x3 << '\n';
cout << "Time: " << end - start << " sec" << '\n';
cout << "\n\n";
/*
test bspline_5d for data generated from function x1^2*x2*x3*x4*x5
*/
int num_x5 = 24+1;
double min_x5 = -0.01, max_x5 = 0.01;
double width_x5 = (max_x5-min_x5) / (num_x5-1);
double x5 = 0.007;
double *data5;
int nBytes = num_x1*num_x2*num_x3*num_x4*num_x5*sizeof(double);
//CHECK(cudaMallocManaged((void **)&data5, nBytes));
CHECK(cudaMalloc((void **) &data5, nBytes));
// Fix: allocate the element count, not the byte count. The original
// `new double[nBytes]` requested sizeof(double) times more host memory
// than is needed for the staging buffer.
double *h_data5 = new double[nBytes / sizeof(double)];
for(int i = 0; i < num_x1; i++)
for(int j = 0; j < num_x2; j++)
for(int k = 0; k < num_x3; k++)
for(int m = 0; m < num_x4; m++)
for(int n = 0; n < num_x5; n++)
h_data5[i*num_x2*num_x3*num_x4*num_x5+j*num_x3*num_x4*num_x5+k*num_x4*num_x5+m*num_x5+n]
= (min_x1+width_x1*i) * (min_x1+width_x1*i) *
(min_x2+width_x2*j) * (min_x3+width_x3*k) * (min_x4+width_x4*m) * (min_x5+width_x5*n);
CHECK(cudaMemcpy(data5, h_data5, nBytes, cudaMemcpyHostToDevice));
delete[] h_data5;
start = seconds();
bspline_5d_ex(data5, min_x1, max_x1, num_x1, x1,
min_x2, max_x2, num_x2, x2,
min_x3, max_x3, num_x3, x3,
min_x4, max_x4, num_x4, x4,
min_x5, max_x5, num_x5, x5,
order, r_value, r_d1, r_d2, r_d3, r_d4, r_d5, true);
end = seconds();
cout << std::fixed;
cout << "bspline_5d test:\n";
cout << "Calculated:\n";
cout << r_value << '\t' << r_d1 << '\t' << r_d2 << '\t' << r_d3
<< '\t' << r_d4 << '\t' << r_d5 << '\n';
cout << "Expected:\n";
cout << x1*x1*x2*x3*x4*x5 << '\t' << 2*x1*x2*x3*x4*x5 << '\t' << x1*x1*x3*x4*x5
<< '\t' << x1*x1*x2*x4*x5 << '\t' << x1*x1*x2*x3*x5 << '\t' << x1*x1*x2*x3*x4 << '\n';
cout << "Time: " << end - start << " sec" << '\n';
cout << "\n\n";
// free device global memory
CHECK(cudaFree(data1));
CHECK(cudaFree(data2));
CHECK(cudaFree(data3));
CHECK(cudaFree(data4));
CHECK(cudaFree(data5));
CHECK(cudaFree(value));
CHECK(cudaFree(d1));
CHECK(cudaFree(d2));
CHECK(cudaFree(d3));
CHECK(cudaFree(d4));
CHECK(cudaFree(d5));
// reset device
CHECK(cudaDeviceReset());
return 0;
}
|
fdca1c1a6057d1c1546b0ac269da394d806d1ab0.hip | // !!! This is a file automatically generated by hipify!!!
#include "d_quadtree.h"
#include <hip/device_functions.h>
// A point lies inside this node only when it is strictly within all
// four edges of the node's bounds.
__device__ bool d_QuadTree::isInBounds(point2 const& p)
{
return p.x > this->bounds.topLeft.x
&& p.y > this->bounds.topLeft.y
&& p.x < this->bounds.bottomRight.x
&& p.y < this->bounds.bottomRight.y;
}
// A rectangle is contained when both of its corners fall inside the
// node's bounds; touching an edge counts as inside.
__device__ bool d_QuadTree::isInBounds(d_Rect const& r)
{
return r.topLeft.y >= this->bounds.topLeft.y
&& r.topLeft.x >= this->bounds.topLeft.x
&& r.bottomRight.y <= this->bounds.bottomRight.y
&& r.bottomRight.x <= this->bounds.bottomRight.x;
}
// Descends from this node toward the leaf containing point p, testing p
// against the objects stored at every level on the way down.  On a hit the
// colliding rectangle is copied into r and true is returned.
// NOTE(review): 'next' is not reset between iterations, so after a descent
// it still equals 'current'; the (next == current) branch below relies on
// that to detect a point not covered by any child -- confirm intended.
__device__ bool d_QuadTree::checkCollisons(point2 p, d_Rect& r)
{
d_QuadTree* nodes = treeManager->nodes;
d_QuadTree* current = this, *next = nullptr;
while(true)
{
if (current->isSplited())
{
// Find the child whose bounds contain p.
for(int i=0; i < NODES_NUMBER; ++i)
{
int id = current->getChildren(i);
d_QuadTree* node = &nodes[id];
if (node->bounds.contains(p))
{
next = node;
break;
}
}
// No child accepted p: dump diagnostics and report no collision.
if(next == current)
{
printf("b: lvl: %d %flf %lf %lf %lf\n",current->getLevel(),current->bounds.topLeft.x,current->bounds.topLeft.y,
current->bounds.bottomRight.x,current->bounds.bottomRight.y);
for(int i=0; i < NODES_NUMBER; ++i)
{
int id = current->getChildren(i);
d_QuadTree* node = &nodes[id];
printf("ch: lvl: %d %flf %lf %lf %lf\n",node->getLevel(),node->bounds.topLeft.x,node->bounds.topLeft.y,
node->bounds.bottomRight.x,node->bounds.bottomRight.y);
}
return false;
}
}
// here: check the bisector lines for each candidate
if (current->checkCollisionObjs(p, r))// COLLISION
return true;
else if(false == current->isSplited() || next == nullptr)
return false;
else
current = next;
}
}
// Scans the rectangles owned by this node and reports the first one that
// contains p, copying it into r.  Returns false when none contains p.
__device__ bool d_QuadTree::checkCollisionObjs(point2 p, d_Rect &r)
{
d_Rect* rects = treeManager->rects;
int idx = startOwnOff;
while (idx < endOff)
{
if (rects[idx].contains(p))
{
r = d_Rect(rects[idx].topLeft, rects[idx].bottomRight);
return true;
}
++idx;
}
return false;
}
// Grows/shrinks a square centred on p by bisection, looking for the largest
// collision-free square; the search stops once the step has fallen to
// MIN_DIST and the current square is free.  When even the tiny seed squares
// collide, the seed is returned immediately.
__device__ d_Rect d_QuadTree::drawBiggestSquareAtPoint(point2 p)
{
bool isCollision = false;
bool maxReached = false;
const floatingPoint MIN_DIST = .095f;
floatingPoint dist;
// Seed square (+/-1 around p) and a slightly larger probe (+/-2).
d_Rect output(p.x -1,p.y -1, p.x +1,p.y +1);
d_Rect init(p.x -2,p.y -2, p.x +2,p.y +2);
// Initial step size: the larger tree dimension scaled by the init factor.
if (bounds.getHeigth() > bounds.getWidth())
dist = bounds.getHeigth();
else
dist = bounds.getWidth();
dist *= BIGGEST_SQUARE_INIT_FACTOR;
// If the seed squares already collide there is nothing to grow.
if (checkCollisions(output))
return output;
if (checkCollisions(init))
return init;
// Shrink while colliding, grow while free; the step is halved after the
// first collision has been seen (maxReached) or while above MIN_DIST.
// NOTE(review): the OR-chained loop condition means the loop keeps running
// while any of collision / !maxReached / dist > MIN_DIST holds -- confirm
// this matches the intended termination criterion.
while((isCollision = checkCollisions(output)) || false == maxReached || dist > MIN_DIST)
{
if(isCollision)
{
if(dist > MIN_DIST)
dist /= 2;
maxReached = true;
output.topLeft.x += dist;
output.topLeft.y += dist;
output.bottomRight.x -= dist;
output.bottomRight.y -= dist;
}
else
{
if(maxReached)
dist /= 2;
output.topLeft.x -= dist;
output.topLeft.y -= dist;
output.bottomRight.x += dist;
output.bottomRight.y += dist;
}
}
// Degenerate (zero-width) result indicates the search collapsed.
if (p.x == output.bottomRight.x)
printf("drawBiggestSquareAtPoint: error\n");
return output;
}
// Stack-driven traversal: tests rectangle r against the objects of every
// node whose bounds overlap r, skipping the rectangle `ignore`.  Returns
// true on the first overlap found; a rectangle outside this node's bounds
// is reported as a collision as well.
__device__ bool d_QuadTree::checkCollisions(d_Rect const& r, const d_Rect &ignore)
{
if (false == isInBounds(r))
return true;
d_QuadTree* nodes = treeManager->nodes;
d_QuadTree* oldNode, *node = this;
// Per-thread explicit DFS stack owned by the tree manager.
dTreePtr* stackPtr = stack[(blockIdx.x * treeManager->threadInBlock) + threadIdx.x];
bool collisions[NODES_NUMBER];
*stackPtr++ = nullptr; // sentinel: loop ends when this is popped
//printf("Col: %f %f %f %f\n",r.topLeft.x,r.topLeft.y,r.bottomRight.x,r.bottomRight.y);
while (node != nullptr)
{
if (node->checkCollisionsWithObjs(r, ignore))
return true;
// Mark which children overlap r (leaves mark none).
if (node->isSplited())
{
#pragma unroll
for (int i = 0; i < NODES_NUMBER; ++i)
{
collisions[i] = nodes[node->getChildren(i)].getBounds().rectsCollision(r);
}
}
else
{
#pragma unroll
for (int i = 0; i < NODES_NUMBER; ++i)
collisions[i] = false;
}
if (false == checkIsAnyCollision(collisions))
{
node = *--stackPtr;
}
else
{
oldNode = node;
// Descend into the first overlapping child...
for (int i = 0; i < NODES_NUMBER; ++i)
{
if (collisions[i])
{
node = &(nodes[node->getChildren(i)]);
break;
}
}
// NOTE(review): this declaration shadows the outer 'nodes' with the
// same value; harmless but redundant.
d_QuadTree* nodes = treeManager->nodes;
// ...and push the remaining overlapping children for later.
#pragma unroll
for (int i = 0; i < NODES_NUMBER; ++i)
{
if (collisions[i] && node != &(nodes[oldNode->children[i]]))
*stackPtr++ = &(nodes[oldNode->children[i]]);
}
}
}
return false;
}
// Returns true when any entry of the per-child collision mask is set.
__device__ bool d_QuadTree::checkIsAnyCollision(bool collisions[])
{
bool hit = false;
#pragma unroll
for (int idx = 0; idx < NODES_NUMBER; ++idx)
hit = hit || collisions[idx];
return hit;
}
// Tests r against every rectangle stored in this node, skipping `ignore`;
// returns true on the first overlap found.
__device__ bool d_QuadTree::checkCollisionsWithObjs(d_Rect const& r, const d_Rect &ignore)
{
d_Rect* rects = treeManager->rects;
int idx = startOwnOff;
while (idx < endOff)
{
if (rects[idx] != ignore && rects[idx].rectsCollision(r))
return true;
++idx;
}
return false;
}
// Builds an enlarged "Gaussian surface" around r, shrinking the requested
// enlargement factor independently per axis until the surface causes no
// collision.  A factor below 1 is rejected and r is returned unchanged.
__device__ d_Rect d_QuadTree::createGaussianSurfFrom(d_Rect const & r, floatingPoint const factor) // collision-free
{
if (factor < 1)
{
printf("CreateGaussian: Nieprawidlowy wspolczynnik!\n");
return r;
}
// Adjust the factor separately for the X and Y directions.
floatingPoint factorX = getAdjustedGaussianFactor(r, factor, D_FACTOR_X);
floatingPoint factorY = getAdjustedGaussianFactor(r, factor, D_FACTOR_Y);
return r.createGaussianSurface(factorX, factorY);
}
// Bisection search (GAUSSIAN_ACCURACY iterations) over [1, factor] for the
// largest enlargement factor along the requested axis whose surface stays
// inside the tree bounds and collides with nothing (ignoring r itself).
__device__ floatingPoint d_QuadTree::getAdjustedGaussianFactor(d_Rect const& r, floatingPoint const factor, D_FACTOR_TYPE type)
{
bool isCollision = false;
bool isDividing = true; // true: the right bound is being lowered
bool isFirstIt = true;
floatingPoint adjustedFactor = factor;
floatingPoint leftBound = 1., righBound = factor;
d_Rect surface;
for (int i = 0; i < GAUSSIAN_ACCURACY; i++)
{
// Candidate surface for the current factor along the chosen axis.
surface = (type == D_FACTOR_X) ?
r.createGaussianSurfaceX(adjustedFactor) :
r.createGaussianSurfaceY(adjustedFactor);
isCollision = (!isInBounds(surface) || checkCollisions(surface, r));
// The full factor already fits on the first try: nothing to adjust.
if (isFirstIt && !isCollision)
break;
// Flip the search direction whenever the collision state disagrees
// with the current bisection direction.
if ((isCollision && !isDividing) ||
(!isCollision && isDividing))
{
isDividing = !isDividing;
}
if (isDividing)
adjustedFactor = righBound = (leftBound + righBound) / 2.;
else
adjustedFactor = leftBound = (leftBound + righBound) / 2.;
isFirstIt = false;
}
return adjustedFactor;
}
| fdca1c1a6057d1c1546b0ac269da394d806d1ab0.cu | #include "d_quadtree.h"
#include <device_functions.h>
// A point lies inside this node only when it is strictly within all
// four edges of the node's bounds.
__device__ bool d_QuadTree::isInBounds(point2 const& p)
{
return p.x > this->bounds.topLeft.x
&& p.y > this->bounds.topLeft.y
&& p.x < this->bounds.bottomRight.x
&& p.y < this->bounds.bottomRight.y;
}
// A rectangle is contained when both of its corners fall inside the
// node's bounds; touching an edge counts as inside.
__device__ bool d_QuadTree::isInBounds(d_Rect const& r)
{
return r.topLeft.y >= this->bounds.topLeft.y
&& r.topLeft.x >= this->bounds.topLeft.x
&& r.bottomRight.y <= this->bounds.bottomRight.y
&& r.bottomRight.x <= this->bounds.bottomRight.x;
}
// Descends from this node toward the leaf containing point p, testing p
// against the objects stored at every level on the way down.  On a hit the
// colliding rectangle is copied into r and true is returned.
// NOTE(review): 'next' is not reset between iterations, so after a descent
// it still equals 'current'; the (next == current) branch below relies on
// that to detect a point not covered by any child -- confirm intended.
__device__ bool d_QuadTree::checkCollisons(point2 p, d_Rect& r)
{
d_QuadTree* nodes = treeManager->nodes;
d_QuadTree* current = this, *next = nullptr;
while(true)
{
if (current->isSplited())
{
// Find the child whose bounds contain p.
for(int i=0; i < NODES_NUMBER; ++i)
{
int id = current->getChildren(i);
d_QuadTree* node = &nodes[id];
if (node->bounds.contains(p))
{
next = node;
break;
}
}
// No child accepted p: dump diagnostics and report no collision.
if(next == current)
{
printf("b: lvl: %d %flf %lf %lf %lf\n",current->getLevel(),current->bounds.topLeft.x,current->bounds.topLeft.y,
current->bounds.bottomRight.x,current->bounds.bottomRight.y);
for(int i=0; i < NODES_NUMBER; ++i)
{
int id = current->getChildren(i);
d_QuadTree* node = &nodes[id];
printf("ch: lvl: %d %flf %lf %lf %lf\n",node->getLevel(),node->bounds.topLeft.x,node->bounds.topLeft.y,
node->bounds.bottomRight.x,node->bounds.bottomRight.y);
}
return false;
}
}
// here: check the bisector lines for each candidate
if (current->checkCollisionObjs(p, r))// COLLISION
return true;
else if(false == current->isSplited() || next == nullptr)
return false;
else
current = next;
}
}
// Scans the rectangles owned by this node and reports the first one that
// contains p, copying it into r.  Returns false when none contains p.
__device__ bool d_QuadTree::checkCollisionObjs(point2 p, d_Rect &r)
{
d_Rect* rects = treeManager->rects;
int idx = startOwnOff;
while (idx < endOff)
{
if (rects[idx].contains(p))
{
r = d_Rect(rects[idx].topLeft, rects[idx].bottomRight);
return true;
}
++idx;
}
return false;
}
// Grows/shrinks a square centred on p by bisection, looking for the largest
// collision-free square; the search stops once the step has fallen to
// MIN_DIST and the current square is free.  When even the tiny seed squares
// collide, the seed is returned immediately.
__device__ d_Rect d_QuadTree::drawBiggestSquareAtPoint(point2 p)
{
bool isCollision = false;
bool maxReached = false;
const floatingPoint MIN_DIST = .095f;
floatingPoint dist;
// Seed square (+/-1 around p) and a slightly larger probe (+/-2).
d_Rect output(p.x -1,p.y -1, p.x +1,p.y +1);
d_Rect init(p.x -2,p.y -2, p.x +2,p.y +2);
// Initial step size: the larger tree dimension scaled by the init factor.
if (bounds.getHeigth() > bounds.getWidth())
dist = bounds.getHeigth();
else
dist = bounds.getWidth();
dist *= BIGGEST_SQUARE_INIT_FACTOR;
// If the seed squares already collide there is nothing to grow.
if (checkCollisions(output))
return output;
if (checkCollisions(init))
return init;
// Shrink while colliding, grow while free; the step is halved after the
// first collision has been seen (maxReached) or while above MIN_DIST.
// NOTE(review): the OR-chained loop condition means the loop keeps running
// while any of collision / !maxReached / dist > MIN_DIST holds -- confirm
// this matches the intended termination criterion.
while((isCollision = checkCollisions(output)) || false == maxReached || dist > MIN_DIST)
{
if(isCollision)
{
if(dist > MIN_DIST)
dist /= 2;
maxReached = true;
output.topLeft.x += dist;
output.topLeft.y += dist;
output.bottomRight.x -= dist;
output.bottomRight.y -= dist;
}
else
{
if(maxReached)
dist /= 2;
output.topLeft.x -= dist;
output.topLeft.y -= dist;
output.bottomRight.x += dist;
output.bottomRight.y += dist;
}
}
// Degenerate (zero-width) result indicates the search collapsed.
if (p.x == output.bottomRight.x)
printf("drawBiggestSquareAtPoint: error\n");
return output;
}
// Stack-driven traversal: tests rectangle r against the objects of every
// node whose bounds overlap r, skipping the rectangle `ignore`.  Returns
// true on the first overlap found; a rectangle outside this node's bounds
// is reported as a collision as well.
__device__ bool d_QuadTree::checkCollisions(d_Rect const& r, const d_Rect &ignore)
{
if (false == isInBounds(r))
return true;
d_QuadTree* nodes = treeManager->nodes;
d_QuadTree* oldNode, *node = this;
// Per-thread explicit DFS stack owned by the tree manager.
dTreePtr* stackPtr = stack[(blockIdx.x * treeManager->threadInBlock) + threadIdx.x];
bool collisions[NODES_NUMBER];
*stackPtr++ = nullptr; // sentinel: loop ends when this is popped
//printf("Col: %f %f %f %f\n",r.topLeft.x,r.topLeft.y,r.bottomRight.x,r.bottomRight.y);
while (node != nullptr)
{
if (node->checkCollisionsWithObjs(r, ignore))
return true;
// Mark which children overlap r (leaves mark none).
if (node->isSplited())
{
#pragma unroll
for (int i = 0; i < NODES_NUMBER; ++i)
{
collisions[i] = nodes[node->getChildren(i)].getBounds().rectsCollision(r);
}
}
else
{
#pragma unroll
for (int i = 0; i < NODES_NUMBER; ++i)
collisions[i] = false;
}
if (false == checkIsAnyCollision(collisions))
{
node = *--stackPtr;
}
else
{
oldNode = node;
// Descend into the first overlapping child...
for (int i = 0; i < NODES_NUMBER; ++i)
{
if (collisions[i])
{
node = &(nodes[node->getChildren(i)]);
break;
}
}
// NOTE(review): this declaration shadows the outer 'nodes' with the
// same value; harmless but redundant.
d_QuadTree* nodes = treeManager->nodes;
// ...and push the remaining overlapping children for later.
#pragma unroll
for (int i = 0; i < NODES_NUMBER; ++i)
{
if (collisions[i] && node != &(nodes[oldNode->children[i]]))
*stackPtr++ = &(nodes[oldNode->children[i]]);
}
}
}
return false;
}
// Returns true when any entry of the per-child collision mask is set.
__device__ bool d_QuadTree::checkIsAnyCollision(bool collisions[])
{
bool hit = false;
#pragma unroll
for (int idx = 0; idx < NODES_NUMBER; ++idx)
hit = hit || collisions[idx];
return hit;
}
// Tests r against every rectangle stored in this node, skipping `ignore`;
// returns true on the first overlap found.
__device__ bool d_QuadTree::checkCollisionsWithObjs(d_Rect const& r, const d_Rect &ignore)
{
d_Rect* rects = treeManager->rects;
int idx = startOwnOff;
while (idx < endOff)
{
if (rects[idx] != ignore && rects[idx].rectsCollision(r))
return true;
++idx;
}
return false;
}
// Builds an enlarged "Gaussian surface" around r, shrinking the requested
// enlargement factor independently per axis until the surface causes no
// collision.  A factor below 1 is rejected and r is returned unchanged.
__device__ d_Rect d_QuadTree::createGaussianSurfFrom(d_Rect const & r, floatingPoint const factor) // collision-free
{
if (factor < 1)
{
printf("CreateGaussian: Nieprawidlowy wspolczynnik!\n");
return r;
}
// Adjust the factor separately for the X and Y directions.
floatingPoint factorX = getAdjustedGaussianFactor(r, factor, D_FACTOR_X);
floatingPoint factorY = getAdjustedGaussianFactor(r, factor, D_FACTOR_Y);
return r.createGaussianSurface(factorX, factorY);
}
// Bisection search (GAUSSIAN_ACCURACY iterations) over [1, factor] for the
// largest enlargement factor along the requested axis whose surface stays
// inside the tree bounds and collides with nothing (ignoring r itself).
__device__ floatingPoint d_QuadTree::getAdjustedGaussianFactor(d_Rect const& r, floatingPoint const factor, D_FACTOR_TYPE type)
{
bool isCollision = false;
bool isDividing = true; // true: the right bound is being lowered
bool isFirstIt = true;
floatingPoint adjustedFactor = factor;
floatingPoint leftBound = 1., righBound = factor;
d_Rect surface;
for (int i = 0; i < GAUSSIAN_ACCURACY; i++)
{
// Candidate surface for the current factor along the chosen axis.
surface = (type == D_FACTOR_X) ?
r.createGaussianSurfaceX(adjustedFactor) :
r.createGaussianSurfaceY(adjustedFactor);
isCollision = (!isInBounds(surface) || checkCollisions(surface, r));
// The full factor already fits on the first try: nothing to adjust.
if (isFirstIt && !isCollision)
break;
// Flip the search direction whenever the collision state disagrees
// with the current bisection direction.
if ((isCollision && !isDividing) ||
(!isCollision && isDividing))
{
isDividing = !isDividing;
}
if (isDividing)
adjustedFactor = righBound = (leftBound + righBound) / 2.;
else
adjustedFactor = leftBound = (leftBound + righBound) / 2.;
isFirstIt = false;
}
return adjustedFactor;
}
|
135956d02781aa1e01b408e484eb4f82f2dc8b00.hip | // !!! This is a file automatically generated by hipify!!!
#include "Test1.h"
#include "IsEqualMatrix.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#define BLOCKSIZE 32
/**
 * Every row of the block (one warp) repeatedly reads the same shared-memory
 * row, so all rows of the block issue identical loads at the same time.
 * In:  matrix      input matrix
 * Out: matrixTest  output matrix
 * In:  row         number of rows
 * In:  col         number of columns
 */
// Shared-memory access benchmark: after staging the tile, every thread
// repeatedly reads sData[0][threadIdx.x], i.e. all rows of the block issue
// the same row-0 load.  The last value read is written back to matrixTest.
__global__ void SharedMemoryBankConflictTest3(float *matrix, float *matrixTest, int row, int col)
{
int x_id = blockDim.x * blockIdx.x + threadIdx.x; // global column index
int y_id = blockDim.y * blockIdx.y + threadIdx.y; // global row index
int index = y_id * col + x_id;
__shared__ float sData[BLOCKSIZE][BLOCKSIZE];
if (x_id < col && y_id < row)
{
sData[threadIdx.y][threadIdx.x] = matrix[index];
__syncthreads();
float data = 0.0f;
// Timed inner loop: 1000 repeated reads from row 0 of the tile.
for (int i = 0; i < 1; i++)
{
for (int j = 0; j < 1000; j++)
{
data = sData[0][threadIdx.x];
}
}
matrixTest[index] = data;
}
}
/**
 * Reference variant: each thread reads back its own tile element, so every
 * row of the block touches a different shared-memory location.
 * In:  matrix      input matrix
 * Out: matrixTest  output matrix
 * In:  row         number of rows
 * In:  col         number of columns
 */
// Reference variant: each thread repeatedly reads back its own tile element
// sData[threadIdx.y][threadIdx.x].
// NOTE(review): despite the "_Pad" name, the tile is [BLOCKSIZE][BLOCKSIZE]
// with no +1 padding column -- confirm whether padding was intended here.
__global__ void SharedMemoryBankConflictTest3_Pad(float *matrix, float *matrixTest, int row, int col)
{
int x_id = blockDim.x * blockIdx.x + threadIdx.x; // global column index
int y_id = blockDim.y * blockIdx.y + threadIdx.y; // global row index
int index = y_id * col + x_id;
__shared__ float sData[BLOCKSIZE][BLOCKSIZE];
if (x_id < col && y_id < row)
{
sData[threadIdx.y][threadIdx.x] = matrix[index];
__syncthreads();
float data = 0.0f;
// Timed inner loop: 1000 repeated reads of the thread's own element.
for (int i = 0; i < 1; i++)
{
for (int j = 0; j < 1000; j++)
{
data = sData[threadIdx.y][threadIdx.x];
}
}
matrixTest[index] = data;
}
}
/**
 * Third test case: times the two shared-memory access kernels above and
 * compares their outputs.
 * In: matrix_Dev  device pointer to the row x col input matrix
 * In: row         number of rows
 * In: col         number of columns
 */
// Runs both shared-memory access kernels on the same input, times each with
// HIP events, and reports whether their outputs match.
// NOTE(review): the return codes of the hip* calls below are not checked.
void Test3(float *matrix_Dev, const int row, const int col)
{
float *matrixTestA_Dev, *matrixTestB_Dev;
hipMalloc((void**)&matrixTestA_Dev, row * col * sizeof(float));
hipMemset(matrixTestA_Dev, 0, row * col * sizeof(float));
hipMalloc((void**)&matrixTestB_Dev, row * col * sizeof(float));
hipMemset(matrixTestB_Dev, 0, row * col * sizeof(float));
float *matrixTestA_Host = (float *)malloc(row * col * sizeof(float));
float *matrixTestB_Host = (float *)malloc(row * col * sizeof(float));
dim3 dimBlock2D(BLOCKSIZE, BLOCKSIZE);
dim3 dimGrid2D((col+ BLOCKSIZE - 1) / dimBlock2D.x, (row + BLOCKSIZE - 1) / dimBlock2D.y);
// timing events
hipEvent_t start_GPU, end_GPU;
float elaspsedTime;
hipEventCreate(&start_GPU);
hipEventCreate(&end_GPU);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
hipEventRecord(start_GPU, 0);
hipLaunchKernelGGL(( SharedMemoryBankConflictTest3), dim3(dimGrid2D), dim3(dimBlock2D), 0, 0, matrix_Dev, matrixTestA_Dev, row, col);
// stop timing
hipEventRecord(end_GPU, 0);
hipEventSynchronize(end_GPU);
hipEventElapsedTime(&elaspsedTime, start_GPU, end_GPU);
std::cout << "\nTest3 " << elaspsedTime << "ms." << std::endl;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
hipEventRecord(start_GPU, 0);
hipLaunchKernelGGL(( SharedMemoryBankConflictTest3_Pad), dim3(dimGrid2D), dim3(dimBlock2D), 0, 0, matrix_Dev, matrixTestB_Dev, row, col);
// stop timing
hipEventRecord(end_GPU, 0);
hipEventSynchronize(end_GPU);
hipEventElapsedTime(&elaspsedTime, start_GPU, end_GPU);
std::cout << "Test3 " << elaspsedTime << "ms." << std::endl;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
hipMemcpy(matrixTestA_Host, matrixTestA_Dev, row * col * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(matrixTestB_Host, matrixTestB_Dev, row * col * sizeof(float), hipMemcpyDeviceToHost);
std::cout << "" << IsEqualMatrix(matrixTestA_Host, matrixTestB_Host, row, col) << std::endl;
hipEventDestroy(start_GPU);
hipEventDestroy(end_GPU);
hipFree(matrixTestA_Dev);
hipFree(matrixTestB_Dev);
free(matrixTestA_Host);
free(matrixTestB_Host);
} | 135956d02781aa1e01b408e484eb4f82f2dc8b00.cu | #include "Test1.h"
#include "IsEqualMatrix.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#define BLOCKSIZE 32
/**
* 功能:每个块内的每行(1个warp)访问一列(一个bank),没有额外
* 说明:块内每行的计算完全相同,并且会同时访问一个 bank
* 输入:matrix 输入矩阵
* 输出:matrixTest 输出矩阵
* 输入:row 行数
* 输入:col 列数
*/
// Shared-memory access benchmark: after staging the tile, every thread
// repeatedly reads sData[0][threadIdx.x], i.e. all rows of the block issue
// the same row-0 load.  The last value read is written back to matrixTest.
__global__ void SharedMemoryBankConflictTest3(float *matrix, float *matrixTest, int row, int col)
{
int x_id = blockDim.x * blockIdx.x + threadIdx.x; // global column index
int y_id = blockDim.y * blockIdx.y + threadIdx.y; // global row index
int index = y_id * col + x_id;
__shared__ float sData[BLOCKSIZE][BLOCKSIZE];
if (x_id < col && y_id < row)
{
sData[threadIdx.y][threadIdx.x] = matrix[index];
__syncthreads();
float data = 0.0f;
// Timed inner loop: 1000 repeated reads from row 0 of the tile.
for (int i = 0; i < 1; i++)
{
for (int j = 0; j < 1000; j++)
{
data = sData[0][threadIdx.x];
}
}
matrixTest[index] = data;
}
}
/**
* 功能:每个块内的每个元素计算改块内一行的和
* 说明:块内每行的计算完全相同,由于多申请一个,所以每一行的会同时访问不同的 bank
* 输入:matrix 输入矩阵
* 输出:matrixTest 输出矩阵
* 输入:row 行数
* 输入:col 列数
*/
// Reference variant: each thread repeatedly reads back its own tile element
// sData[threadIdx.y][threadIdx.x].
// NOTE(review): despite the "_Pad" name, the tile is [BLOCKSIZE][BLOCKSIZE]
// with no +1 padding column -- confirm whether padding was intended here.
__global__ void SharedMemoryBankConflictTest3_Pad(float *matrix, float *matrixTest, int row, int col)
{
int x_id = blockDim.x * blockIdx.x + threadIdx.x; // global column index
int y_id = blockDim.y * blockIdx.y + threadIdx.y; // global row index
int index = y_id * col + x_id;
__shared__ float sData[BLOCKSIZE][BLOCKSIZE];
if (x_id < col && y_id < row)
{
sData[threadIdx.y][threadIdx.x] = matrix[index];
__syncthreads();
float data = 0.0f;
// Timed inner loop: 1000 repeated reads of the thread's own element.
for (int i = 0; i < 1; i++)
{
for (int j = 0; j < 1000; j++)
{
data = sData[threadIdx.y][threadIdx.x];
}
}
matrixTest[index] = data;
}
}
/**
* 功能:第一个测试示例,避免 bank conflict
* 输入:matrix_Dev 输入矩阵
* 输入:row 行数
* 输入:col 列数
*/
// Runs both shared-memory access kernels on the same input, times each with
// CUDA events, and reports whether their outputs match.
// NOTE(review): the return codes of the cuda* calls below are not checked.
void Test3(float *matrix_Dev, const int row, const int col)
{
float *matrixTestA_Dev, *matrixTestB_Dev;
cudaMalloc((void**)&matrixTestA_Dev, row * col * sizeof(float));
cudaMemset(matrixTestA_Dev, 0, row * col * sizeof(float));
cudaMalloc((void**)&matrixTestB_Dev, row * col * sizeof(float));
cudaMemset(matrixTestB_Dev, 0, row * col * sizeof(float));
float *matrixTestA_Host = (float *)malloc(row * col * sizeof(float));
float *matrixTestB_Host = (float *)malloc(row * col * sizeof(float));
dim3 dimBlock2D(BLOCKSIZE, BLOCKSIZE);
dim3 dimGrid2D((col+ BLOCKSIZE - 1) / dimBlock2D.x, (row + BLOCKSIZE - 1) / dimBlock2D.y);
// timing events
cudaEvent_t start_GPU, end_GPU;
float elaspsedTime;
cudaEventCreate(&start_GPU);
cudaEventCreate(&end_GPU);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cudaEventRecord(start_GPU, 0);
SharedMemoryBankConflictTest3<<<dimGrid2D, dimBlock2D>>>(matrix_Dev, matrixTestA_Dev, row, col);
// stop timing
cudaEventRecord(end_GPU, 0);
cudaEventSynchronize(end_GPU);
cudaEventElapsedTime(&elaspsedTime, start_GPU, end_GPU);
std::cout << "\nTest3 的运行时间为:" << elaspsedTime << "ms." << std::endl;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cudaEventRecord(start_GPU, 0);
SharedMemoryBankConflictTest3_Pad<<<dimGrid2D, dimBlock2D>>>(matrix_Dev, matrixTestB_Dev, row, col);
// stop timing
cudaEventRecord(end_GPU, 0);
cudaEventSynchronize(end_GPU);
cudaEventElapsedTime(&elaspsedTime, start_GPU, end_GPU);
std::cout << "Test3 的运行时间为:" << elaspsedTime << "ms." << std::endl;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cudaMemcpy(matrixTestA_Host, matrixTestA_Dev, row * col * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(matrixTestB_Host, matrixTestB_Dev, row * col * sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "结果是否相同:" << IsEqualMatrix(matrixTestA_Host, matrixTestB_Host, row, col) << std::endl;
cudaEventDestroy(start_GPU);
cudaEventDestroy(end_GPU);
cudaFree(matrixTestA_Dev);
cudaFree(matrixTestB_Dev);
free(matrixTestA_Host);
free(matrixTestB_Host);
} |
50fe76f46255f179151b4ce05cd0433027e889c4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2009, Jiri Matela
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <errno.h>
#include <error.h>
#include <fcntl.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include "common.h"
#include "components.h"
#include "dwt.h"
// Bundle of command-line parameters and buffers for one DWT run.
struct dwt {
  char *srcFilename;     // input image path (raw sample file)
  char *outFilename;     // output path prefix for the transformed sub-bands
  unsigned char *srcImg; // source pixel data (allocated via hipMallocManaged in main)
  int pixWidth;          // image width in pixels
  int pixHeight;         // image height in pixels
  int components;        // color component count (3 = RGB, 1 = grayscale)
  int dwtLvls;           // number of DWT decomposition levels
};
/* Resolves srcFilename against the hard-coded data directory
 * ("../../../data/dwt2d/"), opens the file and reads up to inputSize bytes
 * into srcImg.
 * Returns 0 on success (a short read is only printed, not treated as an
 * error), -1 if the file cannot be opened.
 * Fix: the concatenated path buffer is now freed on every exit path; the
 * original leaked it. */
int getImg(char *srcFilename, unsigned char *srcImg, int inputSize) {
  const char *path = "../../../data/dwt2d/";
  char *newSrc = NULL;
  // Build "<path><srcFilename>"; on malloc failure fall back to the raw name.
  if ((newSrc = (char *)malloc(strlen(srcFilename) + strlen(path) + 1)) !=
      NULL) {
    newSrc[0] = '\0';
    strcat(newSrc, path);
    strcat(newSrc, srcFilename);
    srcFilename = newSrc;
  }
  printf("Loading ipnput: %s\n", srcFilename);
  // read image
  int i = open(srcFilename, O_RDONLY, 0644);
  if (i == -1) {
    error(0, errno, "cannot access %s", srcFilename);
    free(newSrc); // do not leak the concatenated path
    return -1;
  }
  int ret = read(i, srcImg, inputSize);
  printf("precteno %d, inputsize %d\n", ret, inputSize);
  close(i);
  free(newSrc); // do not leak the concatenated path
  return 0;
}
// Prints the command-line help text for the dwt tool to stdout.
void usage() {
  printf("dwt [otpions] src_img.rgb <out_img.dwt>\n\
 -d, --dimension\t\tdimensions of src img, e.g. 1920x1080\n\
 -c, --components\t\tnumber of color components, default 3\n\
 -b, --depth\t\t\tbit depth, default 8\n\
 -l, --level\t\t\tDWT level, default 3\n\
 -D, --device\t\t\tcuda device\n\
 -f, --forward\t\t\tforward transform\n\
 -r, --reverse\t\t\treverse transform\n\
 -9, --97\t\t\t9/7 transform\n\
 -5, --53\t\t\t5/3 transform\n\
 -w --write-visual\t\twrite output in visual (tiled) fashion instead of the linear\n");
}
/* Runs an n-level 2D DWT over each color component of d->srcImg and writes
 * the transformed component(s) to files derived from d->outFilename.
 * T selects the sample type: float for the 9/7 transform, int for 5/3.
 * forward: 1 = forward DWT, 0 = reverse.
 * writeVisual: tiled (visual) sub-band layout instead of linear.
 * File output for the 3-component path is compiled in only when OUTPUT is
 * defined.
 * NOTE(review): componentSize is computed in int arithmetic and could
 * overflow for very large images -- TODO confirm expected input sizes. */
template <typename T>
void processDWT(struct dwt *d, int forward, int writeVisual) {
  int componentSize = d->pixWidth * d->pixHeight * sizeof(T);
  // backup is shared scratch space reused by every nStage2dDWT call below.
  T *c_r_out, *backup;
  hipMallocManaged(&c_r_out, componentSize); //< aligned component size
  cudaCheckError("Alloc device memory");
  hipMemset(c_r_out, 0, componentSize);
  cudaCheckError("Memset device memory");
  hipMallocManaged(&backup, componentSize); //< aligned component size
  cudaCheckError("Alloc device memory");
  // hipMemset(backup, 0, componentSize);
  cudaCheckError("Memset device memory");
  if (d->components == 3) {
    /* Alloc two more buffers for G and B */
    T *c_g_out, *c_b_out;
    hipMallocManaged(&c_g_out, componentSize); //< aligned component size
    cudaCheckError("Alloc device memory");
    hipMemset(c_g_out, 0, componentSize);
    cudaCheckError("Memset device memory");
    hipMallocManaged(&c_b_out, componentSize); //< aligned component size
    cudaCheckError("Alloc device memory");
    hipMemset(c_b_out, 0, componentSize);
    cudaCheckError("Memset device memory");
    /* Load components */
    T *c_r, *c_g, *c_b;
    hipMallocManaged(&c_r, componentSize); //< R, aligned component size
    cudaCheckError("Alloc device memory");
    hipMemset(c_r, 0, componentSize);
    cudaCheckError("Memset device memory");
    hipMallocManaged(&c_g, componentSize); //< G, aligned component size
    cudaCheckError("Alloc device memory");
    hipMemset(c_g, 0, componentSize);
    cudaCheckError("Memset device memory");
    hipMallocManaged(&c_b, componentSize); //< B, aligned component size
    cudaCheckError("Alloc device memory");
    hipMemset(c_b, 0, componentSize);
    cudaCheckError("Memset device memory");
    // De-interleave the packed source image into three planar components.
    rgbToComponents(c_r, c_g, c_b, d->srcImg, d->pixWidth, d->pixHeight);
    /* Compute DWT and always store into file */
    nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls,
                forward);
    nStage2dDWT(c_g, c_g_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls,
                forward);
    nStage2dDWT(c_b, c_b_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls,
                forward);
    // -------test----------
    // T *h_r_out=(T*)malloc(componentSize);
    // hipMemcpy(h_r_out, c_g_out, componentSize, hipMemcpyDeviceToHost);
    // int ii;
    // for(ii=0;ii<componentSize/sizeof(T);ii++) {
    //   fprintf(stderr, "%d ", h_r_out[ii]);
    //   if((ii+1) % (d->pixWidth) == 0) fprintf(stderr, "\n");
    // }
    // -------test----------
    /* Store DWT to file */
#ifdef OUTPUT
    if (writeVisual) {
      writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls,
                       d->outFilename, ".r");
      writeNStage2DDWT(c_g_out, d->pixWidth, d->pixHeight, d->dwtLvls,
                       d->outFilename, ".g");
      writeNStage2DDWT(c_b_out, d->pixWidth, d->pixHeight, d->dwtLvls,
                       d->outFilename, ".b");
    } else {
      writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".r");
      writeLinear(c_g_out, d->pixWidth, d->pixHeight, d->outFilename, ".g");
      writeLinear(c_b_out, d->pixWidth, d->pixHeight, d->outFilename, ".b");
    }
#endif
    hipFree(c_r);
    cudaCheckError("Cuda free");
    hipFree(c_g);
    cudaCheckError("Cuda free");
    hipFree(c_b);
    cudaCheckError("Cuda free");
    hipFree(c_g_out);
    cudaCheckError("Cuda free");
    hipFree(c_b_out);
    cudaCheckError("Cuda free");
  } else if (d->components == 1) {
    // Load component
    T *c_r;
    hipMalloc((void **)&(c_r), componentSize); //< R, aligned component size
    cudaCheckError("Alloc device memory");
    hipMemset(c_r, 0, componentSize);
    cudaCheckError("Memset device memory");
    bwToComponent(c_r, d->srcImg, d->pixWidth, d->pixHeight);
    // Compute DWT
    nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls,
                forward);
    // Store DWT to file
    // #ifdef OUTPUT
    if (writeVisual) {
      writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls,
                       d->outFilename, ".out");
    } else {
      writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename,
                  ".lin.out");
    }
    // #endif
    hipFree(c_r);
    cudaCheckError("Cuda free");
  }
  hipFree(c_r_out);
  cudaCheckError("Cuda free device");
  hipFree(backup);
  cudaCheckError("Cuda free device");
}
/* Command-line driver: parses options, validates them, initializes the
 * selected GPU device, loads the raw source image and dispatches processDWT
 * with float (9/7) or int (5/3) samples.
 * Returns 0 on success, -1 on any usage or device error.
 * Fixes: `ch` is int (getopt_long returns int; the -1 comparison breaks
 * where plain char is unsigned); longopts[] gains the zero terminator
 * required by getopt_long(3); both filename mallocs reserve room for the
 * terminating NUL (the originals overran their buffers). */
int main(int argc, char **argv) {
  int optindex = 0;
  int ch;
  struct option longopts[] = {
      {"dimension", required_argument, 0, 'd'}, // dimensions of src img
      {"components", required_argument, 0,
       'c'}, // numger of components of src img
      {"depth", required_argument, 0, 'b'}, // bit depth of src img
      {"level", required_argument, 0, 'l'}, // level of dwt
      {"device", required_argument, 0, 'D'}, // cuda device
      {"forward", no_argument, 0, 'f'}, // forward transform
      {"reverse", no_argument, 0, 'r'}, // reverse transform
      {"97", no_argument, 0, '9'}, // 9/7 transform
      {"53", no_argument, 0, '5'}, // 5/3transform
      {"write-visual", no_argument, 0, 'w'}, // write output (subbands) in
                                             // visual (tiled) order instead of
                                             // linear
      {"help", no_argument, 0, 'h'},
      {0, 0, 0, 0}}; // terminator required by getopt_long(3)
  int pixWidth = 0; //<real pixWidth
  int pixHeight = 0; //<real pixHeight
  int compCount = 3; // number of components; 3 for RGB or YUV, 4 for RGBA
  int bitDepth = 8;
  int dwtLvls = 3; // default numuber of DWT levels
  int device = 0;
  int forward = 1; // forward transform
  int dwt97 = 1; // 1=dwt9/7, 0=dwt5/3 transform
  int writeVisual =
      0; // write output (subbands) in visual (tiled) order instead of linear
  char *pos;
  while ((ch = getopt_long(argc, argv, "d:c:b:l:D:fr95wh", longopts,
                           &optindex)) != -1) {
    switch (ch) {
    case 'd':
      pixWidth = atoi(optarg);
      pos = strstr(optarg, "x");
      if (pos == NULL || pixWidth == 0 || (strlen(pos) >= strlen(optarg))) {
        usage();
        return -1;
      }
      pixHeight = atoi(pos + 1);
      break;
    case 'c':
      compCount = atoi(optarg);
      break;
    case 'b':
      bitDepth = atoi(optarg);
      break;
    case 'l':
      dwtLvls = atoi(optarg);
      break;
    case 'D':
      device = atoi(optarg);
      break;
    case 'f':
      forward = 1;
      break;
    case 'r':
      forward = 0;
      break;
    case '9':
      dwt97 = 1;
      break;
    case '5':
      dwt97 = 0;
      break;
    case 'w':
      writeVisual = 1;
      break;
    case 'h':
      usage();
      return 0;
    case '?':
      return -1;
    default:
      usage();
      return -1;
    }
  }
  argc -= optind;
  argv += optind;
  if (argc == 0) { // at least one filename is expected
    printf("Please supply src file name\n");
    usage();
    return -1;
  }
  if (pixWidth <= 0 || pixHeight <= 0) {
    printf("Wrong or missing dimensions\n");
    usage();
    return -1;
  }
  if (forward == 0) {
    writeVisual = 0; // do not write visual when RDWT
  }
  // device init
  int devCount;
  hipGetDeviceCount(&devCount);
  cudaCheckError("Get device count");
  if (devCount == 0) {
    printf("No CUDA enabled device\n");
    return -1;
  }
  if (device < 0 || device > devCount - 1) {
    printf("Selected device %d is out of bound. Devices on your system are in "
           "range %d - %d\n",
           device, 0, devCount - 1);
    return -1;
  }
  hipDeviceProp_t devProp;
  hipGetDeviceProperties(&devProp, device);
  cudaCheckError("Get device properties");
  if (devProp.major < 1) {
    printf("Device %d does not support CUDA\n", device);
    return -1;
  }
  printf("Using device %d: %s\n", device, devProp.name);
  hipSetDevice(device);
  cudaCheckError("Set selected device");
  struct dwt *d;
  d = (struct dwt *)malloc(sizeof(struct dwt));
  d->srcImg = NULL;
  d->pixWidth = pixWidth;
  d->pixHeight = pixHeight;
  d->components = compCount;
  d->dwtLvls = dwtLvls;
  // file names (+1 / +5 reserve room for the terminating NUL and ".dwt")
  d->srcFilename = (char *)malloc(strlen(argv[0]) + 1);
  strcpy(d->srcFilename, argv[0]);
  if (argc == 1) { // only one filename supplyed
    d->outFilename = (char *)malloc(strlen(d->srcFilename) + 5);
    strcpy(d->outFilename, d->srcFilename);
    strcpy(d->outFilename + strlen(d->srcFilename), ".dwt");
  } else {
    d->outFilename = strdup(argv[1]);
  }
  // Input review
  printf("Source file:\t\t%s\n", d->srcFilename);
  printf(" Dimensions:\t\t%dx%d\n", pixWidth, pixHeight);
  printf(" Components count:\t%d\n", compCount);
  printf(" Bit depth:\t\t%d\n", bitDepth);
  printf(" DWT levels:\t\t%d\n", dwtLvls);
  printf(" Forward transform:\t%d\n", forward);
  printf(" 9/7 transform:\t\t%d\n", dwt97);
  // data sizes
  int inputSize =
      pixWidth * pixHeight * compCount; //<amount of data (in bytes) to proccess
  // load img source image
  hipMallocManaged(&d->srcImg, inputSize);
  cudaCheckError("Alloc host memory");
  if (getImg(d->srcFilename, d->srcImg, inputSize) == -1)
    return -1;
  /* DWT: sample type is float for 9/7, int for 5/3 */
  if (forward == 1) {
    if (dwt97 == 1)
      processDWT<float>(d, forward, writeVisual);
    else // 5/3
      processDWT<int>(d, forward, writeVisual);
  } else { // reverse
    if (dwt97 == 1)
      processDWT<float>(d, forward, writeVisual);
    else // 5/3
      processDWT<int>(d, forward, writeVisual);
  }
  // writeComponent(r_cuda, pixWidth, pixHeight, srcFilename, ".g");
  // writeComponent(g_wave_cuda, 512000, ".g");
  // writeComponent(g_cuda, componentSize, ".g");
  // writeComponent(b_wave_cuda, componentSize, ".b");
  hipFree(d->srcImg);
  cudaCheckError("Cuda free host");
  return 0;
}
| 50fe76f46255f179151b4ce05cd0433027e889c4.cu | /*
* Copyright (c) 2009, Jiri Matela
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <errno.h>
#include <error.h>
#include <fcntl.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include "common.h"
#include "components.h"
#include "dwt.h"
// Bundle of command-line parameters and buffers for one DWT run.
struct dwt {
  char *srcFilename;     // input image path (raw sample file)
  char *outFilename;     // output path prefix for the transformed sub-bands
  unsigned char *srcImg; // source pixel data (allocated via cudaMallocManaged in main)
  int pixWidth;          // image width in pixels
  int pixHeight;         // image height in pixels
  int components;        // color component count (3 = RGB, 1 = grayscale)
  int dwtLvls;           // number of DWT decomposition levels
};
/* Resolves srcFilename against the hard-coded data directory
 * ("../../../data/dwt2d/"), opens the file and reads up to inputSize bytes
 * into srcImg.
 * Returns 0 on success (a short read is only printed, not treated as an
 * error), -1 if the file cannot be opened.
 * Fix: the concatenated path buffer is now freed on every exit path; the
 * original leaked it. */
int getImg(char *srcFilename, unsigned char *srcImg, int inputSize) {
  const char *path = "../../../data/dwt2d/";
  char *newSrc = NULL;
  // Build "<path><srcFilename>"; on malloc failure fall back to the raw name.
  if ((newSrc = (char *)malloc(strlen(srcFilename) + strlen(path) + 1)) !=
      NULL) {
    newSrc[0] = '\0';
    strcat(newSrc, path);
    strcat(newSrc, srcFilename);
    srcFilename = newSrc;
  }
  printf("Loading ipnput: %s\n", srcFilename);
  // read image
  int i = open(srcFilename, O_RDONLY, 0644);
  if (i == -1) {
    error(0, errno, "cannot access %s", srcFilename);
    free(newSrc); // do not leak the concatenated path
    return -1;
  }
  int ret = read(i, srcImg, inputSize);
  printf("precteno %d, inputsize %d\n", ret, inputSize);
  close(i);
  free(newSrc); // do not leak the concatenated path
  return 0;
}
// Prints the command-line help text for the dwt tool to stdout.
void usage() {
  printf("dwt [otpions] src_img.rgb <out_img.dwt>\n\
 -d, --dimension\t\tdimensions of src img, e.g. 1920x1080\n\
 -c, --components\t\tnumber of color components, default 3\n\
 -b, --depth\t\t\tbit depth, default 8\n\
 -l, --level\t\t\tDWT level, default 3\n\
 -D, --device\t\t\tcuda device\n\
 -f, --forward\t\t\tforward transform\n\
 -r, --reverse\t\t\treverse transform\n\
 -9, --97\t\t\t9/7 transform\n\
 -5, --53\t\t\t5/3 transform\n\
 -w --write-visual\t\twrite output in visual (tiled) fashion instead of the linear\n");
}
/* Runs an n-level 2D DWT over each color component of d->srcImg and writes
 * the transformed component(s) to files derived from d->outFilename.
 * T selects the sample type: float for the 9/7 transform, int for 5/3.
 * forward: 1 = forward DWT, 0 = reverse.
 * writeVisual: tiled (visual) sub-band layout instead of linear.
 * File output for the 3-component path is compiled in only when OUTPUT is
 * defined.
 * NOTE(review): componentSize is computed in int arithmetic and could
 * overflow for very large images -- TODO confirm expected input sizes. */
template <typename T>
void processDWT(struct dwt *d, int forward, int writeVisual) {
  int componentSize = d->pixWidth * d->pixHeight * sizeof(T);
  // backup is shared scratch space reused by every nStage2dDWT call below.
  T *c_r_out, *backup;
  cudaMallocManaged(&c_r_out, componentSize); //< aligned component size
  cudaCheckError("Alloc device memory");
  cudaMemset(c_r_out, 0, componentSize);
  cudaCheckError("Memset device memory");
  cudaMallocManaged(&backup, componentSize); //< aligned component size
  cudaCheckError("Alloc device memory");
  // cudaMemset(backup, 0, componentSize);
  cudaCheckError("Memset device memory");
  if (d->components == 3) {
    /* Alloc two more buffers for G and B */
    T *c_g_out, *c_b_out;
    cudaMallocManaged(&c_g_out, componentSize); //< aligned component size
    cudaCheckError("Alloc device memory");
    cudaMemset(c_g_out, 0, componentSize);
    cudaCheckError("Memset device memory");
    cudaMallocManaged(&c_b_out, componentSize); //< aligned component size
    cudaCheckError("Alloc device memory");
    cudaMemset(c_b_out, 0, componentSize);
    cudaCheckError("Memset device memory");
    /* Load components */
    T *c_r, *c_g, *c_b;
    cudaMallocManaged(&c_r, componentSize); //< R, aligned component size
    cudaCheckError("Alloc device memory");
    cudaMemset(c_r, 0, componentSize);
    cudaCheckError("Memset device memory");
    cudaMallocManaged(&c_g, componentSize); //< G, aligned component size
    cudaCheckError("Alloc device memory");
    cudaMemset(c_g, 0, componentSize);
    cudaCheckError("Memset device memory");
    cudaMallocManaged(&c_b, componentSize); //< B, aligned component size
    cudaCheckError("Alloc device memory");
    cudaMemset(c_b, 0, componentSize);
    cudaCheckError("Memset device memory");
    // De-interleave the packed source image into three planar components.
    rgbToComponents(c_r, c_g, c_b, d->srcImg, d->pixWidth, d->pixHeight);
    /* Compute DWT and always store into file */
    nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls,
                forward);
    nStage2dDWT(c_g, c_g_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls,
                forward);
    nStage2dDWT(c_b, c_b_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls,
                forward);
    // -------test----------
    // T *h_r_out=(T*)malloc(componentSize);
    // cudaMemcpy(h_r_out, c_g_out, componentSize, cudaMemcpyDeviceToHost);
    // int ii;
    // for(ii=0;ii<componentSize/sizeof(T);ii++) {
    //   fprintf(stderr, "%d ", h_r_out[ii]);
    //   if((ii+1) % (d->pixWidth) == 0) fprintf(stderr, "\n");
    // }
    // -------test----------
    /* Store DWT to file */
#ifdef OUTPUT
    if (writeVisual) {
      writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls,
                       d->outFilename, ".r");
      writeNStage2DDWT(c_g_out, d->pixWidth, d->pixHeight, d->dwtLvls,
                       d->outFilename, ".g");
      writeNStage2DDWT(c_b_out, d->pixWidth, d->pixHeight, d->dwtLvls,
                       d->outFilename, ".b");
    } else {
      writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename, ".r");
      writeLinear(c_g_out, d->pixWidth, d->pixHeight, d->outFilename, ".g");
      writeLinear(c_b_out, d->pixWidth, d->pixHeight, d->outFilename, ".b");
    }
#endif
    cudaFree(c_r);
    cudaCheckError("Cuda free");
    cudaFree(c_g);
    cudaCheckError("Cuda free");
    cudaFree(c_b);
    cudaCheckError("Cuda free");
    cudaFree(c_g_out);
    cudaCheckError("Cuda free");
    cudaFree(c_b_out);
    cudaCheckError("Cuda free");
  } else if (d->components == 1) {
    // Load component
    T *c_r;
    cudaMalloc((void **)&(c_r), componentSize); //< R, aligned component size
    cudaCheckError("Alloc device memory");
    cudaMemset(c_r, 0, componentSize);
    cudaCheckError("Memset device memory");
    bwToComponent(c_r, d->srcImg, d->pixWidth, d->pixHeight);
    // Compute DWT
    nStage2dDWT(c_r, c_r_out, backup, d->pixWidth, d->pixHeight, d->dwtLvls,
                forward);
    // Store DWT to file
    // #ifdef OUTPUT
    if (writeVisual) {
      writeNStage2DDWT(c_r_out, d->pixWidth, d->pixHeight, d->dwtLvls,
                       d->outFilename, ".out");
    } else {
      writeLinear(c_r_out, d->pixWidth, d->pixHeight, d->outFilename,
                  ".lin.out");
    }
    // #endif
    cudaFree(c_r);
    cudaCheckError("Cuda free");
  }
  cudaFree(c_r_out);
  cudaCheckError("Cuda free device");
  cudaFree(backup);
  cudaCheckError("Cuda free device");
}
/* Command-line driver: parses options, validates them, initializes the
 * selected GPU device, loads the raw source image and dispatches processDWT
 * with float (9/7) or int (5/3) samples.
 * Returns 0 on success, -1 on any usage or device error.
 * Fixes: `ch` is int (getopt_long returns int; the -1 comparison breaks
 * where plain char is unsigned); longopts[] gains the zero terminator
 * required by getopt_long(3); both filename mallocs reserve room for the
 * terminating NUL (the originals overran their buffers). */
int main(int argc, char **argv) {
  int optindex = 0;
  int ch;
  struct option longopts[] = {
      {"dimension", required_argument, 0, 'd'}, // dimensions of src img
      {"components", required_argument, 0,
       'c'}, // numger of components of src img
      {"depth", required_argument, 0, 'b'}, // bit depth of src img
      {"level", required_argument, 0, 'l'}, // level of dwt
      {"device", required_argument, 0, 'D'}, // cuda device
      {"forward", no_argument, 0, 'f'}, // forward transform
      {"reverse", no_argument, 0, 'r'}, // reverse transform
      {"97", no_argument, 0, '9'}, // 9/7 transform
      {"53", no_argument, 0, '5'}, // 5/3transform
      {"write-visual", no_argument, 0, 'w'}, // write output (subbands) in
                                             // visual (tiled) order instead of
                                             // linear
      {"help", no_argument, 0, 'h'},
      {0, 0, 0, 0}}; // terminator required by getopt_long(3)
  int pixWidth = 0; //<real pixWidth
  int pixHeight = 0; //<real pixHeight
  int compCount = 3; // number of components; 3 for RGB or YUV, 4 for RGBA
  int bitDepth = 8;
  int dwtLvls = 3; // default numuber of DWT levels
  int device = 0;
  int forward = 1; // forward transform
  int dwt97 = 1; // 1=dwt9/7, 0=dwt5/3 transform
  int writeVisual =
      0; // write output (subbands) in visual (tiled) order instead of linear
  char *pos;
  while ((ch = getopt_long(argc, argv, "d:c:b:l:D:fr95wh", longopts,
                           &optindex)) != -1) {
    switch (ch) {
    case 'd':
      pixWidth = atoi(optarg);
      pos = strstr(optarg, "x");
      if (pos == NULL || pixWidth == 0 || (strlen(pos) >= strlen(optarg))) {
        usage();
        return -1;
      }
      pixHeight = atoi(pos + 1);
      break;
    case 'c':
      compCount = atoi(optarg);
      break;
    case 'b':
      bitDepth = atoi(optarg);
      break;
    case 'l':
      dwtLvls = atoi(optarg);
      break;
    case 'D':
      device = atoi(optarg);
      break;
    case 'f':
      forward = 1;
      break;
    case 'r':
      forward = 0;
      break;
    case '9':
      dwt97 = 1;
      break;
    case '5':
      dwt97 = 0;
      break;
    case 'w':
      writeVisual = 1;
      break;
    case 'h':
      usage();
      return 0;
    case '?':
      return -1;
    default:
      usage();
      return -1;
    }
  }
  argc -= optind;
  argv += optind;
  if (argc == 0) { // at least one filename is expected
    printf("Please supply src file name\n");
    usage();
    return -1;
  }
  if (pixWidth <= 0 || pixHeight <= 0) {
    printf("Wrong or missing dimensions\n");
    usage();
    return -1;
  }
  if (forward == 0) {
    writeVisual = 0; // do not write visual when RDWT
  }
  // device init
  int devCount;
  cudaGetDeviceCount(&devCount);
  cudaCheckError("Get device count");
  if (devCount == 0) {
    printf("No CUDA enabled device\n");
    return -1;
  }
  if (device < 0 || device > devCount - 1) {
    printf("Selected device %d is out of bound. Devices on your system are in "
           "range %d - %d\n",
           device, 0, devCount - 1);
    return -1;
  }
  cudaDeviceProp devProp;
  cudaGetDeviceProperties(&devProp, device);
  cudaCheckError("Get device properties");
  if (devProp.major < 1) {
    printf("Device %d does not support CUDA\n", device);
    return -1;
  }
  printf("Using device %d: %s\n", device, devProp.name);
  cudaSetDevice(device);
  cudaCheckError("Set selected device");
  struct dwt *d;
  d = (struct dwt *)malloc(sizeof(struct dwt));
  d->srcImg = NULL;
  d->pixWidth = pixWidth;
  d->pixHeight = pixHeight;
  d->components = compCount;
  d->dwtLvls = dwtLvls;
  // file names (+1 / +5 reserve room for the terminating NUL and ".dwt")
  d->srcFilename = (char *)malloc(strlen(argv[0]) + 1);
  strcpy(d->srcFilename, argv[0]);
  if (argc == 1) { // only one filename supplyed
    d->outFilename = (char *)malloc(strlen(d->srcFilename) + 5);
    strcpy(d->outFilename, d->srcFilename);
    strcpy(d->outFilename + strlen(d->srcFilename), ".dwt");
  } else {
    d->outFilename = strdup(argv[1]);
  }
  // Input review
  printf("Source file:\t\t%s\n", d->srcFilename);
  printf(" Dimensions:\t\t%dx%d\n", pixWidth, pixHeight);
  printf(" Components count:\t%d\n", compCount);
  printf(" Bit depth:\t\t%d\n", bitDepth);
  printf(" DWT levels:\t\t%d\n", dwtLvls);
  printf(" Forward transform:\t%d\n", forward);
  printf(" 9/7 transform:\t\t%d\n", dwt97);
  // data sizes
  int inputSize =
      pixWidth * pixHeight * compCount; //<amount of data (in bytes) to proccess
  // load img source image
  cudaMallocManaged(&d->srcImg, inputSize);
  cudaCheckError("Alloc host memory");
  if (getImg(d->srcFilename, d->srcImg, inputSize) == -1)
    return -1;
  /* DWT: sample type is float for 9/7, int for 5/3 */
  if (forward == 1) {
    if (dwt97 == 1)
      processDWT<float>(d, forward, writeVisual);
    else // 5/3
      processDWT<int>(d, forward, writeVisual);
  } else { // reverse
    if (dwt97 == 1)
      processDWT<float>(d, forward, writeVisual);
    else // 5/3
      processDWT<int>(d, forward, writeVisual);
  }
  // writeComponent(r_cuda, pixWidth, pixHeight, srcFilename, ".g");
  // writeComponent(g_wave_cuda, 512000, ".g");
  // writeComponent(g_cuda, componentSize, ".g");
  // writeComponent(b_wave_cuda, componentSize, ".b");
  cudaFree(d->srcImg);
  cudaCheckError("Cuda free host");
  return 0;
}
|
819e087e47f55e07d7354ee9b6c5bece27c11eae.hip | // !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <algorithm>
#include <memory>
#include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"
/* Kernel: computes one pixel of a green plasma pattern into the RGBA buffer
 * ptr. Each thread writes exactly one pixel; the green channel is the product
 * of two sinusoids over x and y with a 128-pixel period, staged through
 * shared memory. (Original tutorial comments were garbled mojibake and have
 * been replaced with English.) */
__global__ static void green_ball(unsigned char* ptr, int width, int height)
{
    // map from threadIdx/BlockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    // One shared cell per thread; matches the fixed 16x16 block launched by
    // green_ball_gpu (threads_block == 16).
    __shared__ float shared[16][16]; // == threads_block
    // now calculate the value at that position
    const float period = 128.0f;
    shared[threadIdx.x][threadIdx.y] = 255 * (sinf(x*2.0f*PI / period) + 1.0f) *(sinf(y*2.0f*PI / period) + 1.0f) / 4.0f;
    // removing this syncthreads shows graphically what happens
    // when it doesn't exist.this is an example of why we need it.
    // (With the mirrored /*15 - */ indices enabled below, threads read cells
    // written by other threads, so the barrier is required.)
    __syncthreads();
    ptr[offset * 4 + 0] = 0;
    ptr[offset * 4 + 1] = shared[/*15 - */threadIdx.x][/*15 - */threadIdx.y];
    ptr[offset * 4 + 2] = 0;
    ptr[offset * 4 + 3] = 255;
}
/* Fills ptr (host buffer of width*height RGBA bytes) with the green plasma
 * pattern computed on the GPU and reports the GPU-measured time in
 * milliseconds through elapsed_time. Returns 0.
 * NOTE(review): the grid is width/16 x height/16 blocks (integer division)
 * and the kernel has no bounds check, so width and height must be multiples
 * of 16 or trailing pixels are left unwritten -- TODO confirm callers.
 * (Original tutorial comments were garbled mojibake; replaced with English.) */
int green_ball_gpu(unsigned char* ptr, int width, int height, float* elapsed_time)
{
    // GPU events give device-side timestamps bracketing the whole operation.
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    const size_t length{ width * height * 4 * sizeof(unsigned char) };
    unsigned char* dev{ nullptr };
    // Device buffer for the rendered RGBA image.
    hipMalloc(&dev, length);
    const int threads_block{ 16 };
    dim3 blocks(width / threads_block, height / threads_block);
    dim3 threads(threads_block, threads_block);
    green_ball << <blocks, threads >> >(dev, width, height);
    // Synchronous copy of the rendered image back to the host buffer.
    hipMemcpy(ptr, dev, length, hipMemcpyDeviceToHost);
    hipFree(dev);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(elapsed_time, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
| 819e087e47f55e07d7354ee9b6c5bece27c11eae.cu | #include "funset.hpp"
#include <iostream>
#include <algorithm>
#include <memory>
#include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"
/* Kernel: computes one pixel of a green plasma pattern into the RGBA buffer
 * ptr. Each thread writes exactly one pixel; the green channel is the product
 * of two sinusoids over x and y with a 128-pixel period, staged through
 * shared memory. (Original long Chinese tutorial comments condensed and
 * translated to English.) */
__global__ static void green_ball(unsigned char* ptr, int width, int height)
{
	// map from threadIdx/BlockIdx to pixel position
	int x = threadIdx.x + blockIdx.x * blockDim.x;
	int y = threadIdx.y + blockIdx.y * blockDim.y;
	int offset = x + y * blockDim.x * gridDim.x;
	// One shared cell per thread; matches the fixed 16x16 block launched by
	// green_ball_gpu (threads_block == 16).
	__shared__ float shared[16][16]; // == threads_block
	// now calculate the value at that position
	const float period = 128.0f;
	shared[threadIdx.x][threadIdx.y] = 255 * (sinf(x*2.0f*PI / period) + 1.0f) *(sinf(y*2.0f*PI / period) + 1.0f) / 4.0f;
	// removing this syncthreads shows graphically what happens
	// when it doesn't exist.this is an example of why we need it.
	// (With the mirrored /*15 - */ indices enabled below, threads read cells
	// written by other threads, so the barrier is required.)
	__syncthreads();
	ptr[offset * 4 + 0] = 0;
	ptr[offset * 4 + 1] = shared[/*15 - */threadIdx.x][/*15 - */threadIdx.y];
	ptr[offset * 4 + 2] = 0;
	ptr[offset * 4 + 3] = 255;
}
/* Fills ptr (host buffer of width*height RGBA bytes) with the green plasma
 * pattern computed on the GPU and reports the GPU-measured time in
 * milliseconds through elapsed_time. Returns 0.
 * NOTE(review): the grid is width/16 x height/16 blocks (integer division)
 * and the kernel has no bounds check, so width and height must be multiples
 * of 16 or trailing pixels are left unwritten -- TODO confirm callers.
 * (Original long Chinese tutorial comments condensed and translated.) */
int green_ball_gpu(unsigned char* ptr, int width, int height, float* elapsed_time)
{
	// CUDA events give device-side timestamps bracketing the whole operation.
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	const size_t length{ width * height * 4 * sizeof(unsigned char) };
	unsigned char* dev{ nullptr };
	// Device buffer for the rendered RGBA image.
	cudaMalloc(&dev, length);
	const int threads_block{ 16 };
	dim3 blocks(width / threads_block, height / threads_block);
	dim3 threads(threads_block, threads_block);
	green_ball << <blocks, threads >> >(dev, width, height);
	// Synchronous copy of the rendered image back to the host buffer.
	cudaMemcpy(ptr, dev, length, cudaMemcpyDeviceToHost);
	cudaFree(dev);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(elapsed_time, start, stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return 0;
}
|
ebaf12106856907ecae184a6fd1c757e8bc58e1f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/count.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
// this will return the number of characters for each string
// Fills lengths[i] with the character (codepoint) count of string i, or -1
// for a null entry. When todevice is false, lengths is a host pointer and a
// temporary device buffer stages the results. Returns the total character
// count over all strings (null entries contribute 0).
unsigned int NVStrings::len(int* lengths, bool todevice)
{
    unsigned int count = size();
    if( lengths==0 || count==0 )  // no output buffer or nothing to measure
        return count;
    auto execpol = rmm::exec_policy(0);
    int* d_rtn = lengths;
    if( !todevice )
        d_rtn = device_alloc<int>(count,0);  // stage per-string results on the device
    custring_view** d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            if( dstr )
                d_rtn[idx] = dstr->chars_count();
            else
                d_rtn[idx] = -1;  // null sentinel
        });
    //
    //printCudaError(hipDeviceSynchronize(),"nvs-len");
    // Sum the lengths, clamping the -1 null sentinels to 0 so they do not
    // reduce the total.
    size_t size = thrust::reduce(execpol->on(0), d_rtn, d_rtn+count, (size_t)0,
        []__device__(int lhs, int rhs) {
            if( lhs < 0 )
                lhs = 0;
            if( rhs < 0 )
                rhs = 0;
            return lhs + rhs;
        });
    if( !todevice )
    {   // copy result back to host
        CUDA_TRY( hipMemcpyAsync(lengths,d_rtn,sizeof(int)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    return (unsigned int)size;
}
// this will return the number of bytes for each string
// Fills lengths[i] with the byte size of string i, or -1 for a null entry.
// lengths may be null (the per-string output is then discarded); when
// todevice is false a temporary device buffer stages the results. Returns
// the total byte count over all strings (null entries contribute 0).
size_t NVStrings::byte_count(int* lengths, bool todevice)
{
    unsigned int count = size();
    if( count==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    int* d_rtn = lengths;
    if( !lengths )
        todevice = false; // makes sure we free correctly
    if( !todevice )
        d_rtn = device_alloc<int>(count,0);
    custring_view** d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            if( dstr )
                d_rtn[idx] = dstr->size();
            else
                d_rtn[idx] = -1; // null sentinel
        });
    //
    //printCudaError(hipDeviceSynchronize(),"nvs-bytes");
    // Sum the sizes, clamping the -1 null sentinels to 0 so they do not
    // reduce the total.
    size_t size = thrust::reduce(execpol->on(0), d_rtn, d_rtn+count, (size_t)0,
        []__device__(int lhs, int rhs) {
            if( lhs < 0 )
                lhs = 0;
            if( rhs < 0 )
                rhs = 0;
            return lhs + rhs;
        });
    if( !todevice )
    {   // copy result back to host
        if( lengths )
            CUDA_TRY( hipMemcpyAsync(lengths,d_rtn,sizeof(int)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    // Return the full size_t total; the previous (unsigned int) cast silently
    // truncated totals above 4GB even though the function is declared size_t.
    return size;
}
//
// Sets results[i] to true when string i is non-empty and every character is
// alphanumeric (null strings test false). results is a host or device
// pointer per todevice. Returns the number of strings that tested true.
unsigned int NVStrings::isalnum( bool* results, bool todevice )
{
    unsigned int count = size();
    if( count==0 || results==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    unsigned char* d_flags = get_unicode_flags();  // per-codepoint classification flags
    bool* d_rtn = results;
    if( !todevice )
        d_rtn = device_alloc<bool>(count,0);  // stage results on the device
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_flags, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            bool brc = false;  // null strings test false
            if( dstr )
            {
                brc = !dstr->empty(); // alnum requires at least one character
                for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
                {
                    unsigned int uni = u82u(*itr);
                    // flag table covers the BMP only; other codepoints classify as 0
                    unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
                    brc = IS_ALPHANUM(flg);
                }
            }
            d_rtn[idx] = brc;
        });
    // count the number of trues
    int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true );
    if( !todevice )
    {   // copy result back to host
        CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    return (unsigned int)matches;
}
// Sets results[i] to true when string i is non-empty and every character is
// alphabetic (null strings test false). results is a host or device pointer
// per todevice. Returns the number of strings that tested true.
unsigned int NVStrings::isalpha( bool* results, bool todevice )
{
    unsigned int count = size();
    if( count==0 || results==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    unsigned char* d_flags = get_unicode_flags();  // per-codepoint classification flags
    bool* d_rtn = results;
    if( !todevice )
        d_rtn = device_alloc<bool>(count,0);  // stage results on the device
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_flags, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            bool brc = false;  // null strings test false
            if( dstr )
            {
                brc = !dstr->empty(); // alpha requires at least one character
                for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
                {
                    unsigned int uni = u82u(*itr);
                    // flag table covers the BMP only; other codepoints classify as 0
                    unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
                    brc = IS_ALPHA(flg);
                }
            }
            d_rtn[idx] = brc;
        });
    // count the number of trues
    int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
    if( !todevice )
    {   // copy result back to host
        CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    return (unsigned int)matches;
}
//
// Sets results[i] to true when string i is non-empty and every character is
// a digit (null strings test false). results is a host or device pointer
// per todevice. Returns the number of strings that tested true.
unsigned int NVStrings::isdigit( bool* results, bool todevice )
{
    unsigned int count = size();
    if( count==0 || results==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    unsigned char* d_flags = get_unicode_flags();  // per-codepoint classification flags
    bool* d_rtn = results;
    if( !todevice )
        d_rtn = device_alloc<bool>(count,0);  // stage results on the device
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_flags, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            bool brc = false;  // null strings test false
            if( dstr )
            {
                brc = !dstr->empty(); // digit requires at least one character
                for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
                {
                    unsigned int uni = u82u(*itr);
                    // flag table covers the BMP only; other codepoints classify as 0
                    unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
                    brc = IS_DIGIT(flg);
                }
            }
            d_rtn[idx] = brc;
        });
    // count the number of trues
    int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
    if( !todevice )
    {   // copy result back to host
        CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    return (unsigned int)matches;
}
// Sets results[i] to true when string i is non-empty and every character is
// whitespace (null strings test false). results is a host or device pointer
// per todevice. Returns the number of strings that tested true.
unsigned int NVStrings::isspace( bool* results, bool todevice )
{
    unsigned int count = size();
    if( count==0 || results==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    unsigned char* d_flags = get_unicode_flags();  // per-codepoint classification flags
    bool* d_rtn = results;
    if( !todevice )
        d_rtn = device_alloc<bool>(count,0);  // stage results on the device
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_flags, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            bool brc = false;  // null strings test false
            if( dstr )
            {
                brc = !dstr->empty(); // space requires at least one character
                for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
                {
                    unsigned int uni = u82u(*itr);
                    // flag table covers the BMP only; other codepoints classify as 0
                    unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
                    brc = IS_SPACE(flg);
                }
            }
            d_rtn[idx] = brc;
        });
    // count the number of trues
    int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
    if( !todevice )
    {   // copy result back to host
        CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    return (unsigned int)matches;
}
// Sets results[i] to true when string i is non-empty and every character is
// a decimal character (null strings test false). results is a host or device
// pointer per todevice. Returns the number of strings that tested true.
unsigned int NVStrings::isdecimal( bool* results, bool todevice )
{
    unsigned int count = size();
    if( count==0 || results==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    unsigned char* d_flags = get_unicode_flags();  // per-codepoint classification flags
    bool* d_rtn = results;
    if( !todevice )
        d_rtn = device_alloc<bool>(count,0);  // stage results on the device
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_flags, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            bool brc = false;  // null strings test false
            if( dstr )
            {
                brc = !dstr->empty(); // decimal requires at least one character
                for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
                {
                    unsigned int uni = u82u(*itr);
                    // flag table covers the BMP only; other codepoints classify as 0
                    unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
                    brc = IS_DECIMAL(flg);
                }
            }
            d_rtn[idx] = brc;
        });
    // count the number of trues
    int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
    if( !todevice )
    {   // copy result back to host
        CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    return (unsigned int)matches;
}
// Sets results[i] to true when string i is non-empty and every character is
// numeric (null strings test false). results is a host or device pointer
// per todevice. Returns the number of strings that tested true.
unsigned int NVStrings::isnumeric( bool* results, bool todevice )
{
    unsigned int count = size();
    if( count==0 || results==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    unsigned char* d_flags = get_unicode_flags();  // per-codepoint classification flags
    bool* d_rtn = results;
    if( !todevice )
        d_rtn = device_alloc<bool>(count,0);  // stage results on the device
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_flags, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            bool brc = false;  // null strings test false
            if( dstr )
            {
                brc = !dstr->empty(); // numeric requires at least one character
                for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
                {
                    unsigned int uni = u82u(*itr);
                    // flag table covers the BMP only; other codepoints classify as 0
                    unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
                    brc = IS_NUMERIC(flg);
                }
            }
            d_rtn[idx] = brc;
        });
    // count the number of trues
    int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
    if( !todevice )
    {   // copy result back to host
        CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    return (unsigned int)matches;
}
// Sets results[i] to true when string i is non-empty and no alphabetic
// character is uppercase; non-alphabetic characters are ignored. Null
// strings test false. Returns the number of strings that tested true.
unsigned int NVStrings::islower( bool* results, bool todevice )
{
    unsigned int count = size();
    if( count==0 || results==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    unsigned char* d_flags = get_unicode_flags();  // per-codepoint classification flags
    bool* d_rtn = results;
    if( !todevice )
        d_rtn = device_alloc<bool>(count,0);  // stage results on the device
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_flags, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            bool brc = false;  // null strings test false
            if( dstr )
            {
                brc = !dstr->empty(); // requires at least one character
                for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
                {
                    unsigned int uni = u82u(*itr);
                    // flag table covers the BMP only; other codepoints classify as 0
                    unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
                    // pass if the character is not alphabetic, or is lowercase
                    brc = !IS_ALPHA(flg) || IS_LOWER(flg);
                }
            }
            d_rtn[idx] = brc;
        });
    // count the number of trues
    int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
    if( !todevice )
    {   // copy result back to host
        CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    return (unsigned int)matches;
}
// Sets results[i] to true when string i is non-empty and no alphabetic
// character is lowercase; non-alphabetic characters are ignored. Null
// strings test false. Returns the number of strings that tested true.
unsigned int NVStrings::isupper( bool* results, bool todevice )
{
    unsigned int count = size();
    if( count==0 || results==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    unsigned char* d_flags = get_unicode_flags();  // per-codepoint classification flags
    bool* d_rtn = results;
    if( !todevice )
        d_rtn = device_alloc<bool>(count,0);  // stage results on the device
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_flags, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            bool brc = false;  // null strings test false
            if( dstr )
            {
                brc = !dstr->empty(); // requires at least one character
                for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
                {
                    unsigned int uni = u82u(*itr);
                    // flag table covers the BMP only; other codepoints classify as 0
                    unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
                    // pass if the character is not alphabetic, or is uppercase
                    brc = !IS_ALPHA(flg) || IS_UPPER(flg);
                }
            }
            d_rtn[idx] = brc;
        });
    // count the number of trues
    int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
    if( !todevice )
    {   // copy result back to host
        CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    return (unsigned int)matches;
}
// Sets results[i] to true when string i is null or has no characters.
// results is a host or device pointer per todevice. Returns the number of
// null/empty strings.
unsigned int NVStrings::is_empty( bool* results, bool todevice )
{
    unsigned int count = size();
    if( count==0 || results==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    bool* d_rtn = results;
    if( !todevice )
        d_rtn = device_alloc<bool>(count,0);  // stage results on the device
    custring_view_array d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            bool brc = true; // null is empty
            if( dstr )
                brc = dstr->empty(); // requires at least one character
            d_rtn[idx] = brc;
        });
    // count the number of trues
    int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
    if( !todevice )
    {   // copy result back to host
        CUDA_TRY( hipMemcpyAsync(results,d_rtn,sizeof(bool)*count,hipMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    return (unsigned int)matches;
}
| ebaf12106856907ecae184a6fd1c757e8bc58e1f.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/count.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "nvstrings/NVStrings.h"
#include "./NVStringsImpl.h"
#include "../custring_view.cuh"
#include "../unicode/is_flags.h"
#include "../util.h"
// this will return the number of characters for each string
unsigned int NVStrings::len(int* lengths, bool todevice)
{
unsigned int count = size();
if( lengths==0 || count==0 )
return count;
auto execpol = rmm::exec_policy(0);
int* d_rtn = lengths;
if( !todevice )
d_rtn = device_alloc<int>(count,0);
custring_view** d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_rtn[idx] = dstr->chars_count();
else
d_rtn[idx] = -1;
});
//
//printCudaError(cudaDeviceSynchronize(),"nvs-len");
size_t size = thrust::reduce(execpol->on(0), d_rtn, d_rtn+count, (size_t)0,
[]__device__(int lhs, int rhs) {
if( lhs < 0 )
lhs = 0;
if( rhs < 0 )
rhs = 0;
return lhs + rhs;
});
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(lengths,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)size;
}
// this will return the number of bytes for each string
// Fills lengths[i] with the byte size of string i, or -1 for a null entry.
// lengths may be null (the per-string output is then discarded); when
// todevice is false a temporary device buffer stages the results. Returns
// the total byte count over all strings (null entries contribute 0).
size_t NVStrings::byte_count(int* lengths, bool todevice)
{
    unsigned int count = size();
    if( count==0 )
        return 0;
    auto execpol = rmm::exec_policy(0);
    int* d_rtn = lengths;
    if( !lengths )
        todevice = false; // makes sure we free correctly
    if( !todevice )
        d_rtn = device_alloc<int>(count,0);
    custring_view** d_strings = pImpl->getStringsPtr();
    thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
        [d_strings, d_rtn] __device__(unsigned int idx){
            custring_view* dstr = d_strings[idx];
            if( dstr )
                d_rtn[idx] = dstr->size();
            else
                d_rtn[idx] = -1; // null sentinel
        });
    //
    //printCudaError(cudaDeviceSynchronize(),"nvs-bytes");
    // Sum the sizes, clamping the -1 null sentinels to 0 so they do not
    // reduce the total.
    size_t size = thrust::reduce(execpol->on(0), d_rtn, d_rtn+count, (size_t)0,
        []__device__(int lhs, int rhs) {
            if( lhs < 0 )
                lhs = 0;
            if( rhs < 0 )
                rhs = 0;
            return lhs + rhs;
        });
    if( !todevice )
    {   // copy result back to host
        if( lengths )
            CUDA_TRY( cudaMemcpyAsync(lengths,d_rtn,sizeof(int)*count,cudaMemcpyDeviceToHost))
        RMM_FREE(d_rtn,0);
    }
    // Return the full size_t total; the previous (unsigned int) cast silently
    // truncated totals above 4GB even though the function is declared size_t.
    return size;
}
//
unsigned int NVStrings::isalnum( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // alnum requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_ALPHANUM(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true );
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isalpha( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // alpha requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_ALPHA(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
//
unsigned int NVStrings::isdigit( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // digit requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_DIGIT(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isspace( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // space requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_SPACE(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isdecimal( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // decimal requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_DECIMAL(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isnumeric( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // numeric requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = uni <= 0x00FFFF ? d_flags[uni] : 0;
brc = IS_NUMERIC(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::islower( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
brc = !IS_ALPHA(flg) || IS_LOWER(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::isupper( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
unsigned char* d_flags = get_unicode_flags();
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_flags, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = false;
if( dstr )
{
brc = !dstr->empty(); // requires at least one character
for( auto itr = dstr->begin(); brc && (itr != dstr->end()); itr++ )
{
unsigned int uni = u82u(*itr);
unsigned int flg = (uni <= 0x00FFFF ? d_flags[uni] : 0);
brc = !IS_ALPHA(flg) || IS_UPPER(flg);
}
}
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
unsigned int NVStrings::is_empty( bool* results, bool todevice )
{
unsigned int count = size();
if( count==0 || results==0 )
return 0;
auto execpol = rmm::exec_policy(0);
bool* d_rtn = results;
if( !todevice )
d_rtn = device_alloc<bool>(count,0);
custring_view_array d_strings = pImpl->getStringsPtr();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
bool brc = true; // null is empty
if( dstr )
brc = dstr->empty(); // requires at least one character
d_rtn[idx] = brc;
});
// count the number of trues
int matches = thrust::count(execpol->on(0), d_rtn, d_rtn+count, true);
if( !todevice )
{ // copy result back to host
CUDA_TRY( cudaMemcpyAsync(results,d_rtn,sizeof(bool)*count,cudaMemcpyDeviceToHost))
RMM_FREE(d_rtn,0);
}
return (unsigned int)matches;
}
|
3703f6a128bcf1a5b7aa2f588b444069b607378e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <vector>
#include "KeySearchDevice.h"
#include "CudaHashLookup.h"
#include "CudaHashLookup.cuh"
#include "Logger.h"
#include "util.h"
#define MAX_TARGETS_CONSTANT_MEM 16
__constant__ unsigned int _TARGET_HASH[MAX_TARGETS_CONSTANT_MEM][5];
__constant__ unsigned int _NUM_TARGET_HASHES[1];
__constant__ unsigned int *_BLOOM_FILTER[1];
__constant__ unsigned int _BLOOM_FILTER_MASK[1];
__constant__ unsigned long long _BLOOM_FILTER_MASK64[1];
__constant__ unsigned int _USE_BLOOM_FILTER[1];
// Byte-swaps a 32-bit word (reverses endianness).
static unsigned int swp(unsigned int x)
{
    unsigned int b0 = x >> 24;
    unsigned int b1 = (x >> 8) & 0x0000ff00;
    unsigned int b2 = (x << 8) & 0x00ff0000;
    unsigned int b3 = x << 24;
    return b3 | b2 | b1 | b0;
}
// Reverses the final addition of the RIPEMD-160 round: byte-swaps each
// digest word to host order and subtracts the rotated initialization
// vector, recovering the pre-final-round state used for GPU comparisons.
static void undoRMD160FinalRound(const unsigned int hIn[5], unsigned int hOut[5])
{
    const unsigned int iv[5] = {
        0x67452301,
        0xefcdab89,
        0x98badcfe,
        0x10325476,
        0xc3d2e1f0
    };
    for(int w = 0; w < 5; w++) {
        // the IV is applied rotated by one word relative to the digest
        hOut[w] = swp(hIn[w]) - iv[(w + 1) % 5];
    }
}
/**
 Copies the target hashes to constant memory. Each target's final RMD160
 round is undone first so the GPU can compare against mid-state hashes.
 Also clears the bloom-filter mode flag so lookups take the constant-memory
 path. Returns the first HIP error encountered, or hipSuccess.
*/
hipError_t CudaHashLookup::setTargetConstantMemory(const std::vector<struct hash160> &targets)
{
    size_t count = targets.size();
    for(size_t i = 0; i < count; i++) {
        unsigned int h[5];
        undoRMD160FinalRound(targets[i].h, h);
        hipError_t err = hipMemcpyToSymbol(_TARGET_HASH, h, sizeof(unsigned int) * 5, i * sizeof(unsigned int) * 5);
        if(err) {
            return err;
        }
    }
    // _NUM_TARGET_HASHES is a 4-byte symbol: copy from an unsigned int
    // rather than reading the first 4 bytes of the 8-byte size_t `count`,
    // which only produced the right value on little-endian hosts.
    unsigned int numTargets = (unsigned int)count;
    hipError_t err = hipMemcpyToSymbol(_NUM_TARGET_HASHES, &numTargets, sizeof(unsigned int));
    if(err) {
        return err;
    }
    unsigned int useBloomFilter = 0;
    // Copy the full 4-byte flag; the previous sizeof(bool) wrote only one
    // byte of the unsigned int symbol, leaving the rest unspecified.
    err = hipMemcpyToSymbol(_USE_BLOOM_FILTER, &useBloomFilter, sizeof(unsigned int));
    if(err) {
        return err;
    }
    return hipSuccess;
}
/**
 Returns the optimal bloom filter size given the desired probability of
 false-positives (p) and the number of targets (n). The return value is the
 base-2 logarithm of the filter size in bits (i.e. the filter is rounded up
 to a power-of-two number of bits).
*/
unsigned int CudaHashLookup::getOptimalBloomFilterBits(double p, size_t n)
{
    // Standard sizing formula m = -n*ln(p)/ln(2)^2, padded by a 3.6x factor,
    // then rounded up to the next power of two.
    double denominator = log(1 / pow(2, log(2)));   // equals -ln(2)^2
    double paddedBits = 3.6 * ceil((n * log(p)) / denominator);
    return (unsigned int)ceil(log(paddedBits) / log(2));
}
// Populates the host-side bloom filter for 32-bit indexing: for each target,
// each of the five (un-finalized) hash words, reduced by `mask`, selects one
// bit to set. Must mirror the probing done by checkBloomFilter on the device.
void CudaHashLookup::initializeBloomFilter(const std::vector<struct hash160> &targets, unsigned int *filter, unsigned int mask)
{
    for(size_t t = 0; t < targets.size(); t++) {
        unsigned int undone[5];
        undoRMD160FinalRound(targets[t].h, undone);
        for(int w = 0; w < 5; w++) {
            // low bits of each hash word form the bit index into the filter
            unsigned int bit = undone[w] & mask;
            filter[bit >> 5] |= 1u << (bit & 31);
        }
    }
}
// Populates the host-side bloom filter for 64-bit indexing: five 64-bit probe
// positions are derived per target by concatenating and XOR-mixing the hash
// words, then reduced by `mask`. The derivations must match
// checkBloomFilter64 on the device exactly, or lookups will miss.
void CudaHashLookup::initializeBloomFilter64(const std::vector<struct hash160> &targets, unsigned int *filter, unsigned long long mask)
{
    for(unsigned int k = 0; k < targets.size(); k++) {
        unsigned int hash[5];
        unsigned long long idx[5];
        undoRMD160FinalRound(targets[k].h, hash);
        // five derived 64-bit probe positions, reduced by the filter mask
        idx[0] = ((unsigned long long)hash[0] << 32 | hash[1]) & mask;
        idx[1] = ((unsigned long long)hash[2] << 32 | hash[3]) & mask;
        idx[2] = ((unsigned long long)(hash[0]^hash[1]) << 32 | (hash[1]^hash[2])) & mask;
        idx[3] = ((unsigned long long)(hash[2]^hash[3]) << 32 | (hash[3] ^ hash[4])) & mask;
        idx[4] = ((unsigned long long)(hash[0]^hash[3]) << 32 | (hash[1]^hash[3])) & mask;
        for(int i = 0; i < 5; i++) {
            filter[idx[i] / 32] |= (0x01 << (idx[i] % 32));
        }
    }
}
/**
 Populates the bloom filter with the target hashes and uploads it to the
 device, along with the filter pointer, the index mask, and the mode flag
 (32- or 64-bit indexing depending on the filter size). Returns the first
 error encountered, releasing host and device allocations on failure.
*/
hipError_t CudaHashLookup::setTargetBloomFilter(const std::vector<struct hash160> &targets)
{
    unsigned int bloomFilterBits = getOptimalBloomFilterBits(1.0e-9, targets.size());
    unsigned long long bloomFilterSizeWords = (unsigned long long)1 << (bloomFilterBits - 5);
    unsigned long long bloomFilterBytes = (unsigned long long)1 << (bloomFilterBits - 3);
    unsigned long long bloomFilterMask = (((unsigned long long)1 << bloomFilterBits) - 1);
    Logger::log(LogLevel::Info, "Allocating bloom filter (" + util::format("%.1f", (double)bloomFilterBytes/(double)(1024*1024)) + "MB)");
    unsigned int *filter = NULL;
    try {
        filter = new unsigned int[bloomFilterSizeWords];
    } catch(const std::bad_alloc &) {   // catch by reference, not by value
        Logger::log(LogLevel::Error, "Out of system memory");
        return hipErrorMemoryAllocation;
    }
    hipError_t err = hipMalloc(&_bloomFilterPtr, bloomFilterBytes);
    if(err) {
        Logger::log(LogLevel::Error, "Device error: " + std::string(hipGetErrorString(err)));
        delete[] filter;
        return err;
    }
    memset(filter, 0, sizeof(unsigned int) * bloomFilterSizeWords);
    if(bloomFilterBits > 32) {
        initializeBloomFilter64(targets, filter, bloomFilterMask);
    } else {
        initializeBloomFilter(targets, filter, (unsigned int)bloomFilterMask);
    }
    // Copy filter contents to device
    err = hipMemcpy(_bloomFilterPtr, filter, sizeof(unsigned int) * bloomFilterSizeWords, hipMemcpyHostToDevice);
    if(err) {
        hipFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        delete[] filter;
        return err;
    }
    // Copy device memory pointer to constant memory
    err = hipMemcpyToSymbol(_BLOOM_FILTER, &_bloomFilterPtr, sizeof(unsigned int *));
    if(err) {
        hipFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        delete[] filter;
        return err;
    }
    // Copy the index mask to the matching-width constant. The previous code
    // passed sizeof(unsigned int *) (8 bytes) for the 4-byte
    // _BLOOM_FILTER_MASK symbol, exceeding the symbol's bounds; copy exactly
    // the symbol's size from a value of the symbol's type instead.
    if(bloomFilterBits <= 32) {
        unsigned int mask32 = (unsigned int)bloomFilterMask;
        err = hipMemcpyToSymbol(_BLOOM_FILTER_MASK, &mask32, sizeof(unsigned int));
        if(err) {
            hipFree(_bloomFilterPtr);
            _bloomFilterPtr = NULL;
            delete[] filter;
            return err;
        }
    } else {
        err = hipMemcpyToSymbol(_BLOOM_FILTER_MASK64, &bloomFilterMask, sizeof(unsigned long long));
        if(err) {
            hipFree(_bloomFilterPtr);
            _bloomFilterPtr = NULL;
            delete[] filter;
            return err;
        }
    }
    // Mode flag: 1 = 32-bit bloom indexing, 2 = 64-bit bloom indexing.
    unsigned int useBloomFilter = bloomFilterBits <= 32 ? 1 : 2;
    err = hipMemcpyToSymbol(_USE_BLOOM_FILTER, &useBloomFilter, sizeof(unsigned int));
    delete[] filter;
    return err;
}
/**
 Configures the device-side lookup for the given targets: when they fit in
 constant memory (<= MAX_TARGETS_CONSTANT_MEM) the hashes are stored there
 directly, otherwise a bloom filter is built. Any previous device state is
 released first.
*/
hipError_t CudaHashLookup::setTargets(const std::vector<struct hash160> &targets)
{
    cleanup();
    return (targets.size() <= MAX_TARGETS_CONSTANT_MEM)
        ? setTargetConstantMemory(targets)
        : setTargetBloomFilter(targets);
}
// Releases the device-side bloom filter allocation, if one exists.
void CudaHashLookup::cleanup()
{
    if(_bloomFilterPtr == NULL) {
        return;
    }
    hipFree(_bloomFilterPtr);
    _bloomFilterPtr = NULL;
}
// Device-side membership test against the 32-bit-indexed bloom filter: each
// of the five hash words, reduced by the mask, selects one filter bit; all
// five must be set to report a (possible) match. Bloom semantics: false
// positives possible, false negatives not.
__device__ bool checkBloomFilter(const unsigned int hash[5])
{
    bool foundMatch = true;
    unsigned int mask = _BLOOM_FILTER_MASK[0];
    unsigned int *bloomFilter = _BLOOM_FILTER[0];
    for(int i = 0; i < 5; i++) {
        unsigned int idx = hash[i] & mask;
        unsigned int f = bloomFilter[idx / 32];
        if((f & (0x01 << (idx % 32))) == 0) {
            foundMatch = false;  // any clear bit rules the hash out
        }
    }
    return foundMatch;
}
// Tests a hash against the 64-bit-indexed bloom filter. Five 64-bit indices
// are derived from the 160-bit hash; the derivation must stay in sync with
// the host-side initializeBloomFilter64. The hash can only be a match if the
// bit at every index is set (false positives possible, false negatives not).
__device__ bool checkBloomFilter64(const unsigned int hash[5])
{
bool foundMatch = true;
unsigned long long mask = _BLOOM_FILTER_MASK64[0];
unsigned int *bloomFilter = _BLOOM_FILTER[0];
unsigned long long idx[5];
// Five "hash functions": word pairs and XOR combinations of the hash words.
idx[0] = ((unsigned long long)hash[0] << 32 | hash[1]) & mask;
idx[1] = ((unsigned long long)hash[2] << 32 | hash[3]) & mask;
idx[2] = ((unsigned long long)(hash[0] ^ hash[1]) << 32 | (hash[1] ^ hash[2])) & mask;
idx[3] = ((unsigned long long)(hash[2] ^ hash[3]) << 32 | (hash[3] ^ hash[4])) & mask;
idx[4] = ((unsigned long long)(hash[0] ^ hash[3]) << 32 | (hash[1] ^ hash[3])) & mask;
for(int i = 0; i < 5; i++) {
unsigned int f = bloomFilter[idx[i] / 32];
if((f & (0x01 << (idx[i] % 32))) == 0) {
foundMatch = false;
}
}
return foundMatch;
}
__device__ bool checkHash(const unsigned int hash[5])
{
bool foundMatch = false;
if(*_USE_BLOOM_FILTER == 1) {
return checkBloomFilter(hash);
} else if(*_USE_BLOOM_FILTER == 2) {
return checkBloomFilter64(hash);
} else {
for(int j = 0; j < *_NUM_TARGET_HASHES; j++) {
bool equal = true;
for(int i = 0; i < 5; i++) {
equal &= (hash[i] == _TARGET_HASH[j][i]);
}
foundMatch |= equal;
}
}
return foundMatch;
} | 3703f6a128bcf1a5b7aa2f588b444069b607378e.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <vector>
#include "KeySearchDevice.h"
#include "CudaHashLookup.h"
#include "CudaHashLookup.cuh"
#include "Logger.h"
#include "util.h"
#define MAX_TARGETS_CONSTANT_MEM 16
__constant__ unsigned int _TARGET_HASH[MAX_TARGETS_CONSTANT_MEM][5];
__constant__ unsigned int _NUM_TARGET_HASHES[1];
__constant__ unsigned int *_BLOOM_FILTER[1];
__constant__ unsigned int _BLOOM_FILTER_MASK[1];
__constant__ unsigned long long _BLOOM_FILTER_MASK64[1];
__constant__ unsigned int _USE_BLOOM_FILTER[1];
// Reverses the byte order of a 32-bit word (endian swap).
static unsigned int swp(unsigned int x)
{
    unsigned int b0 = x >> 24;               // old MSB -> new LSB
    unsigned int b1 = (x >> 8) & 0x0000ff00;
    unsigned int b2 = (x << 8) & 0x00ff0000;
    unsigned int b3 = x << 24;               // old LSB -> new MSB

    return b3 | b2 | b1 | b0;
}
// Undoes the final round of RIPEMD-160: byte-swaps each digest word and
// subtracts the rotated initialization vector, recovering the internal state
// that device code compares against.
static void undoRMD160FinalRound(const unsigned int hIn[5], unsigned int hOut[5])
{
    const unsigned int iv[5] = {
        0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0
    };

    // Unrolled: word i is adjusted by iv[(i + 1) % 5].
    hOut[0] = swp(hIn[0]) - iv[1];
    hOut[1] = swp(hIn[1]) - iv[2];
    hOut[2] = swp(hIn[2]) - iv[3];
    hOut[3] = swp(hIn[3]) - iv[4];
    hOut[4] = swp(hIn[4]) - iv[0];
}
/**
 * Copies the target hashes (with the final RMD160 round undone) into the
 * _TARGET_HASH constant-memory table and configures the device lookup to use
 * direct comparison instead of a bloom filter. Returns the first CUDA error
 * encountered, or cudaSuccess.
 */
cudaError_t CudaHashLookup::setTargetConstantMemory(const std::vector<struct hash160> &targets)
{
    size_t count = targets.size();

    for(size_t i = 0; i < count; i++) {
        unsigned int h[5];

        // Store the hash in its pre-final-round form so the device can compare
        // its intermediate state directly.
        undoRMD160FinalRound(targets[i].h, h);

        cudaError_t err = cudaMemcpyToSymbol(_TARGET_HASH, h, sizeof(unsigned int) * 5, i * sizeof(unsigned int) * 5);

        if(err) {
            return err;
        }
    }

    // _NUM_TARGET_HASHES is a 32-bit symbol: copy a 32-bit value rather than
    // the first sizeof(unsigned int) bytes of a size_t, which only happened to
    // work on little-endian hosts.
    unsigned int numTargets = (unsigned int)count;
    cudaError_t err = cudaMemcpyToSymbol(_NUM_TARGET_HASHES, &numTargets, sizeof(unsigned int));

    if(err) {
        return err;
    }

    unsigned int useBloomFilter = 0;

    // The symbol holds an unsigned int; the previous sizeof(bool) copied only
    // one byte of the flag.
    err = cudaMemcpyToSymbol(_USE_BLOOM_FILTER, &useBloomFilter, sizeof(unsigned int));

    if(err) {
        return err;
    }

    return cudaSuccess;
}
/**
Returns log2 of the bloom filter size in bits for a desired false-positive
probability p and n entries. The inner term is the standard optimal filter
size m = -n*ln(p)/(ln 2)^2 (note log(1/pow(2, log(2))) == -(ln 2)^2),
scaled by 3.6 -- presumably head-room because the five index derivations used
by this lookup are not independent hash functions; TODO confirm. The result
is rounded up to a power of two by returning ceil(log2(m)).
*/
unsigned int CudaHashLookup::getOptimalBloomFilterBits(double p, size_t n)
{
double m = 3.6 * ceil((n * log(p)) / log(1 / pow(2, log(2))));
return (unsigned int)ceil(log(m) / log(2));
}
// Populates the 32-bit-indexed bloom filter on the host: each of the five
// words of every (un-finalized) target hash, masked down to the filter size,
// sets one bit. Must stay in sync with the device-side checkBloomFilter.
void CudaHashLookup::initializeBloomFilter(const std::vector<struct hash160> &targets, unsigned int *filter, unsigned int mask)
{
    for(size_t t = 0; t < targets.size(); t++) {
        unsigned int words[5];

        words[0] = 0;
        undoRMD160FinalRound(targets[t].h, words);

        for(int w = 0; w < 5; w++) {
            unsigned int bit = words[w] & mask;

            filter[bit / 32] |= (0x01 << (bit % 32));
        }
    }
}
// Populates the 64-bit-indexed bloom filter on the host: for every target,
// derives five 64-bit indices from the (un-finalized) hash and sets the
// corresponding bits. The index derivation must stay in sync with the
// device-side checkBloomFilter64.
void CudaHashLookup::initializeBloomFilter64(const std::vector<struct hash160> &targets, unsigned int *filter, unsigned long long mask)
{
for(unsigned int k = 0; k < targets.size(); k++) {
unsigned int hash[5];
unsigned long long idx[5];
// Compare against the hash with the final RMD160 round removed.
undoRMD160FinalRound(targets[k].h, hash);
idx[0] = ((unsigned long long)hash[0] << 32 | hash[1]) & mask;
idx[1] = ((unsigned long long)hash[2] << 32 | hash[3]) & mask;
idx[2] = ((unsigned long long)(hash[0]^hash[1]) << 32 | (hash[1]^hash[2])) & mask;
idx[3] = ((unsigned long long)(hash[2]^hash[3]) << 32 | (hash[3] ^ hash[4])) & mask;
idx[4] = ((unsigned long long)(hash[0]^hash[3]) << 32 | (hash[1]^hash[3])) & mask;
for(int i = 0; i < 5; i++) {
filter[idx[i] / 32] |= (0x01 << (idx[i] % 32));
}
}
}
/**
 * Builds the bloom filter for the target hashes, uploads it to device global
 * memory, and publishes the filter pointer, index mask and lookup mode
 * through constant memory. Returns the first CUDA error encountered, or
 * cudaSuccess.
 */
cudaError_t CudaHashLookup::setTargetBloomFilter(const std::vector<struct hash160> &targets)
{
    unsigned int bloomFilterBits = getOptimalBloomFilterBits(1.0e-9, targets.size());

    // Filter size is 2^bloomFilterBits bits.
    unsigned long long bloomFilterSizeWords = (unsigned long long)1 << (bloomFilterBits - 5);
    unsigned long long bloomFilterBytes = (unsigned long long)1 << (bloomFilterBits - 3);
    unsigned long long bloomFilterMask = (((unsigned long long)1 << bloomFilterBits) - 1);

    Logger::log(LogLevel::Info, "Allocating bloom filter (" + util::format("%.1f", (double)bloomFilterBytes/(double)(1024*1024)) + "MB)");

    unsigned int *filter = NULL;

    try {
        filter = new unsigned int[bloomFilterSizeWords];
    } catch(const std::bad_alloc &) {
        Logger::log(LogLevel::Error, "Out of system memory");

        return cudaErrorMemoryAllocation;
    }

    cudaError_t err = cudaMalloc(&_bloomFilterPtr, bloomFilterBytes);

    if(err) {
        Logger::log(LogLevel::Error, "Device error: " + std::string(cudaGetErrorString(err)));
        delete[] filter;
        return err;
    }

    memset(filter, 0, sizeof(unsigned int) * bloomFilterSizeWords);

    // Choose 32- or 64-bit indexing depending on the filter size.
    if(bloomFilterBits > 32) {
        initializeBloomFilter64(targets, filter, bloomFilterMask);
    } else {
        initializeBloomFilter(targets, filter, (unsigned int)bloomFilterMask);
    }

    // Copy the populated filter to the device
    err = cudaMemcpy(_bloomFilterPtr, filter, sizeof(unsigned int) * bloomFilterSizeWords, cudaMemcpyHostToDevice);

    if(err) {
        cudaFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        delete[] filter;
        return err;
    }

    // Publish the device pointer through constant memory
    err = cudaMemcpyToSymbol(_BLOOM_FILTER, &_bloomFilterPtr, sizeof(unsigned int *));

    if(err) {
        cudaFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        delete[] filter;
        return err;
    }

    // Publish the index mask. _BLOOM_FILTER_MASK is a 4-byte symbol, so a
    // 32-bit value must be copied with sizeof(unsigned int); the previous code
    // passed sizeof(unsigned int *) (8 bytes on 64-bit hosts), overflowing the
    // symbol. Likewise the 64-bit path copies the value size, not a pointer
    // size.
    if(bloomFilterBits <= 32) {
        unsigned int mask32 = (unsigned int)bloomFilterMask;

        err = cudaMemcpyToSymbol(_BLOOM_FILTER_MASK, &mask32, sizeof(unsigned int));
    } else {
        err = cudaMemcpyToSymbol(_BLOOM_FILTER_MASK64, &bloomFilterMask, sizeof(unsigned long long));
    }

    if(err) {
        cudaFree(_bloomFilterPtr);
        _bloomFilterPtr = NULL;
        delete[] filter;
        return err;
    }

    // 1 = 32-bit bloom filter, 2 = 64-bit bloom filter
    unsigned int useBloomFilter = bloomFilterBits <= 32 ? 1 : 2;
    err = cudaMemcpyToSymbol(_USE_BLOOM_FILTER, &useBloomFilter, sizeof(unsigned int));

    delete[] filter;

    return err;
}
/**
 * Copies the target hashes to either constant memory or the bloom filter,
 * depending on how many targets there are.
 */
cudaError_t CudaHashLookup::setTargets(const std::vector<struct hash160> &targets)
{
    // Remove any targets uploaded by a previous call.
    cleanup();

    if(targets.size() > MAX_TARGETS_CONSTANT_MEM) {
        return setTargetBloomFilter(targets);
    }

    return setTargetConstantMemory(targets);
}
// Frees the device-side bloom filter if it was allocated.
void CudaHashLookup::cleanup()
{
    if(_bloomFilterPtr == NULL) {
        return;
    }

    cudaFree(_bloomFilterPtr);
    _bloomFilterPtr = NULL;
}
// Device-side membership test against the 32-bit-indexed bloom filter.
// Each hash word, masked to the filter size, picks one bit; all five bits
// must be set for a (possible) match. May report false positives, never
// false negatives.
__device__ bool checkBloomFilter(const unsigned int hash[5])
{
bool foundMatch = true;
// Mask and filter pointer were published via constant memory at setup.
unsigned int mask = _BLOOM_FILTER_MASK[0];
unsigned int *bloomFilter = _BLOOM_FILTER[0];
for(int i = 0; i < 5; i++) {
unsigned int idx = hash[i] & mask;
unsigned int f = bloomFilter[idx / 32];
if((f & (0x01 << (idx % 32))) == 0) {
// One clear bit means the hash is definitely not a target.
foundMatch = false;
}
}
return foundMatch;
}
// Device-side membership test against the 64-bit-indexed bloom filter.
// Derives five 64-bit indices from the hash (same derivation as the host's
// initializeBloomFilter64) and requires the bit at each to be set. May
// report false positives, never false negatives.
__device__ bool checkBloomFilter64(const unsigned int hash[5])
{
bool foundMatch = true;
unsigned long long mask = _BLOOM_FILTER_MASK64[0];
unsigned int *bloomFilter = _BLOOM_FILTER[0];
unsigned long long idx[5];
// Indices built from word pairs and XOR mixes of the hash words.
idx[0] = ((unsigned long long)hash[0] << 32 | hash[1]) & mask;
idx[1] = ((unsigned long long)hash[2] << 32 | hash[3]) & mask;
idx[2] = ((unsigned long long)(hash[0] ^ hash[1]) << 32 | (hash[1] ^ hash[2])) & mask;
idx[3] = ((unsigned long long)(hash[2] ^ hash[3]) << 32 | (hash[3] ^ hash[4])) & mask;
idx[4] = ((unsigned long long)(hash[0] ^ hash[3]) << 32 | (hash[1] ^ hash[3])) & mask;
for(int i = 0; i < 5; i++) {
unsigned int f = bloomFilter[idx[i] / 32];
if((f & (0x01 << (idx[i] % 32))) == 0) {
foundMatch = false;
}
}
return foundMatch;
}
// Dispatches the device-side membership test based on how the targets were
// uploaded: 1 = 32-bit bloom filter, 2 = 64-bit bloom filter, otherwise an
// exhaustive comparison against the hashes in constant memory. The bloom
// paths can return false positives, so candidates need re-verification.
__device__ bool checkHash(const unsigned int hash[5])
{
bool foundMatch = false;
if(*_USE_BLOOM_FILTER == 1) {
return checkBloomFilter(hash);
} else if(*_USE_BLOOM_FILTER == 2) {
return checkBloomFilter64(hash);
} else {
// Branch-free inner compare: accumulate equality across all 5 words.
for(int j = 0; j < *_NUM_TARGET_HASHES; j++) {
bool equal = true;
for(int i = 0; i < 5; i++) {
equal &= (hash[i] == _TARGET_HASH[j][i]);
}
foundMatch |= equal;
}
}
return foundMatch;
}
3c2802a28ac0f986fd3facd533b182ab2b648b7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Recursive Gaussian filter
*/
#ifndef _GAUSSIAN_KERNEL_H_
#define _GAUSSIAN_KERNEL_H_
#include <cutil_math.h>
#define BLOCK_DIM 16
#define CLAMP_TO_EDGE 1
// RGBA version
// reads from 32-bit uint array holding 8-bit RGBA
// convert floating point rgba color to 32-bit integer
// Packs a float4 RGBA color into one 32-bit word with 8 bits per channel
// (w/alpha in the high byte, then z, y, x), clamping each channel to [0,1].
extern "C"
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
// convert from 32-bit int to float4
// Unpacks a 32-bit 8-bit-per-channel RGBA word into a float4, scaling each
// channel to [0, 1]. Inverse of rgbaFloatToInt.
extern "C"
__device__ float4 rgbaIntToFloat(uint c)
{
float4 rgba;
rgba.x = (c & 0xff) / 255.0f;
rgba.y = ((c>>8) & 0xff) / 255.0f;
rgba.z = ((c>>16) & 0xff) / 255.0f;
rgba.w = ((c>>24) & 0xff) / 255.0f;
return rgba;
}
extern "C"
// Transpose kernel (see transpose SDK sample for details)
// Tiled matrix transpose: expects a BLOCK_DIM x BLOCK_DIM thread block and a
// grid covering the width x height input. The +1 column of padding on the
// shared-memory tile avoids bank conflicts on the transposed (column-wise)
// reads.
__global__ void d_transpose(uint *odata, uint *idata, int width, int height)
{
__shared__ uint block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
// Barrier: the tile must be fully written before any thread reads it back.
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
// simple 1st order recursive filter
// processes one column per thread
// First-order recursive (IIR) filter along image columns, one thread per
// column; a is the feedback coefficient. A forward pass writes od, then a
// backward pass averages its result into od to symmetrize the response.
// NOTE(review): x is not bounds-checked against w -- the launch configuration
// presumably covers exactly w columns; confirm at the call site.
extern "C"
__global__ void
d_simpleRecursive_rgba(uint *id, uint *od, int w, int h, float a)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id += x; // advance pointers to correct column
od += x;
// forward pass
float4 yp = rgbaIntToFloat(*id); // previous output
for (int y = 0; y < h; y++) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = xc + a*(yp - xc); // simple lerp between current and previous value
*od = rgbaFloatToInt(yc);
id += w; od += w; // move to next row
yp = yc;
}
// reset pointers to point to last element in column
id -= w;
od -= w;
// reverse pass
// ensures response is symmetrical
yp = rgbaIntToFloat(*id);
for (int y = h-1; y >= 0; y--) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = xc + a*(yp - xc);
// Average the backward result with the stored forward result.
*od = rgbaFloatToInt((rgbaIntToFloat(*od) + yc)*0.5f);
id -= w; od -= w; // move to previous row
yp = yc;
}
}
// recursive Gaussian filter
// Recursive (Deriche-style) Gaussian filter along image columns, one thread
// per column. a0..a3, b1, b2, coefp, coefn are IIR coefficients precomputed
// on the host. With CLAMP_TO_EDGE the recursion state is seeded from the
// edge pixel so the response does not darken at the border. Backward-pass
// results are summed into the forward-pass output.
// NOTE(review): x is not bounds-checked against w -- launch must cover
// exactly w columns; confirm at the call site.
extern "C"
__global__ void
d_recursiveGaussian_rgba(uint *id, uint *od, int w, int h, float a0, float a1, float a2, float a3, float b1, float b2, float coefp, float coefn)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id += x; // advance pointers to correct column
od += x;
// forward pass
float4 xp = make_float4(0.0f); // previous input
float4 yp = make_float4(0.0f); // previous output
float4 yb = make_float4(0.0f); // previous output by 2
#if CLAMP_TO_EDGE
xp = rgbaIntToFloat(*id); yb = coefp*xp; yp = yb;
#endif
for (int y = 0; y < h; y++) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = a0*xc + a1*xp - b1*yp - b2*yb;
*od = rgbaFloatToInt(yc);
id += w; od += w; // move to next row
xp = xc; yb = yp; yp = yc;
}
// reset pointers to point to last element in column
id -= w;
od -= w;
// reverse pass
// ensures response is symmetrical
float4 xn = make_float4(0.0f);
float4 xa = make_float4(0.0f);
float4 yn = make_float4(0.0f);
float4 ya = make_float4(0.0f);
#if CLAMP_TO_EDGE
xn = xa = rgbaIntToFloat(*id); yn = coefn*xn; ya = yn;
#endif
for (int y = h-1; y >= 0; y--) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = a2*xn + a3*xa - b1*yn - b2*ya;
xa = xn; xn = xc; ya = yn; yn = yc;
*od = rgbaFloatToInt(rgbaIntToFloat(*od) + yc);
id -= w; od -= w; // move to previous row
}
}
#endif // #ifndef _GAUSSIAN_KERNEL_H_
| 3c2802a28ac0f986fd3facd533b182ab2b648b7b.cu | /*
Recursive Gaussian filter
*/
#ifndef _GAUSSIAN_KERNEL_H_
#define _GAUSSIAN_KERNEL_H_
#include <cutil_math.h>
#define BLOCK_DIM 16
#define CLAMP_TO_EDGE 1
// RGBA version
// reads from 32-bit uint array holding 8-bit RGBA
// convert floating point rgba color to 32-bit integer
// Converts a float4 RGBA color to a packed 32-bit integer (8 bits/channel,
// alpha in the high byte); channels are clamped to [0, 1] before scaling.
extern "C"
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
// convert from 32-bit int to float4
// Converts a packed 32-bit RGBA integer back to a float4 with channels in
// [0, 1]; inverse of rgbaFloatToInt.
extern "C"
__device__ float4 rgbaIntToFloat(uint c)
{
float4 rgba;
rgba.x = (c & 0xff) / 255.0f;
rgba.y = ((c>>8) & 0xff) / 255.0f;
rgba.z = ((c>>16) & 0xff) / 255.0f;
rgba.w = ((c>>24) & 0xff) / 255.0f;
return rgba;
}
extern "C"
// Transpose kernel (see transpose SDK sample for details)
// Requires a BLOCK_DIM x BLOCK_DIM block; the extra padding column on the
// shared tile prevents shared-memory bank conflicts when the tile is read
// back column-wise.
__global__ void d_transpose(uint *odata, uint *idata, int width, int height)
{
__shared__ uint block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
// All tile writes must land before the transposed reads below.
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
// simple 1st order recursive filter
// processes one column per thread
// One thread filters one image column with a first-order IIR: forward pass,
// then a backward pass whose output is averaged with the forward result to
// make the response symmetric. a is the feedback coefficient.
// NOTE(review): no guard on x >= w; the launch presumably covers exactly w
// columns -- confirm at the call site.
extern "C"
__global__ void
d_simpleRecursive_rgba(uint *id, uint *od, int w, int h, float a)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id += x; // advance pointers to correct column
od += x;
// forward pass
float4 yp = rgbaIntToFloat(*id); // previous output
for (int y = 0; y < h; y++) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = xc + a*(yp - xc); // simple lerp between current and previous value
*od = rgbaFloatToInt(yc);
id += w; od += w; // move to next row
yp = yc;
}
// reset pointers to point to last element in column
id -= w;
od -= w;
// reverse pass
// ensures response is symmetrical
yp = rgbaIntToFloat(*id);
for (int y = h-1; y >= 0; y--) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = xc + a*(yp - xc);
// Blend backward output with the stored forward output.
*od = rgbaFloatToInt((rgbaIntToFloat(*od) + yc)*0.5f);
id -= w; od -= w; // move to previous row
yp = yc;
}
}
// recursive Gaussian filter
// Recursive Gaussian (Deriche-style IIR) along columns, one thread each.
// Coefficients a0..a3, b1, b2 and the boundary terms coefp/coefn come from
// the host. CLAMP_TO_EDGE seeds the recursion with the edge pixel to avoid
// border darkening; the backward pass adds its output to the forward pass.
// NOTE(review): no guard on x >= w; launch must cover exactly w columns --
// confirm at the call site.
extern "C"
__global__ void
d_recursiveGaussian_rgba(uint *id, uint *od, int w, int h, float a0, float a1, float a2, float a3, float b1, float b2, float coefp, float coefn)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id += x; // advance pointers to correct column
od += x;
// forward pass
float4 xp = make_float4(0.0f); // previous input
float4 yp = make_float4(0.0f); // previous output
float4 yb = make_float4(0.0f); // previous output by 2
#if CLAMP_TO_EDGE
xp = rgbaIntToFloat(*id); yb = coefp*xp; yp = yb;
#endif
for (int y = 0; y < h; y++) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = a0*xc + a1*xp - b1*yp - b2*yb;
*od = rgbaFloatToInt(yc);
id += w; od += w; // move to next row
xp = xc; yb = yp; yp = yc;
}
// reset pointers to point to last element in column
id -= w;
od -= w;
// reverse pass
// ensures response is symmetrical
float4 xn = make_float4(0.0f);
float4 xa = make_float4(0.0f);
float4 yn = make_float4(0.0f);
float4 ya = make_float4(0.0f);
#if CLAMP_TO_EDGE
xn = xa = rgbaIntToFloat(*id); yn = coefn*xn; ya = yn;
#endif
for (int y = h-1; y >= 0; y--) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = a2*xn + a3*xa - b1*yn - b2*ya;
xa = xn; xn = xc; ya = yn; yn = yc;
*od = rgbaFloatToInt(rgbaIntToFloat(*od) + yc);
id -= w; od -= w; // move to previous row
}
}
#endif // #ifndef _GAUSSIAN_KERNEL_H_
|
1dd8f8e13a20e9641d3fd81b5707a4b8afd40980.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*************************************************************************
GPU Version:
Tsinghua University, Aug. 2012.
Written by Yun Fei in collaboration with
W. Wang and B. Wang
Original:
Optimization Technology Center.
Argonne National Laboratory and Northwestern University.
Written by Ciyou Zhu in collaboration with
R.H. Byrd, P. Lu-Chen and J. Nocedal.
Contributors:
* Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
pseudocode.
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below:
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for
Bound Constrained Optimization, (1995), SIAM Journal on Scientific
and Statistical Computing , 16, 5, pp. 1190-1208.
* C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization
(1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4,
pp. 550 - 560.
*************************************************************************/
#include "lbfgsbcuda.h"
#include <thrust/device_vector.h>
#include <thrust/scan.h>
namespace lbfgsbcuda {
// For each of the n variables, looks up k = index[i] and flags bound-status
// transitions: temp_ind1[i]=1 when a variable currently in the free set
// (i < nfree) has become bound (iwhere[k] > 0); temp_ind2[i]=1 when a
// variable outside the free set has become free (iwhere[k] <= 0). The flag
// arrays are prefix-summed afterwards to compact leaving/entering variables.
__global__
void kernel0(
int* index,
const int* iwhere,
int* temp_ind1,
int* temp_ind2,
int nfree,
int n
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n)
return;
int k = index[i];
int iwk = iwhere[k];
int t1, t2;
if(i < nfree && iwk > 0) {
// Was free, now bound: leaves the free set.
t1 = 1;
t2 = 0;
} else if(i >= nfree && iwk <= 0) {
// Was bound, now free: enters the free set.
t1 = 0;
t2 = 1;
} else {
t1 = t2 = 0;
}
temp_ind1[i] = t1;
temp_ind2[i] = t2;
}
// Scatters variables into indx2 using the inclusive prefix sums (temp_ind3,
// temp_ind4) of the flag arrays produced by kernel0: variables leaving the
// free set are packed from the back of indx2, variables entering it from
// the front.
__global__
void kernel1(
const int* index,
const int* temp_ind1,
const int* temp_ind2,
const int* temp_ind3,
const int* temp_ind4,
int* indx2,
int n
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n)
return;
int k = index[i];
if(temp_ind1[i])
{
// Leaving variable: position counted from the end.
indx2[n - temp_ind3[i]] = k;
} else if(temp_ind2[i]) {
// Entering variable: inclusive scan is 1-based, hence the -1.
indx2[temp_ind4[i] - 1] = k;
}
}
// Flags every variable as free (iwhere[i] <= 0 -> temp_ind1[i]=1) or bound
// (temp_ind2[i]=1). The two flag arrays are prefix-summed in place before
// kernel3 uses them to rebuild index[].
__global__
void kernel2(
const int* iwhere,
int* temp_ind1,
int* temp_ind2,
int n
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n)
return;
int iwi = iwhere[i];
if(iwi <= 0)
{
temp_ind1[i] = 1;
temp_ind2[i] = 0;
} else {
temp_ind1[i] = 0;
temp_ind2[i] = 1;
}
}
// Rebuilds index[] from the in-place inclusive prefix sums computed after
// kernel2: free variables (iwhere <= 0) are packed to the front in order,
// bound variables to the back in reverse order.
__global__
void kernel3(
int* index,
const int* iwhere,
const int* temp_ind1,
const int* temp_ind2,
int n
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n)
return;
int iwi = iwhere[i];
if(iwi <= 0)
{
// Inclusive scan is 1-based, hence the -1.
index[temp_ind1[i] - 1] = i;
} else {
index[n - temp_ind2[i]] = i;
}
}
namespace freev {
// Rebuilds the free/bound variable bookkeeping on the GPU.
// On exit: index[] holds the free variables first (count returned in nfree)
// and the bound variables at the end; when iter > 0 and the problem is
// constrained, indx2[] lists variables entering the free set at the front and
// leaving it at the back, delimited by nenter/ileave; wrk reports whether the
// limited-memory matrices must be refreshed. temp_ind1..4 are device scratch
// arrays of length n used for the flag/scan passes.
void prog0(
const int& n,
int& nfree,
int* index,
int& nenter,
int& ileave,
int* indx2,
const int* iwhere,
bool& wrk,
const bool& updatd,
const bool& cnstnd,
const int& iter,
int* temp_ind1,
int* temp_ind2,
int* temp_ind3,
int* temp_ind4
)
{
nenter = -1;
ileave = n;
if( iter > 0 && cnstnd )
{
CheckBuffer_int(iwhere, n, n);
CheckBuffer_int(index, n, n);
// Flag variables leaving/entering the free set.
hipLaunchKernelGGL(( kernel0), dim3(iDivUp(n, 512)), dim3(512), 0, 0,
index, iwhere, temp_ind1, temp_ind2, nfree, n);
CheckBuffer_int(temp_ind1, n, n);
CheckBuffer_int(temp_ind2, n, n);
thrust::device_ptr<int> dptr_ind1(temp_ind1);
thrust::device_ptr<int> dptr_ind2(temp_ind2);
thrust::device_ptr<int> dptr_ind3(temp_ind3);
thrust::device_ptr<int> dptr_ind4(temp_ind4);
// Prefix sums give each flagged variable its slot in indx2.
thrust::inclusive_scan(dptr_ind1, dptr_ind1 + n, dptr_ind3);
thrust::inclusive_scan(dptr_ind2, dptr_ind2 + n, dptr_ind4);
CheckBuffer_int(temp_ind3, n, n);
CheckBuffer_int(temp_ind4, n, n);
hipLaunchKernelGGL(( kernel1), dim3(iDivUp(n, 512)), dim3(512), 0, 0,
index, temp_ind1, temp_ind2, temp_ind3, temp_ind4, indx2, n);
CheckBuffer_int(index, n, n);
CheckBuffer_int(indx2, n, n);
// Totals live in the last scan element.
cutilSafeCall(hipMemcpy(&ileave, temp_ind3 + (n - 1), sizeof(int), hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(&nenter, temp_ind4 + (n - 1), sizeof(int), hipMemcpyDeviceToHost));
ileave = n - ileave;
nenter = nenter - 1;
}
wrk = ileave < n || nenter >= 0 || updatd;
CheckBuffer_int(iwhere, n, n);
// Re-partition index[]: free variables to the front, bound to the back.
hipLaunchKernelGGL(( kernel2), dim3(iDivUp(n, 512)), dim3(512), 0, 0,
iwhere, temp_ind1, temp_ind2, n);
CheckBuffer_int(temp_ind1, n, n);
CheckBuffer_int(temp_ind2, n, n);
thrust::device_ptr<int> dptr_ind1(temp_ind1);
thrust::device_ptr<int> dptr_ind2(temp_ind2);
thrust::inclusive_scan(dptr_ind1, dptr_ind1 + n, dptr_ind1);
thrust::inclusive_scan(dptr_ind2, dptr_ind2 + n, dptr_ind2);
CheckBuffer_int(temp_ind1, n, n);
CheckBuffer_int(temp_ind2, n, n);
hipLaunchKernelGGL(( kernel3), dim3(iDivUp(n, 512)), dim3(512), 0, 0,
index, iwhere, temp_ind1, temp_ind2, n);
CheckBuffer_int(index, n, n);
// Free-variable count is the last element of the first scan.
cutilSafeCall(hipMemcpy(&nfree, temp_ind1 + (n - 1), sizeof(int), hipMemcpyDeviceToHost));
}
};
};
| 1dd8f8e13a20e9641d3fd81b5707a4b8afd40980.cu | /*************************************************************************
GPU Version:
Tsinghua University, Aug. 2012.
Written by Yun Fei in collaboration with
W. Wang and B. Wang
Original:
Optimization Technology Center.
Argonne National Laboratory and Northwestern University.
Written by Ciyou Zhu in collaboration with
R.H. Byrd, P. Lu-Chen and J. Nocedal.
Contributors:
* Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
pseudocode.
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below:
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for
Bound Constrained Optimization, (1995), SIAM Journal on Scientific
and Statistical Computing , 16, 5, pp. 1190-1208.
* C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization
(1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4,
pp. 550 - 560.
*************************************************************************/
#include "lbfgsbcuda.h"
#include <thrust/device_vector.h>
#include <thrust/scan.h>
namespace lbfgsbcuda {
// Per variable i, reads k = index[i] and records a bound-status transition:
// temp_ind1[i]=1 if a free variable (i < nfree) is now bound (iwhere[k] > 0),
// temp_ind2[i]=1 if a bound variable is now free (iwhere[k] <= 0). Both flag
// arrays are later prefix-summed to place the variables into indx2.
__global__
void kernel0(
int* index,
const int* iwhere,
int* temp_ind1,
int* temp_ind2,
int nfree,
int n
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n)
return;
int k = index[i];
int iwk = iwhere[k];
int t1, t2;
if(i < nfree && iwk > 0) {
// Leaves the free set.
t1 = 1;
t2 = 0;
} else if(i >= nfree && iwk <= 0) {
// Enters the free set.
t1 = 0;
t2 = 1;
} else {
t1 = t2 = 0;
}
temp_ind1[i] = t1;
temp_ind2[i] = t2;
}
// Uses the inclusive prefix sums temp_ind3/temp_ind4 of kernel0's flags to
// scatter variables into indx2: leaving variables fill positions from the
// back, entering variables from the front.
__global__
void kernel1(
const int* index,
const int* temp_ind1,
const int* temp_ind2,
const int* temp_ind3,
const int* temp_ind4,
int* indx2,
int n
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n)
return;
int k = index[i];
if(temp_ind1[i])
{
indx2[n - temp_ind3[i]] = k;
} else if(temp_ind2[i]) {
// Inclusive scan is 1-based, hence the -1.
indx2[temp_ind4[i] - 1] = k;
}
}
// Marks each variable as free (iwhere[i] <= 0 -> temp_ind1[i]=1) or bound
// (temp_ind2[i]=1); the flags are prefix-summed in place before kernel3
// rebuilds index[].
__global__
void kernel2(
const int* iwhere,
int* temp_ind1,
int* temp_ind2,
int n
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n)
return;
int iwi = iwhere[i];
if(iwi <= 0)
{
temp_ind1[i] = 1;
temp_ind2[i] = 0;
} else {
temp_ind1[i] = 0;
temp_ind2[i] = 1;
}
}
// Rebuilds index[] from the scanned flags of kernel2: free variables are
// packed to the front in ascending order, bound variables to the back in
// descending order.
__global__
void kernel3(
int* index,
const int* iwhere,
const int* temp_ind1,
const int* temp_ind2,
int n
)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n)
return;
int iwi = iwhere[i];
if(iwi <= 0)
{
// Inclusive scan is 1-based, hence the -1.
index[temp_ind1[i] - 1] = i;
} else {
index[n - temp_ind2[i]] = i;
}
}
namespace freev {
// GPU rebuild of the L-BFGS-B free/bound variable bookkeeping.
// Outputs: index[] (free variables first, bound after; count in nfree);
// when iter > 0 and constrained, indx2[] (entering variables at the front,
// leaving at the back, delimited by nenter/ileave); and wrk, which signals
// that the limited-memory matrices need refreshing. temp_ind1..4 are
// length-n device scratch buffers for the flag/scan passes.
void prog0(
const int& n,
int& nfree,
int* index,
int& nenter,
int& ileave,
int* indx2,
const int* iwhere,
bool& wrk,
const bool& updatd,
const bool& cnstnd,
const int& iter,
int* temp_ind1,
int* temp_ind2,
int* temp_ind3,
int* temp_ind4
)
{
nenter = -1;
ileave = n;
if( iter > 0 && cnstnd )
{
CheckBuffer_int(iwhere, n, n);
CheckBuffer_int(index, n, n);
// Flag variables leaving/entering the free set.
kernel0<<<iDivUp(n, 512), 512>>>
(index, iwhere, temp_ind1, temp_ind2, nfree, n);
CheckBuffer_int(temp_ind1, n, n);
CheckBuffer_int(temp_ind2, n, n);
thrust::device_ptr<int> dptr_ind1(temp_ind1);
thrust::device_ptr<int> dptr_ind2(temp_ind2);
thrust::device_ptr<int> dptr_ind3(temp_ind3);
thrust::device_ptr<int> dptr_ind4(temp_ind4);
// Prefix sums assign each flagged variable its slot in indx2.
thrust::inclusive_scan(dptr_ind1, dptr_ind1 + n, dptr_ind3);
thrust::inclusive_scan(dptr_ind2, dptr_ind2 + n, dptr_ind4);
CheckBuffer_int(temp_ind3, n, n);
CheckBuffer_int(temp_ind4, n, n);
kernel1<<<iDivUp(n, 512), 512>>>
(index, temp_ind1, temp_ind2, temp_ind3, temp_ind4, indx2, n);
CheckBuffer_int(index, n, n);
CheckBuffer_int(indx2, n, n);
// Totals are the last elements of the inclusive scans.
cutilSafeCall(cudaMemcpy(&ileave, temp_ind3 + (n - 1), sizeof(int), cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(&nenter, temp_ind4 + (n - 1), sizeof(int), cudaMemcpyDeviceToHost));
ileave = n - ileave;
nenter = nenter - 1;
}
wrk = ileave < n || nenter >= 0 || updatd;
CheckBuffer_int(iwhere, n, n);
// Re-partition index[]: free variables to the front, bound to the back.
kernel2<<<iDivUp(n, 512), 512>>>
(iwhere, temp_ind1, temp_ind2, n);
CheckBuffer_int(temp_ind1, n, n);
CheckBuffer_int(temp_ind2, n, n);
thrust::device_ptr<int> dptr_ind1(temp_ind1);
thrust::device_ptr<int> dptr_ind2(temp_ind2);
thrust::inclusive_scan(dptr_ind1, dptr_ind1 + n, dptr_ind1);
thrust::inclusive_scan(dptr_ind2, dptr_ind2 + n, dptr_ind2);
CheckBuffer_int(temp_ind1, n, n);
CheckBuffer_int(temp_ind2, n, n);
kernel3<<<iDivUp(n, 512), 512>>>
(index, iwhere, temp_ind1, temp_ind2, n);
CheckBuffer_int(index, n, n);
// Free-variable count is the last element of the first scan.
cutilSafeCall(cudaMemcpy(&nfree, temp_ind1 + (n - 1), sizeof(int), cudaMemcpyDeviceToHost));
}
};
};
|
2cd9eacb39c742af4f60afcd39d2c2aee2d2a884.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmergebicgstab2.cu normal z -> c, Tue Feb 9 16:05:42 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_c
// These routines merge multiple kernels from cmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
// Block-wise sum reduction of the Gs partial dot products in vtmp into
// vtmp2[blockIdx.x]. Written for 128 threads per block (blockSize) with
// blockSize * sizeof(magmaFloatComplex) of dynamic shared memory; each
// thread folds two elements per grid stride before the in-block tree
// reduction. NOTE(review): the complex branch calls __syncthreads() inside
// the divergent `Idx < 32` region -- legacy warp-synchronous style that
// predates independent thread scheduling; confirm on modern architectures.
__global__ void
magma_creduce_kernel_spmv1(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
// Real precisions: volatile pointer enforces the warp-synchronous ordering.
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// Fused kernel: computes v = A*p for a CSR matrix (one row per thread) and a
// block-level partial reduction of the dot product <v, r> into
// vtmp[blockIdx.x]. Written for 256 threads per block with
// 256 * sizeof(magmaFloatComplex) of dynamic shared memory.
// NOTE(review): the complex branch uses __syncthreads() inside the divergent
// `Idx < 32` region -- legacy warp-synchronous idiom; confirm on modern GPUs.
__global__ void
magma_cbicgmerge_spmv1_kernel(
int n,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * p,
magmaFloatComplex * r,
magmaFloatComplex * v,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
// CSR SpMV: row i spans drowptr[i]..drowptr[i+1] in dval/dcolind.
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
// Per-thread contribution to <v, r>; padding threads contribute zero.
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_C_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// Single-thread kernel: overwrites skp[0] with skp[4] / skp[0].
// NOTE(review): in the merged BiCGSTAB scheme this computes alpha from the
// previously reduced dot product in skp[0] -- confirm the skp slot layout
// against the calling solver.
__global__ void
magma_cbicgstab_alphakernel(
magmaFloatComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_c_matrix
system matrix
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
dp magmaFloatComplex_ptr
input vector p
@param[in]
dr magmaFloatComplex_ptr
input vector r
@param[in]
dv magmaFloatComplex_ptr
output vector v
@param[in,out]
skp magmaFloatComplex_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_cbicgmerge_spmv1(
magma_c_matrix A,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr dp,
magmaFloatComplex_ptr dr,
magmaFloatComplex_ptr dv,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
// Dynamic shared memory: one magmaFloatComplex per thread.
int Ms = local_block_size * sizeof( magmaFloatComplex );
// Ping-pong buffers for the multi-pass reduction of partial sums.
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_cbicgmerge_spmv1_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
// Tree-reduce the per-block partial dot products down to a single value.
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_creduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
// skp[0] <- <v, r>; the alpha kernel then sets skp[0] = skp[4] / skp[0].
magma_ccopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_cbicgstab_alphakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
// Reduces two concatenated vectors of per-block partial sums at once:
// vector j (j=0,1) occupies vtmp[j*n .. j*n+Gs-1]. Each thread block
// writes one partial result per vector to vtmp2[ blockIdx.x + j*n ].
// Assumes blockDim.x == 128 (the hard-coded blockSize below) and
// 2*128*sizeof(magmaFloatComplex) bytes of dynamic shared memory.
__global__ void
magma_creduce_kernel_spmv2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
// grid-stride: each pass a thread consumes two elements per vector
int gridSize = blockSize * 2 * gridDim.x;
int j;
// accumulation phase: sum the inputs of both vectors into shared memory
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
// partner element, guarded so the tail never reads past Gs
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
// shared-memory tree reduction: 128 -> 64, then warp-level finish
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// complex precisions: explicit barriers between steps.
// NOTE(review): these __syncthreads() sit inside a divergent (Idx < 32)
// branch — relies on legacy warp-synchronous behavior; verify on Volta+.
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
// NOTE(review): presumably dead in this single-complex file (PRECISION_c
// expected to be defined); the volatile float* alias would be the wrong
// element type for double precision — artifact of precision generation.
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
// single real precision: volatile warp-synchronous finish (also unused
// when PRECISION_c is the active precision)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
// thread 0 publishes one partial per vector for the next reduction pass
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
// Fused kernel: t = A*s for a CSR matrix (one row per thread), then the
// per-block partial dot products <s,t> (slot 0) and <t,t> (slot 1).
// Block partials go to vtmp[ blockIdx.x ] and vtmp[ blockIdx.x + n ].
// Assumes blockDim.x == 256 (the first reduction step adds Idx+128) and
// 2*blockDim.x*sizeof(magmaFloatComplex) bytes of dynamic shared memory.
__global__ void
magma_cbicgmerge_spmv2_kernel(
int n,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * s,
magmaFloatComplex * t,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// CSR SpMV: row pointers delimit this row's nonzeros
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
// load per-element products for both dots into shared memory.
// NOTE(review): the products are unconjugated — for complex data this
// is s.t / t.t rather than Hermitian inner products; confirm intended.
if (i<n){
magmaFloatComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
// tree reduction 256 -> 128 -> 64, then warp-level finish
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside the divergent (Idx < 32) branch
// relies on legacy warp-synchronous execution — verify on Volta+.
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
// NOTE(review): presumably dead code for the active complex precision;
// the volatile float* alias would be wrong for double — see generator.
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
// single real precision: volatile warp-synchronous finish
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 writes one partial per dot product
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// Device-side scalar updates for merged BiCGSTAB:
// omega = skp[6]/skp[7], and skp[3] keeps the previous value of skp[4].
// A single thread performs both writes.
__global__ void
magma_cbicgstab_omegakernel(
    magmaFloatComplex * skp ){
    if ( blockIdx.x * blockDim.x + threadIdx.x == 0 ) {
        skp[2] = skp[6] / skp[7];
        skp[3] = skp[4];
    }
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_c_matrix
input matrix
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
ds magmaFloatComplex_ptr
input vector s
@param[in]
dt magmaFloatComplex_ptr
output vector t
@param[in,out]
skp magmaFloatComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
// Host driver: fused SpMV (dt = A*ds) plus the two dot products <ds,dt>
// and <dt,dt>. Block partials are tree-reduced by ping-ponging d1/d2;
// the two scalars are copied into skp[6] and skp[7], and omega is then
// formed on the device by magma_cbicgstab_omegakernel.
// Only CSR storage is supported.
extern "C" magma_int_t
magma_cbicgmerge_spmv2(
    magma_c_matrix A,
    magmaFloatComplex_ptr d1,
    magmaFloatComplex_ptr d2,
    magmaFloatComplex_ptr ds,
    magmaFloatComplex_ptr dt,
    magmaFloatComplex_ptr skp,
    magma_queue_t queue )
{
    const int rows = A.num_rows;
    const int threads = 256;
    dim3 block( threads );
    dim3 grid( magma_ceildiv( rows, threads ) );
    // two dot products are reduced simultaneously -> double shared memory
    const int shmem = 2 * threads * sizeof( magmaFloatComplex );

    if ( A.storage_type == Magma_CSR )
        hipLaunchKernelGGL(( magma_cbicgmerge_spmv2_kernel), dim3(grid), dim3(block), shmem, queue->cuda_stream(),
                        rows, A.dval, A.drow, A.dcol, ds, dt, d1 );
    else
        printf("error: only CSR format supported.\n");

    // reduce both partial vectors at once, swapping src/dst each pass
    magmaFloatComplex_ptr src = d1;
    magmaFloatComplex_ptr dst = d2;
    while ( grid.x > 1 ) {
        int next = magma_ceildiv( grid.x, block.x );
        if ( next == 1 ) next = 2;
        hipLaunchKernelGGL(( magma_creduce_kernel_spmv2), dim3(next/2), dim3(block.x/2), shmem/2, queue->cuda_stream(),
                        grid.x, rows, src, dst );
        grid.x = next / 2;
        magmaFloatComplex_ptr swp = src;
        src = dst;
        dst = swp;
    }
    magma_ccopyvector( 1, src, 1, skp+6, 1, queue );
    magma_ccopyvector( 1, src+rows, 1, skp+7, 1, queue );

    dim3 block2( 2 );
    dim3 grid2( 1 );
    hipLaunchKernelGGL(( magma_cbicgstab_omegakernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream(), skp );
    return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// Fused vector update + dot products for merged BiCGSTAB:
//   x = x + alpha*p + omega*s
//   r = s - omega*t
// followed by per-block partials of <rr,r> (slot 0) and <r,r> (slot 1),
// written to vtmp[ blockIdx.x ] and vtmp[ blockIdx.x + n ].
// alpha is read from skp[0] and omega from skp[2].
// Assumes blockDim.x == 256 and 2*blockDim.x*sizeof(magmaFloatComplex)
// bytes of dynamic shared memory.
__global__ void
magma_cbicgmerge_xrbeta_kernel(
int n,
magmaFloatComplex * rr,
magmaFloatComplex * r,
magmaFloatComplex * p,
magmaFloatComplex * s,
magmaFloatComplex * t,
magmaFloatComplex * x,
magmaFloatComplex * skp,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaFloatComplex alpha=skp[0];
magmaFloatComplex omega=skp[2];
// elementwise update of the iterate x and the residual r
if( i<n ){
magmaFloatComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
// per-element products for both dots.
// NOTE(review): unconjugated products — for complex data this is rr.r
// and r.r, not Hermitian inner products; confirm intended.
if (i<n){
magmaFloatComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
// tree reduction 256 -> 128 -> 64, then warp-level finish
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() in a divergent (Idx < 32) branch relies
// on legacy warp-synchronous execution — verify on Volta+.
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
// NOTE(review): presumably dead code for the active complex precision;
// the volatile float* alias would be wrong for double — see generator.
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
// single real precision: volatile warp-synchronous finish
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 writes one partial per dot product
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// Device-side computation of the BiCGSTAB beta:
// skp[1] = (skp[4]/skp[3]) * (skp[0]/skp[2]),
// i.e. (rho_new/rho_old) * (alpha/omega). One thread does the work.
__global__ void
magma_cbicgstab_betakernel(
    magmaFloatComplex * skp )
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid == 0 ) {
        const magmaFloatComplex rho_ratio = skp[4] / skp[3];
        const magmaFloatComplex alpha_over_omega = skp[0] / skp[2];
        skp[1] = rho_ratio * alpha_over_omega;
    }
}
/**
Purpose
-------
Merges the vector updates x = x + alpha*p + omega*s and r = s - omega*t
with the dot products <rr,r> and <r,r> and the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
rr magmaFloatComplex_ptr
input vector rr
@param[in]
r magmaFloatComplex_ptr
input/output vector r
@param[in]
p magmaFloatComplex_ptr
input vector p
@param[in]
s magmaFloatComplex_ptr
input vector s
@param[in]
t magmaFloatComplex_ptr
input vector t
@param[out]
x magmaFloatComplex_ptr
output vector x
@param[in]
skp magmaFloatComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
// Host driver: fused x/r update plus the dot products <rr,r> and <r,r>.
// Block partials are tree-reduced by ping-ponging d1/d2; the two scalars
// are copied into skp[4] and skp[5], and beta is then formed on the
// device by magma_cbicgstab_betakernel.
extern "C" magma_int_t
magma_cbicgmerge_xrbeta(
    magma_int_t n,
    magmaFloatComplex_ptr d1,
    magmaFloatComplex_ptr d2,
    magmaFloatComplex_ptr rr,
    magmaFloatComplex_ptr r,
    magmaFloatComplex_ptr p,
    magmaFloatComplex_ptr s,
    magmaFloatComplex_ptr t,
    magmaFloatComplex_ptr x,
    magmaFloatComplex_ptr skp,
    magma_queue_t queue )
{
    const int threads = 256;
    dim3 block( threads );
    dim3 grid( magma_ceildiv( n, threads ) );
    // two dot products are reduced simultaneously -> double shared memory
    const int shmem = 2 * threads * sizeof( magmaFloatComplex );

    hipLaunchKernelGGL(( magma_cbicgmerge_xrbeta_kernel), dim3(grid), dim3(block), shmem, queue->cuda_stream(),
                    n, rr, r, p, s, t, x, skp, d1);

    // reduce both partial vectors at once, swapping src/dst each pass
    magmaFloatComplex_ptr src = d1;
    magmaFloatComplex_ptr dst = d2;
    while ( grid.x > 1 ) {
        int next = magma_ceildiv( grid.x, block.x );
        if ( next == 1 ) next = 2;
        hipLaunchKernelGGL(( magma_creduce_kernel_spmv2), dim3(next/2), dim3(block.x/2), shmem/2, queue->cuda_stream(),
                        grid.x, n, src, dst );
        grid.x = next / 2;
        magmaFloatComplex_ptr swp = src;
        src = dst;
        dst = swp;
    }
    magma_ccopyvector( 1, src, 1, skp+4, 1, queue );
    magma_ccopyvector( 1, src+n, 1, skp+5, 1, queue );

    dim3 block2( 2 );
    dim3 grid2( 1 );
    hipLaunchKernelGGL(( magma_cbicgstab_betakernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream(), skp );
    return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
| 2cd9eacb39c742af4f60afcd39d2c2aee2d2a884.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zmergebicgstab2.cu normal z -> c, Tue Feb 9 16:05:42 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_c
// These routines merge multiple kernels from cmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
// Reduces one vector of Gs per-block partial sums; each thread block
// writes one partial result to vtmp2[ blockIdx.x ].
// Assumes blockDim.x == 128 (the hard-coded blockSize below) and
// 128*sizeof(magmaFloatComplex) bytes of dynamic shared memory.
// NOTE: parameter n is unused here; kept for signature parity with the
// two-vector reducer magma_creduce_kernel_spmv2.
__global__ void
magma_creduce_kernel_spmv1(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
// grid-stride: each pass a thread consumes two elements
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
// partner element, guarded so the tail never reads past Gs
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
// tree reduction 128 -> 64, then warp-level finish
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() in a divergent (Idx < 32) branch relies
// on legacy warp-synchronous execution — verify on Volta+.
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
// NOTE(review): dead branch — this file defines PRECISION_c; the
// volatile float* alias would be the wrong element type for double.
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
// single real precision: volatile warp-synchronous finish (dead branch
// in this PRECISION_c file)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// thread 0 publishes this block's partial
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// Fused kernel: v = A*p for a CSR matrix (one row per thread), then the
// per-block partial dot product <v,r>, written to vtmp[ blockIdx.x ].
// Assumes blockDim.x == 256 (the first reduction step adds Idx+128) and
// blockDim.x*sizeof(magmaFloatComplex) bytes of dynamic shared memory.
__global__ void
magma_cbicgmerge_spmv1_kernel(
int n,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * p,
magmaFloatComplex * r,
magmaFloatComplex * v,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// CSR SpMV: row pointers delimit this row's nonzeros
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
// per-element product for the dot (unconjugated — v.r, not <v,r>
// Hermitian; NOTE(review): confirm intended for complex data)
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_C_MAKE( 0.0, 0.0);
__syncthreads();
// tree reduction 256 -> 128 -> 64, then warp-level finish
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() in a divergent (Idx < 32) branch relies
// on legacy warp-synchronous execution — verify on Volta+.
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
// NOTE(review): dead branch — this file defines PRECISION_c; the
// volatile float* alias would be the wrong element type for double.
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
// single real precision: volatile warp-synchronous finish (dead branch)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// thread 0 publishes this block's partial
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// Device-side scalar update for merged BiCGSTAB:
// overwrites skp[0] with alpha = skp[4] / skp[0].
// Only thread 0 of the launch does any work; all other threads return.
__global__ void
magma_cbicgstab_alphakernel(
    magmaFloatComplex * skp ){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid == 0 ) {
        const magmaFloatComplex rho = skp[0];
        skp[0] = skp[4] / rho;
    }
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_c_matrix
system matrix
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
dp magmaFloatComplex_ptr
input vector p
@param[in]
dr magmaFloatComplex_ptr
input vector r
@param[in]
dv magmaFloatComplex_ptr
output vector v
@param[in,out]
skp magmaFloatComplex_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
// Host driver: fused SpMV (dv = A*dp) plus dot product <dv,dr>.
// The per-block partial sums are tree-reduced by ping-ponging between the
// temporaries d1/d2; the final scalar is copied into skp[0] and converted
// to alpha on the device by magma_cbicgstab_alphakernel.
// Only CSR storage is supported.
extern "C" magma_int_t
magma_cbicgmerge_spmv1(
    magma_c_matrix A,
    magmaFloatComplex_ptr d1,
    magmaFloatComplex_ptr d2,
    magmaFloatComplex_ptr dp,
    magmaFloatComplex_ptr dr,
    magmaFloatComplex_ptr dv,
    magmaFloatComplex_ptr skp,
    magma_queue_t queue )
{
    const int rows = A.num_rows;
    const int threads = 256;
    dim3 block( threads );
    dim3 grid( magma_ceildiv( rows, threads ) );
    const int shmem = threads * sizeof( magmaFloatComplex );

    // fused SpMV + dot kernel: one partial sum per block lands in d1
    if ( A.storage_type == Magma_CSR )
        magma_cbicgmerge_spmv1_kernel<<< grid, block, shmem, queue->cuda_stream()>>>
            ( rows, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
    else
        printf("error: only CSR format supported.\n");

    // reduce the block partials to one value, swapping src/dst each pass
    magmaFloatComplex_ptr src = d1;
    magmaFloatComplex_ptr dst = d2;
    while ( grid.x > 1 ) {
        int next = magma_ceildiv( grid.x, block.x );
        if ( next == 1 ) next = 2;
        magma_creduce_kernel_spmv1<<< next/2, block.x/2, shmem/2, queue->cuda_stream()>>>
            ( grid.x, rows, src, dst );
        grid.x = next / 2;
        magmaFloatComplex_ptr swp = src;
        src = dst;
        dst = swp;
    }
    magma_ccopyvector( 1, src, 1, skp, 1, queue );

    // alpha = skp[4]/skp[0], computed on the device
    dim3 block2( 2 );
    dim3 grid2( 1 );
    magma_cbicgstab_alphakernel<<< grid2, block2, 0, queue->cuda_stream()>>>( skp );
    return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
// Reduces two concatenated vectors of per-block partial sums at once:
// vector j (j=0,1) occupies vtmp[j*n .. j*n+Gs-1]. Each thread block
// writes one partial result per vector to vtmp2[ blockIdx.x + j*n ].
// Assumes blockDim.x == 128 (the hard-coded blockSize below) and
// 2*128*sizeof(magmaFloatComplex) bytes of dynamic shared memory.
__global__ void
magma_creduce_kernel_spmv2(
int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
// grid-stride: each pass a thread consumes two elements per vector
int gridSize = blockSize * 2 * gridDim.x;
int j;
// accumulation phase: sum the inputs of both vectors into shared memory
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
// partner element, guarded so the tail never reads past Gs
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
// shared-memory tree reduction: 128 -> 64, then warp-level finish
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): these __syncthreads() sit inside a divergent (Idx < 32)
// branch — relies on legacy warp-synchronous behavior; verify on Volta+.
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
// NOTE(review): dead branch — this file defines PRECISION_c; the
// volatile float* alias would be the wrong element type for double —
// artifact of the precision generation script.
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
// single real precision: volatile warp-synchronous finish (dead branch
// in this PRECISION_c file)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
// thread 0 publishes one partial per vector for the next reduction pass
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
// Fused kernel: t = A*s for a CSR matrix (one row per thread), then the
// per-block partial dot products <s,t> (slot 0) and <t,t> (slot 1).
// Block partials go to vtmp[ blockIdx.x ] and vtmp[ blockIdx.x + n ].
// Assumes blockDim.x == 256 (the first reduction step adds Idx+128) and
// 2*blockDim.x*sizeof(magmaFloatComplex) bytes of dynamic shared memory.
__global__ void
magma_cbicgmerge_spmv2_kernel(
int n,
magmaFloatComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaFloatComplex * s,
magmaFloatComplex * t,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// CSR SpMV: row pointers delimit this row's nonzeros
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
// load per-element products for both dots into shared memory.
// NOTE(review): the products are unconjugated — for complex data this
// is s.t / t.t rather than Hermitian inner products; confirm intended.
if (i<n){
magmaFloatComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
// tree reduction 256 -> 128 -> 64, then warp-level finish
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() in a divergent (Idx < 32) branch relies
// on legacy warp-synchronous execution — verify on Volta+.
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
// NOTE(review): dead branch — this file defines PRECISION_c; the
// volatile float* alias would be the wrong element type for double.
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
// single real precision: volatile warp-synchronous finish (dead branch)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 writes one partial per dot product
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// Device-side scalar updates for merged BiCGSTAB:
// omega = skp[6]/skp[7], and skp[3] keeps the previous value of skp[4].
// A single thread performs both writes.
__global__ void
magma_cbicgstab_omegakernel(
    magmaFloatComplex * skp ){
    if ( blockIdx.x * blockDim.x + threadIdx.x == 0 ) {
        skp[2] = skp[6] / skp[7];
        skp[3] = skp[4];
    }
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_c_matrix
input matrix
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
ds magmaFloatComplex_ptr
input vector s
@param[in]
dt magmaFloatComplex_ptr
output vector t
@param[in,out]
skp magmaFloatComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
// Host driver: fused SpMV (dt = A*ds) plus the two dot products <ds,dt>
// and <dt,dt>. Block partials are tree-reduced by ping-ponging d1/d2;
// the two scalars are copied into skp[6] and skp[7], and omega is then
// formed on the device by magma_cbicgstab_omegakernel.
// Only CSR storage is supported.
extern "C" magma_int_t
magma_cbicgmerge_spmv2(
    magma_c_matrix A,
    magmaFloatComplex_ptr d1,
    magmaFloatComplex_ptr d2,
    magmaFloatComplex_ptr ds,
    magmaFloatComplex_ptr dt,
    magmaFloatComplex_ptr skp,
    magma_queue_t queue )
{
    const int rows = A.num_rows;
    const int threads = 256;
    dim3 block( threads );
    dim3 grid( magma_ceildiv( rows, threads ) );
    // two dot products are reduced simultaneously -> double shared memory
    const int shmem = 2 * threads * sizeof( magmaFloatComplex );

    if ( A.storage_type == Magma_CSR )
        magma_cbicgmerge_spmv2_kernel<<< grid, block, shmem, queue->cuda_stream()>>>
            ( rows, A.dval, A.drow, A.dcol, ds, dt, d1 );
    else
        printf("error: only CSR format supported.\n");

    // reduce both partial vectors at once, swapping src/dst each pass
    magmaFloatComplex_ptr src = d1;
    magmaFloatComplex_ptr dst = d2;
    while ( grid.x > 1 ) {
        int next = magma_ceildiv( grid.x, block.x );
        if ( next == 1 ) next = 2;
        magma_creduce_kernel_spmv2<<< next/2, block.x/2, shmem/2, queue->cuda_stream()>>>
            ( grid.x, rows, src, dst );
        grid.x = next / 2;
        magmaFloatComplex_ptr swp = src;
        src = dst;
        dst = swp;
    }
    magma_ccopyvector( 1, src, 1, skp+6, 1, queue );
    magma_ccopyvector( 1, src+rows, 1, skp+7, 1, queue );

    dim3 block2( 2 );
    dim3 grid2( 1 );
    magma_cbicgstab_omegakernel<<< grid2, block2, 0, queue->cuda_stream()>>>( skp );
    return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// Fused vector update + dot products for merged BiCGSTAB:
//   x = x + alpha*p + omega*s
//   r = s - omega*t
// followed by per-block partials of <rr,r> (slot 0) and <r,r> (slot 1),
// written to vtmp[ blockIdx.x ] and vtmp[ blockIdx.x + n ].
// alpha is read from skp[0] and omega from skp[2].
// Assumes blockDim.x == 256 and 2*blockDim.x*sizeof(magmaFloatComplex)
// bytes of dynamic shared memory.
__global__ void
magma_cbicgmerge_xrbeta_kernel(
int n,
magmaFloatComplex * rr,
magmaFloatComplex * r,
magmaFloatComplex * p,
magmaFloatComplex * s,
magmaFloatComplex * t,
magmaFloatComplex * x,
magmaFloatComplex * skp,
magmaFloatComplex * vtmp )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaFloatComplex alpha=skp[0];
magmaFloatComplex omega=skp[2];
// elementwise update of the iterate x and the residual r
if( i<n ){
magmaFloatComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
// per-element products for both dots.
// NOTE(review): unconjugated products — for complex data this is rr.r
// and r.r, not Hermitian inner products; confirm intended.
if (i<n){
magmaFloatComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
// tree reduction 256 -> 128 -> 64, then warp-level finish
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() in a divergent (Idx < 32) branch relies
// on legacy warp-synchronous execution — verify on Volta+.
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
// NOTE(review): dead branch — this file defines PRECISION_c; the
// volatile float* alias would be the wrong element type for double.
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
// single real precision: volatile warp-synchronous finish (dead branch)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 writes one partial per dot product
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// Device-side computation of the BiCGSTAB beta:
// skp[1] = (skp[4]/skp[3]) * (skp[0]/skp[2]),
// i.e. (rho_new/rho_old) * (alpha/omega). One thread does the work.
__global__ void
magma_cbicgstab_betakernel(
    magmaFloatComplex * skp )
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid == 0 ) {
        const magmaFloatComplex rho_ratio = skp[4] / skp[3];
        const magmaFloatComplex alpha_over_omega = skp[0] / skp[2];
        skp[1] = rho_ratio * alpha_over_omega;
    }
}
/**
Purpose
-------
Merges the vector updates x = x + alpha*p + omega*s and r = s - omega*t
with the dot products <rr,r> and <r,r> and the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaFloatComplex_ptr
temporary vector
@param[in]
d2 magmaFloatComplex_ptr
temporary vector
@param[in]
rr magmaFloatComplex_ptr
input vector rr
@param[in]
r magmaFloatComplex_ptr
input/output vector r
@param[in]
p magmaFloatComplex_ptr
input vector p
@param[in]
s magmaFloatComplex_ptr
input vector s
@param[in]
t magmaFloatComplex_ptr
input vector t
@param[out]
x magmaFloatComplex_ptr
output vector x
@param[in]
skp magmaFloatComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cgegpuk
********************************************************************/
// Host driver: fused x/r update plus the dot products <rr,r> and <r,r>.
// Block partials are tree-reduced by ping-ponging d1/d2; the two scalars
// are copied into skp[4] and skp[5], and beta is then formed on the
// device by magma_cbicgstab_betakernel.
extern "C" magma_int_t
magma_cbicgmerge_xrbeta(
    magma_int_t n,
    magmaFloatComplex_ptr d1,
    magmaFloatComplex_ptr d2,
    magmaFloatComplex_ptr rr,
    magmaFloatComplex_ptr r,
    magmaFloatComplex_ptr p,
    magmaFloatComplex_ptr s,
    magmaFloatComplex_ptr t,
    magmaFloatComplex_ptr x,
    magmaFloatComplex_ptr skp,
    magma_queue_t queue )
{
    const int threads = 256;
    dim3 block( threads );
    dim3 grid( magma_ceildiv( n, threads ) );
    // two dot products are reduced simultaneously -> double shared memory
    const int shmem = 2 * threads * sizeof( magmaFloatComplex );

    magma_cbicgmerge_xrbeta_kernel<<< grid, block, shmem, queue->cuda_stream()>>>
        ( n, rr, r, p, s, t, x, skp, d1);

    // reduce both partial vectors at once, swapping src/dst each pass
    magmaFloatComplex_ptr src = d1;
    magmaFloatComplex_ptr dst = d2;
    while ( grid.x > 1 ) {
        int next = magma_ceildiv( grid.x, block.x );
        if ( next == 1 ) next = 2;
        magma_creduce_kernel_spmv2<<< next/2, block.x/2, shmem/2, queue->cuda_stream()>>>
            ( grid.x, n, src, dst );
        grid.x = next / 2;
        magmaFloatComplex_ptr swp = src;
        src = dst;
        dst = swp;
    }
    magma_ccopyvector( 1, src, 1, skp+4, 1, queue );
    magma_ccopyvector( 1, src+n, 1, skp+5, 1, queue );

    dim3 block2( 2 );
    dim3 grid2( 1 );
    magma_cbicgstab_betakernel<<< grid2, block2, 0, queue->cuda_stream()>>>( skp );
    return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
|
0f4943f9cca6cd58073e62dced90dd6bad207756.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/instance_norm_op.h"
namespace caffe2 {
namespace {
// Per-(instance, channel) mean over the spatial dimension.
// One grid-stride iteration per (n, c) pair (CUDA_1D_KERNEL_LOOP over N*C).
// input_data is addressed as n*N_stride + c*C_stride + j*dim_stride, so
// the strides determine the memory layout — confirm with the caller.
// mean_data has N*C entries, indexed n*C + c.
__global__ void InstanceNormMeanKernel(
    int N,
    int C,
    int dim,
    int N_stride,
    int C_stride,
    int dim_stride,
    const float* input_data,
    float* mean_data) {
  CUDA_1D_KERNEL_LOOP(i, N * C) {
    const auto n = i / C;
    const auto c = i % C;
    // accumulate in a register instead of doing a global-memory
    // read-modify-write per element (the original updated mean_data[i]
    // inside the loop); the summation order is unchanged
    float sum = 0.0f;
    auto input_offset = input_data + n * N_stride + c * C_stride;
    for (int j = 0; j < dim; ++j) {
      sum += *input_offset;
      input_offset += dim_stride;
    }
    mean_data[i] = sum / dim;
  }
}
// Per-(n, c) inverse standard deviation: 1 / sqrt(var + epsilon), where the
// variance is the biased (divide-by-dim) spatial variance around mean_data[i].
__global__ void InstanceNormInvStdevKernel(
    int N,
    int C,
    int dim,
    int N_stride,
    int C_stride,
    int dim_stride,
    float epsilon,
    const float* input_data,
    const float* mean_data,
    float* inv_stdev_data) {
  CUDA_1D_KERNEL_LOOP(i, N * C) {
    const auto n = i / C;
    const auto c = i % C;
    // Cache the mean in a register (the original re-read mean_data[i] from
    // global memory every loop iteration) and accumulate the sum of squares
    // in a register instead of read-modify-writing inv_stdev_data[i].
    const float mean = mean_data[i];
    float sum_sq = 0.0f;
    const float* input_offset = input_data + n * N_stride + c * C_stride;
    for (int j = 0; j < dim; ++j) {
      const float diff = *input_offset - mean;
      sum_sq += diff * diff;
      input_offset += dim_stride;
    }
    // Use a float literal: the original's `1.0 / sqrtf(...)` silently
    // promoted the division to double inside a float kernel.
    inv_stdev_data[i] = 1.0f / sqrtf(sum_sq / dim + epsilon);
  }
}
// Applies the instance-norm affine transform element-wise:
// out = (in - mean[n,c]) * inv_stdev[n,c] * scale[c] + bias[c].
// One loop iteration handles one (n, c, j) element; strides make the kernel
// independent of the memory layout.
__global__ void InstanceNormKernel(
    int N,
    int C,
    int dim,
    int N_stride,
    int C_stride,
    int dim_stride,
    const float* input_data,
    const float* scale_data,
    const float* bias_data,
    const float* mean_data,
    const float* inv_stdev_data,
    float* output_data) {
  CUDA_1D_KERNEL_LOOP(i, N * C * dim) {
    // Decompose the flat loop index into (n, c, j).
    const auto j = i % dim;
    const auto c = (i / dim) % C;
    const auto n = i / (dim * C);
    // Strided element offset and the flat (n, c) statistics index.
    const auto elem = n * N_stride + c * C_stride + j * dim_stride;
    const auto stat = n * C + c;
    const float normalized =
        (input_data[elem] - mean_data[stat]) * inv_stdev_data[stat];
    output_data[elem] = normalized * scale_data[c] + bias_data[c];
  }
}
// Per-(n, c) gradient of the loss w.r.t. the input of instance norm.
// Each loop iteration handles one (n, c) slice and makes four strided passes
// over its `dim` elements, using input_grad_data as scratch space throughout.
// Statement order matters: each pass consumes the scratch values written by
// the previous one.
__global__ void InstanceNormGradientKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
const float* input_data,
const float* scale_data,
const float* bias_data,
const float* output_grad_data,
const float* mean_data,
const float* inv_stdev_data,
float* input_grad_data) {
CUDA_1D_KERNEL_LOOP(i, N * C) {
const auto n = i / C;
const auto c = i % C;
// Pass 1: stash the centered input (x - mean) in input_grad as scratch.
auto input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
auto input_offset = input_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
*input_grad_offset = *input_offset - mean_data[i];
input_grad_offset += dim_stride;
input_offset += dim_stride;
}
// Pass 2: temp = sum_j (x_j - mean) * dy_j, accumulated in double
// (`auto ... = 0.0` deduces double).
auto temp = 0.0;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
auto output_grad_offset = output_grad_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
temp += *input_grad_offset * *output_grad_offset;
input_grad_offset += dim_stride;
output_grad_offset += dim_stride;
}
// Scale by -inv_stdev^3 / dim (variance-gradient term).
temp *= -powf(inv_stdev_data[i], 3.0) / dim;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
output_grad_offset = output_grad_data + n * N_stride + c * C_stride;
auto mean = 0.0;
// Pass 3: dx_j = (x_j - mean) * temp + dy_j * inv_stdev, and accumulate
// the mean of dx for the final centering pass (double accumulator).
for (int j = 0; j < dim; ++j) {
*input_grad_offset *= temp;
*input_grad_offset += *output_grad_offset * inv_stdev_data[i];
mean += *input_grad_offset;
input_grad_offset += dim_stride;
output_grad_offset += dim_stride;
}
mean /= dim;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
// Pass 4: subtract the mean gradient (mean-gradient term) and apply the
// per-channel scale.
for (int j = 0; j < dim; ++j) {
*input_grad_offset -= mean;
*input_grad_offset *= scale_data[c];
input_grad_offset += dim_stride;
}
}
}
// Gradients of the loss w.r.t. the per-channel scale and bias.
// One loop iteration handles one channel c, reducing over all N instances
// and all `dim` spatial positions:
//   d(scale)[c] = sum_{n,j} (x - mean[n,c]) * inv_stdev[n,c] * dy
//   d(bias)[c]  = sum_{n,j} dy
__global__ void InstanceNormScaleBiasGradientKernel(
    int N,
    int C,
    int dim,
    int N_stride,
    int C_stride,
    int dim_stride,
    const float* input_data,
    const float* mean_data,
    const float* output_grad_data,
    const float* inv_stdev_data,
    float* scale_grad_data,
    float* bias_grad_data) {
  CUDA_1D_KERNEL_LOOP(c, C) {
    // Accumulate in registers instead of read-modify-writing the global
    // scale_grad_data[c] / bias_grad_data[c] on every element (the original
    // paid two global loads and two stores per element). The summation
    // order is unchanged.
    float scale_sum = 0.0f;
    float bias_sum = 0.0f;
    auto input_offset = input_data + c * C_stride;
    auto output_grad_offset = output_grad_data + c * C_stride;
    auto mean_offset = mean_data + c;
    auto inv_stdev_offset = inv_stdev_data + c;
    for (int n = 0; n < N; ++n) {
      auto input_offset_inner = input_offset + n * N_stride;
      auto output_grad_offset_inner = output_grad_offset + n * N_stride;
      for (int i = 0; i < dim; ++i) {
        scale_sum += (*input_offset_inner - *mean_offset) *
            *inv_stdev_offset * *output_grad_offset_inner;
        bias_sum += *output_grad_offset_inner;
        input_offset_inner += dim_stride;
        output_grad_offset_inner += dim_stride;
      }
      // Advance to this channel's statistics for the next instance.
      mean_offset += C;
      inv_stdev_offset += C;
    }
    scale_grad_data[c] = scale_sum;
    bias_grad_data[c] = bias_sum;
  }
}
} // namespace
// Forward instance normalization for NHWC-ordered 4-D input.
// Optionally exposes the per-(n, c) mean and inverse stdev as extra outputs;
// otherwise they live in member scratch tensors.
template <>
bool InstanceNormOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
// Statistics go to real outputs when requested, else to scratch members.
auto mean = OutputSize() >= 2 ? Output(MEAN) : &mean_;
auto inv_stdev = OutputSize() >= 3 ? Output(INV_STDEV) : &inv_stdev_;
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int H = input.dim32(1);
const int W = input.dim32(2);
const int C = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
auto output = Output(OUTPUT, input.sizes(), at::dtype<float>());
mean->Resize(N, C);
inv_stdev->Resize(N, C);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
auto output_data = output->template mutable_data<float>();
auto mean_data = mean->template mutable_data<float>();
auto inv_stdev_data = inv_stdev->template mutable_data<float>();
// NHWC layout: channel is the innermost axis, so spatial elements of one
// (n, c) slice are C apart.
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = 1;
const auto dim_stride = C;
// Stage 1: per-(n, c) spatial means.
hipLaunchKernelGGL(( InstanceNormMeanKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, C, dim, N_stride, C_stride, dim_stride, input_data, mean_data);
// Stage 2: per-(n, c) inverse standard deviations.
hipLaunchKernelGGL(( InstanceNormInvStdevKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_data);
// Stage 3: normalize every element and apply the channel affine transform.
hipLaunchKernelGGL(( InstanceNormKernel),
dim3(CAFFE_GET_BLOCKS(N * C * H * W)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
mean_data,
inv_stdev_data,
output_data);
return true;
}
// Forward instance normalization for NCHW-ordered 4-D input.
// Identical pipeline to the NHWC variant; only the stride constants differ.
template <>
bool InstanceNormOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
// Statistics go to real outputs when requested, else to scratch members.
auto mean = OutputSize() >= 2 ? Output(MEAN) : &mean_;
auto inv_stdev = OutputSize() >= 3 ? Output(INV_STDEV) : &inv_stdev_;
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int C = input.dim32(1);
const int H = input.dim32(2);
const int W = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
auto output = Output(OUTPUT, input.sizes(), at::dtype<float>());
mean->Resize(N, C);
inv_stdev->Resize(N, C);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
auto output_data = output->template mutable_data<float>();
auto mean_data = mean->template mutable_data<float>();
auto inv_stdev_data = inv_stdev->template mutable_data<float>();
// NCHW layout: the H*W pixels of one (n, c) slice are contiguous.
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = H * W;
const auto dim_stride = 1;
// Stage 1: per-(n, c) spatial means.
hipLaunchKernelGGL(( InstanceNormMeanKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, C, dim, N_stride, C_stride, dim_stride, input_data, mean_data);
// Stage 2: per-(n, c) inverse standard deviations.
hipLaunchKernelGGL(( InstanceNormInvStdevKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_data);
// Stage 3: normalize every element and apply the channel affine transform.
hipLaunchKernelGGL(( InstanceNormKernel),
dim3(CAFFE_GET_BLOCKS(N * C * H * W)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
mean_data,
inv_stdev_data,
output_data);
return true;
}
// Backward instance normalization for NHWC-ordered input.
// Inputs: INPUT, SCALE, BIAS, OUTPUT_GRAD, plus optionally the forward-pass
// MEAN and INV_STDEV; missing statistics are recomputed on the fly.
// Outputs: gradients w.r.t. the input, scale and bias.
template <>
bool InstanceNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
const auto& output_grad = Input(OUTPUT_GRAD);
// Saved statistics are optional inputs; fall back to member scratch tensors.
const auto& mean = InputSize() >= 5 ? Input(MEAN) : mean_;
const auto& inv_stdev = InputSize() >= 6 ? Input(INV_STDEV) : inv_stdev_;
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int H = input.dim32(1);
const int W = input.dim32(2);
const int C = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
CAFFE_ENFORCE_EQ(4, output_grad.ndim());
CAFFE_ENFORCE_EQ(N, output_grad.dim32(0));
CAFFE_ENFORCE_EQ(H, output_grad.dim32(1));
CAFFE_ENFORCE_EQ(W, output_grad.dim32(2));
CAFFE_ENFORCE_EQ(C, output_grad.dim32(3));
auto input_grad = Output(INPUT_GRAD, input.sizes(), at::dtype<float>());
auto scale_grad = Output(SCALE_GRAD, scale.sizes(), at::dtype<float>());
auto bias_grad = Output(BIAS_GRAD, bias.sizes(), at::dtype<float>());
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
const auto output_grad_data = output_grad.data<float>();
auto input_grad_data = input_grad->template mutable_data<float>();
auto scale_grad_data = scale_grad->template mutable_data<float>();
auto bias_grad_data = bias_grad->template mutable_data<float>();
// NHWC layout: channel innermost, spatial elements of a slice are C apart.
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = 1;
const auto dim_stride = C;
// Recompute the per-(n, c) means if they were not passed in.
if (InputSize() < 5) {
ReinitializeTensor(&mean_, {N, C}, at::dtype<float>().device(CUDA));
auto mean_mutable_data = mean_.mutable_data<float>();
hipLaunchKernelGGL(( InstanceNormMeanKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_mutable_data);
}
CAFFE_ENFORCE_EQ(2, mean.ndim());
CAFFE_ENFORCE_EQ(N, mean.dim32(0));
CAFFE_ENFORCE_EQ(C, mean.dim32(1));
const auto mean_data = mean.data<float>();
// Recompute the inverse standard deviations if they were not passed in.
if (InputSize() < 6) {
ReinitializeTensor(&inv_stdev_, {N, C}, at::dtype<float>().device(CUDA));
auto inv_stdev_mutable_data = inv_stdev_.mutable_data<float>();
hipLaunchKernelGGL(( InstanceNormInvStdevKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_mutable_data);
}
CAFFE_ENFORCE_EQ(2, inv_stdev.ndim());
CAFFE_ENFORCE_EQ(N, inv_stdev.dim32(0));
CAFFE_ENFORCE_EQ(C, inv_stdev.dim32(1));
const auto inv_stdev_data = inv_stdev.data<float>();
// d(scale) / d(bias): one thread per channel.
hipLaunchKernelGGL(( InstanceNormScaleBiasGradientKernel),
dim3(CAFFE_GET_BLOCKS(C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_data,
output_grad_data,
inv_stdev_data,
scale_grad_data,
bias_grad_data);
// d(input): one thread per (n, c) slice.
hipLaunchKernelGGL(( InstanceNormGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * C)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
output_grad_data,
mean_data,
inv_stdev_data,
input_grad_data);
return true;
}
// Backward instance normalization for NCHW-ordered input.
// Inputs: INPUT, SCALE, BIAS, OUTPUT_GRAD, plus optionally the forward-pass
// MEAN and INV_STDEV; missing statistics are recomputed on the fly.
// Outputs: gradients w.r.t. the input, scale and bias.
template <>
bool InstanceNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
  const auto& input = Input(INPUT);
  const auto& scale = Input(SCALE);
  const auto& bias = Input(BIAS);
  const auto& output_grad = Input(OUTPUT_GRAD);
  // Saved statistics are optional inputs; fall back to member scratch tensors.
  const auto& mean = InputSize() >= 5 ? Input(MEAN) : mean_;
  const auto& inv_stdev = InputSize() >= 6 ? Input(INV_STDEV) : inv_stdev_;
  CAFFE_ENFORCE_EQ(4, input.ndim());
  const int N = input.dim32(0);
  const int C = input.dim32(1);
  const int H = input.dim32(2);
  const int W = input.dim32(3);
  CAFFE_ENFORCE_EQ(1, scale.ndim());
  CAFFE_ENFORCE_EQ(C, scale.dim32(0));
  CAFFE_ENFORCE_EQ(1, bias.ndim());
  CAFFE_ENFORCE_EQ(C, bias.dim32(0));
  CAFFE_ENFORCE_EQ(4, output_grad.ndim());
  CAFFE_ENFORCE_EQ(N, output_grad.dim32(0));
  CAFFE_ENFORCE_EQ(C, output_grad.dim32(1));
  CAFFE_ENFORCE_EQ(H, output_grad.dim32(2));
  CAFFE_ENFORCE_EQ(W, output_grad.dim32(3));
  auto input_grad = Output(INPUT_GRAD, input.sizes(), at::dtype<float>());
  auto scale_grad = Output(SCALE_GRAD, scale.sizes(), at::dtype<float>());
  auto bias_grad = Output(BIAS_GRAD, bias.sizes(), at::dtype<float>());
  const auto input_data = input.data<float>();
  const auto scale_data = scale.data<float>();
  const auto bias_data = bias.data<float>();
  const auto output_grad_data = output_grad.data<float>();
  auto input_grad_data = input_grad->template mutable_data<float>();
  auto scale_grad_data = scale_grad->template mutable_data<float>();
  auto bias_grad_data = bias_grad->template mutable_data<float>();
  // NCHW layout: the H*W pixels of one (n, c) slice are contiguous.
  const auto dim = H * W;
  const auto N_stride = C * H * W;
  const auto C_stride = H * W;
  const auto dim_stride = 1;
  // Recompute the per-(n, c) means if they were not passed in.
  if (InputSize() < 5) {
    // Consistency fix: use ReinitializeTensor, as the NHWC variant already
    // does, instead of Resize, so the scratch tensor is created with the
    // correct dtype on the CUDA device.
    ReinitializeTensor(&mean_, {N, C}, at::dtype<float>().device(CUDA));
    auto mean_mutable_data = mean_.mutable_data<float>();
    hipLaunchKernelGGL(( InstanceNormMeanKernel),
        dim3(CAFFE_GET_BLOCKS(N * C)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context_.cuda_stream(),
        N,
        C,
        dim,
        N_stride,
        C_stride,
        dim_stride,
        input_data,
        mean_mutable_data);
  }
  CAFFE_ENFORCE_EQ(2, mean.ndim());
  CAFFE_ENFORCE_EQ(N, mean.dim32(0));
  CAFFE_ENFORCE_EQ(C, mean.dim32(1));
  const auto mean_data = mean.data<float>();
  // Recompute the inverse standard deviations if they were not passed in.
  if (InputSize() < 6) {
    // Same consistency fix as above for the inverse-stdev scratch tensor.
    ReinitializeTensor(&inv_stdev_, {N, C}, at::dtype<float>().device(CUDA));
    auto inv_stdev_mutable_data = inv_stdev_.mutable_data<float>();
    hipLaunchKernelGGL(( InstanceNormInvStdevKernel),
        dim3(CAFFE_GET_BLOCKS(N * C)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context_.cuda_stream(),
        N,
        C,
        dim,
        N_stride,
        C_stride,
        dim_stride,
        epsilon_,
        input_data,
        mean_data,
        inv_stdev_mutable_data);
  }
  CAFFE_ENFORCE_EQ(2, inv_stdev.ndim());
  CAFFE_ENFORCE_EQ(N, inv_stdev.dim32(0));
  CAFFE_ENFORCE_EQ(C, inv_stdev.dim32(1));
  const auto inv_stdev_data = inv_stdev.data<float>();
  // d(scale) / d(bias): one thread per channel.
  hipLaunchKernelGGL(( InstanceNormScaleBiasGradientKernel),
      dim3(CAFFE_GET_BLOCKS(C)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      N,
      C,
      dim,
      N_stride,
      C_stride,
      dim_stride,
      input_data,
      mean_data,
      output_grad_data,
      inv_stdev_data,
      scale_grad_data,
      bias_grad_data);
  // d(input): one thread per (n, c) slice.
  hipLaunchKernelGGL(( InstanceNormGradientKernel),
      dim3(CAFFE_GET_BLOCKS(N * C)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      N,
      C,
      dim,
      N_stride,
      C_stride,
      dim_stride,
      input_data,
      scale_data,
      bias_data,
      output_grad_data,
      mean_data,
      inv_stdev_data,
      input_grad_data);
  return true;
}
REGISTER_CUDA_OPERATOR(InstanceNorm, InstanceNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
InstanceNormGradient,
InstanceNormGradientOp<float, CUDAContext>);
} // namespace caffe2
| 0f4943f9cca6cd58073e62dced90dd6bad207756.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/instance_norm_op.h"
namespace caffe2 {
namespace {
// Per-(n, c) mean over the spatial dimension for instance normalization.
// One loop iteration of CUDA_1D_KERNEL_LOOP handles one (n, c) pair;
// strides make the kernel layout-agnostic (NCHW or NHWC).
__global__ void InstanceNormMeanKernel(
    int N,
    int C,
    int dim,
    int N_stride,
    int C_stride,
    int dim_stride,
    const float* input_data,
    float* mean_data) {
  CUDA_1D_KERNEL_LOOP(i, N * C) {
    const auto n = i / C;
    const auto c = i % C;
    // Accumulate in a register instead of read-modify-writing the global
    // mean_data[i] on every iteration (the original paid a global load and
    // store per element). Summation order is unchanged.
    float sum = 0.0f;
    const float* input_offset = input_data + n * N_stride + c * C_stride;
    for (int j = 0; j < dim; ++j) {
      sum += *input_offset;
      input_offset += dim_stride;
    }
    mean_data[i] = sum / dim;
  }
}
// Per-(n, c) inverse standard deviation: 1 / sqrt(var + epsilon), where the
// variance is the biased (divide-by-dim) spatial variance around mean_data[i].
__global__ void InstanceNormInvStdevKernel(
    int N,
    int C,
    int dim,
    int N_stride,
    int C_stride,
    int dim_stride,
    float epsilon,
    const float* input_data,
    const float* mean_data,
    float* inv_stdev_data) {
  CUDA_1D_KERNEL_LOOP(i, N * C) {
    const auto n = i / C;
    const auto c = i % C;
    // Cache the mean in a register (the original re-read mean_data[i] from
    // global memory every loop iteration) and accumulate the sum of squares
    // in a register instead of read-modify-writing inv_stdev_data[i].
    const float mean = mean_data[i];
    float sum_sq = 0.0f;
    const float* input_offset = input_data + n * N_stride + c * C_stride;
    for (int j = 0; j < dim; ++j) {
      const float diff = *input_offset - mean;
      sum_sq += diff * diff;
      input_offset += dim_stride;
    }
    // Use a float literal: the original's `1.0 / sqrtf(...)` silently
    // promoted the division to double inside a float kernel.
    inv_stdev_data[i] = 1.0f / sqrtf(sum_sq / dim + epsilon);
  }
}
// Applies the instance-norm affine transform element-wise:
// out = (in - mean[n,c]) * inv_stdev[n,c] * scale[c] + bias[c].
// One loop iteration handles one (n, c, j) element; strides make the kernel
// independent of the memory layout.
__global__ void InstanceNormKernel(
    int N,
    int C,
    int dim,
    int N_stride,
    int C_stride,
    int dim_stride,
    const float* input_data,
    const float* scale_data,
    const float* bias_data,
    const float* mean_data,
    const float* inv_stdev_data,
    float* output_data) {
  CUDA_1D_KERNEL_LOOP(i, N * C * dim) {
    // Decompose the flat loop index into (n, c, j).
    const auto j = i % dim;
    const auto c = (i / dim) % C;
    const auto n = i / (dim * C);
    // Strided element offset and the flat (n, c) statistics index.
    const auto elem = n * N_stride + c * C_stride + j * dim_stride;
    const auto stat = n * C + c;
    const float normalized =
        (input_data[elem] - mean_data[stat]) * inv_stdev_data[stat];
    output_data[elem] = normalized * scale_data[c] + bias_data[c];
  }
}
// Per-(n, c) gradient of the loss w.r.t. the input of instance norm.
// Each loop iteration handles one (n, c) slice and makes four strided passes
// over its `dim` elements, using input_grad_data as scratch space throughout.
// Statement order matters: each pass consumes the scratch values written by
// the previous one.
__global__ void InstanceNormGradientKernel(
int N,
int C,
int dim,
int N_stride,
int C_stride,
int dim_stride,
const float* input_data,
const float* scale_data,
const float* bias_data,
const float* output_grad_data,
const float* mean_data,
const float* inv_stdev_data,
float* input_grad_data) {
CUDA_1D_KERNEL_LOOP(i, N * C) {
const auto n = i / C;
const auto c = i % C;
// Pass 1: stash the centered input (x - mean) in input_grad as scratch.
auto input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
auto input_offset = input_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
*input_grad_offset = *input_offset - mean_data[i];
input_grad_offset += dim_stride;
input_offset += dim_stride;
}
// Pass 2: temp = sum_j (x_j - mean) * dy_j, accumulated in double
// (`auto ... = 0.0` deduces double).
auto temp = 0.0;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
auto output_grad_offset = output_grad_data + n * N_stride + c * C_stride;
for (int j = 0; j < dim; ++j) {
temp += *input_grad_offset * *output_grad_offset;
input_grad_offset += dim_stride;
output_grad_offset += dim_stride;
}
// Scale by -inv_stdev^3 / dim (variance-gradient term).
temp *= -powf(inv_stdev_data[i], 3.0) / dim;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
output_grad_offset = output_grad_data + n * N_stride + c * C_stride;
auto mean = 0.0;
// Pass 3: dx_j = (x_j - mean) * temp + dy_j * inv_stdev, and accumulate
// the mean of dx for the final centering pass (double accumulator).
for (int j = 0; j < dim; ++j) {
*input_grad_offset *= temp;
*input_grad_offset += *output_grad_offset * inv_stdev_data[i];
mean += *input_grad_offset;
input_grad_offset += dim_stride;
output_grad_offset += dim_stride;
}
mean /= dim;
input_grad_offset = input_grad_data + n * N_stride + c * C_stride;
// Pass 4: subtract the mean gradient (mean-gradient term) and apply the
// per-channel scale.
for (int j = 0; j < dim; ++j) {
*input_grad_offset -= mean;
*input_grad_offset *= scale_data[c];
input_grad_offset += dim_stride;
}
}
}
// Gradients of the loss w.r.t. the per-channel scale and bias.
// One loop iteration handles one channel c, reducing over all N instances
// and all `dim` spatial positions:
//   d(scale)[c] = sum_{n,j} (x - mean[n,c]) * inv_stdev[n,c] * dy
//   d(bias)[c]  = sum_{n,j} dy
__global__ void InstanceNormScaleBiasGradientKernel(
    int N,
    int C,
    int dim,
    int N_stride,
    int C_stride,
    int dim_stride,
    const float* input_data,
    const float* mean_data,
    const float* output_grad_data,
    const float* inv_stdev_data,
    float* scale_grad_data,
    float* bias_grad_data) {
  CUDA_1D_KERNEL_LOOP(c, C) {
    // Accumulate in registers instead of read-modify-writing the global
    // scale_grad_data[c] / bias_grad_data[c] on every element (the original
    // paid two global loads and two stores per element). The summation
    // order is unchanged.
    float scale_sum = 0.0f;
    float bias_sum = 0.0f;
    auto input_offset = input_data + c * C_stride;
    auto output_grad_offset = output_grad_data + c * C_stride;
    auto mean_offset = mean_data + c;
    auto inv_stdev_offset = inv_stdev_data + c;
    for (int n = 0; n < N; ++n) {
      auto input_offset_inner = input_offset + n * N_stride;
      auto output_grad_offset_inner = output_grad_offset + n * N_stride;
      for (int i = 0; i < dim; ++i) {
        scale_sum += (*input_offset_inner - *mean_offset) *
            *inv_stdev_offset * *output_grad_offset_inner;
        bias_sum += *output_grad_offset_inner;
        input_offset_inner += dim_stride;
        output_grad_offset_inner += dim_stride;
      }
      // Advance to this channel's statistics for the next instance.
      mean_offset += C;
      inv_stdev_offset += C;
    }
    scale_grad_data[c] = scale_sum;
    bias_grad_data[c] = bias_sum;
  }
}
} // namespace
// Forward instance normalization for NHWC-ordered 4-D input.
// Optionally exposes the per-(n, c) mean and inverse stdev as extra outputs;
// otherwise they live in member scratch tensors.
template <>
bool InstanceNormOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
// Statistics go to real outputs when requested, else to scratch members.
auto mean = OutputSize() >= 2 ? Output(MEAN) : &mean_;
auto inv_stdev = OutputSize() >= 3 ? Output(INV_STDEV) : &inv_stdev_;
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int H = input.dim32(1);
const int W = input.dim32(2);
const int C = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
auto output = Output(OUTPUT, input.sizes(), at::dtype<float>());
mean->Resize(N, C);
inv_stdev->Resize(N, C);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
auto output_data = output->template mutable_data<float>();
auto mean_data = mean->template mutable_data<float>();
auto inv_stdev_data = inv_stdev->template mutable_data<float>();
// NHWC layout: channel is the innermost axis, so spatial elements of one
// (n, c) slice are C apart.
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = 1;
const auto dim_stride = C;
// Stage 1: per-(n, c) spatial means.
InstanceNormMeanKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, C, dim, N_stride, C_stride, dim_stride, input_data, mean_data);
// Stage 2: per-(n, c) inverse standard deviations.
InstanceNormInvStdevKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_data);
// Stage 3: normalize every element and apply the channel affine transform.
InstanceNormKernel<<<
CAFFE_GET_BLOCKS(N * C * H * W),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
mean_data,
inv_stdev_data,
output_data);
return true;
}
// Forward instance normalization for NCHW-ordered 4-D input.
// Identical pipeline to the NHWC variant; only the stride constants differ.
template <>
bool InstanceNormOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
// Statistics go to real outputs when requested, else to scratch members.
auto mean = OutputSize() >= 2 ? Output(MEAN) : &mean_;
auto inv_stdev = OutputSize() >= 3 ? Output(INV_STDEV) : &inv_stdev_;
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int C = input.dim32(1);
const int H = input.dim32(2);
const int W = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
auto output = Output(OUTPUT, input.sizes(), at::dtype<float>());
mean->Resize(N, C);
inv_stdev->Resize(N, C);
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
auto output_data = output->template mutable_data<float>();
auto mean_data = mean->template mutable_data<float>();
auto inv_stdev_data = inv_stdev->template mutable_data<float>();
// NCHW layout: the H*W pixels of one (n, c) slice are contiguous.
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = H * W;
const auto dim_stride = 1;
// Stage 1: per-(n, c) spatial means.
InstanceNormMeanKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, C, dim, N_stride, C_stride, dim_stride, input_data, mean_data);
// Stage 2: per-(n, c) inverse standard deviations.
InstanceNormInvStdevKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_data);
// Stage 3: normalize every element and apply the channel affine transform.
InstanceNormKernel<<<
CAFFE_GET_BLOCKS(N * C * H * W),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
mean_data,
inv_stdev_data,
output_data);
return true;
}
// Backward instance normalization for NHWC-ordered input.
// Inputs: INPUT, SCALE, BIAS, OUTPUT_GRAD, plus optionally the forward-pass
// MEAN and INV_STDEV; missing statistics are recomputed on the fly.
// Outputs: gradients w.r.t. the input, scale and bias.
template <>
bool InstanceNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& input = Input(INPUT);
const auto& scale = Input(SCALE);
const auto& bias = Input(BIAS);
const auto& output_grad = Input(OUTPUT_GRAD);
// Saved statistics are optional inputs; fall back to member scratch tensors.
const auto& mean = InputSize() >= 5 ? Input(MEAN) : mean_;
const auto& inv_stdev = InputSize() >= 6 ? Input(INV_STDEV) : inv_stdev_;
CAFFE_ENFORCE_EQ(4, input.ndim());
const int N = input.dim32(0);
const int H = input.dim32(1);
const int W = input.dim32(2);
const int C = input.dim32(3);
CAFFE_ENFORCE_EQ(1, scale.ndim());
CAFFE_ENFORCE_EQ(C, scale.dim32(0));
CAFFE_ENFORCE_EQ(1, bias.ndim());
CAFFE_ENFORCE_EQ(C, bias.dim32(0));
CAFFE_ENFORCE_EQ(4, output_grad.ndim());
CAFFE_ENFORCE_EQ(N, output_grad.dim32(0));
CAFFE_ENFORCE_EQ(H, output_grad.dim32(1));
CAFFE_ENFORCE_EQ(W, output_grad.dim32(2));
CAFFE_ENFORCE_EQ(C, output_grad.dim32(3));
auto input_grad = Output(INPUT_GRAD, input.sizes(), at::dtype<float>());
auto scale_grad = Output(SCALE_GRAD, scale.sizes(), at::dtype<float>());
auto bias_grad = Output(BIAS_GRAD, bias.sizes(), at::dtype<float>());
const auto input_data = input.data<float>();
const auto scale_data = scale.data<float>();
const auto bias_data = bias.data<float>();
const auto output_grad_data = output_grad.data<float>();
auto input_grad_data = input_grad->template mutable_data<float>();
auto scale_grad_data = scale_grad->template mutable_data<float>();
auto bias_grad_data = bias_grad->template mutable_data<float>();
// NHWC layout: channel innermost, spatial elements of a slice are C apart.
const auto dim = H * W;
const auto N_stride = C * H * W;
const auto C_stride = 1;
const auto dim_stride = C;
// Recompute the per-(n, c) means if they were not passed in.
if (InputSize() < 5) {
ReinitializeTensor(&mean_, {N, C}, at::dtype<float>().device(CUDA));
auto mean_mutable_data = mean_.mutable_data<float>();
InstanceNormMeanKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_mutable_data);
}
CAFFE_ENFORCE_EQ(2, mean.ndim());
CAFFE_ENFORCE_EQ(N, mean.dim32(0));
CAFFE_ENFORCE_EQ(C, mean.dim32(1));
const auto mean_data = mean.data<float>();
// Recompute the inverse standard deviations if they were not passed in.
if (InputSize() < 6) {
ReinitializeTensor(&inv_stdev_, {N, C}, at::dtype<float>().device(CUDA));
auto inv_stdev_mutable_data = inv_stdev_.mutable_data<float>();
InstanceNormInvStdevKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
epsilon_,
input_data,
mean_data,
inv_stdev_mutable_data);
}
CAFFE_ENFORCE_EQ(2, inv_stdev.ndim());
CAFFE_ENFORCE_EQ(N, inv_stdev.dim32(0));
CAFFE_ENFORCE_EQ(C, inv_stdev.dim32(1));
const auto inv_stdev_data = inv_stdev.data<float>();
// d(scale) / d(bias): one thread per channel.
InstanceNormScaleBiasGradientKernel<<<
CAFFE_GET_BLOCKS(C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
mean_data,
output_grad_data,
inv_stdev_data,
scale_grad_data,
bias_grad_data);
// d(input): one thread per (n, c) slice.
InstanceNormGradientKernel<<<
CAFFE_GET_BLOCKS(N * C),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
C,
dim,
N_stride,
C_stride,
dim_stride,
input_data,
scale_data,
bias_data,
output_grad_data,
mean_data,
inv_stdev_data,
input_grad_data);
return true;
}
// Backward instance normalization for NCHW-ordered input.
// Inputs: INPUT, SCALE, BIAS, OUTPUT_GRAD, plus optionally the forward-pass
// MEAN and INV_STDEV; missing statistics are recomputed on the fly.
// Outputs: gradients w.r.t. the input, scale and bias.
template <>
bool InstanceNormGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
  const auto& input = Input(INPUT);
  const auto& scale = Input(SCALE);
  const auto& bias = Input(BIAS);
  const auto& output_grad = Input(OUTPUT_GRAD);
  // Saved statistics are optional inputs; fall back to member scratch tensors.
  const auto& mean = InputSize() >= 5 ? Input(MEAN) : mean_;
  const auto& inv_stdev = InputSize() >= 6 ? Input(INV_STDEV) : inv_stdev_;
  CAFFE_ENFORCE_EQ(4, input.ndim());
  const int N = input.dim32(0);
  const int C = input.dim32(1);
  const int H = input.dim32(2);
  const int W = input.dim32(3);
  CAFFE_ENFORCE_EQ(1, scale.ndim());
  CAFFE_ENFORCE_EQ(C, scale.dim32(0));
  CAFFE_ENFORCE_EQ(1, bias.ndim());
  CAFFE_ENFORCE_EQ(C, bias.dim32(0));
  CAFFE_ENFORCE_EQ(4, output_grad.ndim());
  CAFFE_ENFORCE_EQ(N, output_grad.dim32(0));
  CAFFE_ENFORCE_EQ(C, output_grad.dim32(1));
  CAFFE_ENFORCE_EQ(H, output_grad.dim32(2));
  CAFFE_ENFORCE_EQ(W, output_grad.dim32(3));
  auto input_grad = Output(INPUT_GRAD, input.sizes(), at::dtype<float>());
  auto scale_grad = Output(SCALE_GRAD, scale.sizes(), at::dtype<float>());
  auto bias_grad = Output(BIAS_GRAD, bias.sizes(), at::dtype<float>());
  const auto input_data = input.data<float>();
  const auto scale_data = scale.data<float>();
  const auto bias_data = bias.data<float>();
  const auto output_grad_data = output_grad.data<float>();
  auto input_grad_data = input_grad->template mutable_data<float>();
  auto scale_grad_data = scale_grad->template mutable_data<float>();
  auto bias_grad_data = bias_grad->template mutable_data<float>();
  // NCHW layout: the H*W pixels of one (n, c) slice are contiguous.
  const auto dim = H * W;
  const auto N_stride = C * H * W;
  const auto C_stride = H * W;
  const auto dim_stride = 1;
  // Recompute the per-(n, c) means if they were not passed in.
  if (InputSize() < 5) {
    // Consistency fix: use ReinitializeTensor, as the NHWC variant already
    // does, instead of Resize, so the scratch tensor is created with the
    // correct dtype on the CUDA device.
    ReinitializeTensor(&mean_, {N, C}, at::dtype<float>().device(CUDA));
    auto mean_mutable_data = mean_.mutable_data<float>();
    InstanceNormMeanKernel<<<
        CAFFE_GET_BLOCKS(N * C),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context_.cuda_stream()>>>(
        N,
        C,
        dim,
        N_stride,
        C_stride,
        dim_stride,
        input_data,
        mean_mutable_data);
  }
  CAFFE_ENFORCE_EQ(2, mean.ndim());
  CAFFE_ENFORCE_EQ(N, mean.dim32(0));
  CAFFE_ENFORCE_EQ(C, mean.dim32(1));
  const auto mean_data = mean.data<float>();
  // Recompute the inverse standard deviations if they were not passed in.
  if (InputSize() < 6) {
    // Same consistency fix as above for the inverse-stdev scratch tensor.
    ReinitializeTensor(&inv_stdev_, {N, C}, at::dtype<float>().device(CUDA));
    auto inv_stdev_mutable_data = inv_stdev_.mutable_data<float>();
    InstanceNormInvStdevKernel<<<
        CAFFE_GET_BLOCKS(N * C),
        CAFFE_CUDA_NUM_THREADS,
        0,
        context_.cuda_stream()>>>(
        N,
        C,
        dim,
        N_stride,
        C_stride,
        dim_stride,
        epsilon_,
        input_data,
        mean_data,
        inv_stdev_mutable_data);
  }
  CAFFE_ENFORCE_EQ(2, inv_stdev.ndim());
  CAFFE_ENFORCE_EQ(N, inv_stdev.dim32(0));
  CAFFE_ENFORCE_EQ(C, inv_stdev.dim32(1));
  const auto inv_stdev_data = inv_stdev.data<float>();
  // d(scale) / d(bias): one thread per channel.
  InstanceNormScaleBiasGradientKernel<<<
      CAFFE_GET_BLOCKS(C),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      N,
      C,
      dim,
      N_stride,
      C_stride,
      dim_stride,
      input_data,
      mean_data,
      output_grad_data,
      inv_stdev_data,
      scale_grad_data,
      bias_grad_data);
  // d(input): one thread per (n, c) slice.
  InstanceNormGradientKernel<<<
      CAFFE_GET_BLOCKS(N * C),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      N,
      C,
      dim,
      N_stride,
      C_stride,
      dim_stride,
      input_data,
      scale_data,
      bias_data,
      output_grad_data,
      mean_data,
      inv_stdev_data,
      input_grad_data);
  return true;
}
REGISTER_CUDA_OPERATOR(InstanceNorm, InstanceNormOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
InstanceNormGradient,
InstanceNormGradientOp<float, CUDAContext>);
} // namespace caffe2
|
d028ff6d9fc37f6911af6f6adee5048debe70d0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* calcInteractionParticlesOnFluid.cu
*
* Created on: 27-10-2015
* Author: Kamil Szewc (kamil.szewc@gmail.com)
*/
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/calcGridHash.cuh"
#include "../../methods/calcGridPos.cuh"
#include "../../methods/calcRelPosVelNoSlip.cuh"
#include "wcsphStandardDispersedPhase.h"
// Drag coupling coefficient between the liquid and dispersed phases,
// built from the relative velocity `dvel`, liquid density `dLiquid`,
// the two phase fractions `oLiquid`/`oDust`, particle diameter `d`
// and kinematic viscosity `visc`.
__device__ static float calcK(real2 dvel, real dLiquid, real oLiquid, real oDust, real d, real visc)
{
	// Magnitude of the relative velocity between the phases.
	real speed = sqrt(pow2(dvel.x) + pow2(dvel.y));
	// Particle Reynolds number for that relative motion.
	real re = d * speed / visc;
	// Drag coefficient; zero when there is no relative motion (re == 0).
	real cd = (re > 0.0) ? 24.0 * sqrt(1.0 + (3.0 * re / 16.0)) / re : 0.0;
	return (3.0 / 4.0) * dLiquid * oLiquid * oDust * cd * speed * pow(oLiquid, -2.65) / d;
}
// Pairwise contribution of dispersed-phase particle p[j] on fluid
// particle pFluid[i]: packs pressure-gradient terms (x, y), drag/viscous
// terms (z, u) and a density-rate term (v) into a real6.
__device__ static real6 interaction(uint j, Particle *p, uint i, Particle *pFluid, real2 dpos, real2 dvel, Parameters *par)
{
	real r = sqrt(pow2(dpos.x) + pow2(dpos.y));
	real q = r * par->I_H;
	// Outside the kernel support (q >= 2) there is no contribution.
	// Written as !(q < 2.0) so non-finite q also falls through to zero,
	// matching the original if/else structure exactly.
	if (!(q < 2.0))
	{
		return MAKE_REAL6(0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
	}
	real gkx = grad_of_kern(dpos.x, q, par->I_H);
	real gky = grad_of_kern(dpos.y, q, par->I_H);
	real k = kern_kwon_monaghan(q, par->I_H);
	real D = 0.0005;
	real K = calcK(dvel, pFluid[i].d, pFluid[i].o, p[j].o, D, pFluid[i].nu);
	real pres = p[j].m * p[j].o / p[j].d;
	real visc = 2.0 * p[j].m * K * (dvel.x*dpos.x + dvel.y*dpos.y) * k / ((pow2(r) + 0.01*pow2(par->H)) * p[j].d);
	real dens = dvel.x*gkx + dvel.y*gky;
	return MAKE_REAL6(pres * gkx, pres * gky, visc * dpos.x, visc * dpos.y, p[j].m * dens, 0.0);
}
// For every fluid particle p[index], accumulate the right-hand-side terms
// (pressure, drag/viscous coupling and a mass/density-rate term) produced
// by nearby dispersed-phase particles pPDPF, located via the uniform-grid
// cell lists cellStart/cellEnd.  Particles in boundary cells additionally
// interact with mirrored/shifted neighbour images, selected by
// par->T_BOUNDARY_PERIODICITY.  Expected launch: 1-D grid with at least
// par->N threads.
// NOTE(review): the original corner test for T_BOUNDARY_PERIODICITY == 0
// read (gridPos.y == gridPos.y), which is vacuously true; the parallel
// structure of the surrounding checks indicates (gridPos.y == gridPos2.y)
// was intended, and that is what is used below.  The unused local
// gridHash0 was also removed.
__global__ void calcInteractionParticlesOnFluidWSDP(Particle *p,
	uint *gridParticleIndex,
	uint *cellStart,
	uint *cellEnd,
	Particle *pPDPF,
	Parameters *par)
{
	uint index = threadIdx.x + blockIdx.x*blockDim.x;
	if (index < par->N) {
		real2 pos = MAKE_REAL2(p[index].pos.x, p[index].pos.y);
		int2 gridPos = calcGridPos(pos, par);
		real6 result = MAKE_REAL6(0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
		// Scan the 3x3 block of grid cells around the particle's cell.
		for (int y = -1; y <= 1; y++) {
			for (int x = -1; x <= 1; x++) {
				int2 gridPos2;
				gridPos2.x = gridPos.x + x;
				gridPos2.y = gridPos.y + y;
				if ((gridPos2.x < 0) || (gridPos2.x > par->NXC - 1) || (gridPos2.y < 0) || (gridPos2.y > par->NYC - 1)) continue;
				uint gridHash = calcGridHash(gridPos2, par);
				uint startIndex = cellStart[gridHash];
				if (startIndex != 0xffffffff)
				{
					uint endIndex = cellEnd[gridHash];
					for (uint j = startIndex; j < endIndex; j++)
					{
						real2 dpos, dvel;
						real2 pos1 = p[index].pos;
						real2 pos2 = pPDPF[j].pos;
						real2 vel1 = p[index].vel;
						real2 vel2 = pPDPF[j].vel;
						calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par);
						result += interaction(j, pPDPF, index, p, dpos, dvel, par);
						// Extra mirror-image interactions for boundary cells.
						if (((gridPos.x == 0) && (gridPos2.x == 0)) || ((gridPos.x == par->NXC - 1) && (gridPos2.x == par->NXC - 1)) ||
							((gridPos.y == 0) && (gridPos2.y == 0)) || ((gridPos.y == par->NYC - 1) && (gridPos2.y == par->NYC - 1)))
						{
							if (par->T_BOUNDARY_PERIODICITY != 1)
							{
								// Top/bottom wall reflections.
								if (gridPos.y == gridPos2.y)
								{
									if (gridPos.y == par->NYC - 1)
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 1, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
									if (gridPos.y == 0)
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 3, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
								}
							}
							if (par->T_BOUNDARY_PERIODICITY == 0)
							{
								// Left/right wall reflections.
								if (gridPos.x == gridPos2.x)
								{
									if (gridPos2.x == 0)
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 4, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
									if (gridPos2.x == par->NXC - 1)
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 2, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
								}
								// Corner reflections (fixed condition, see NOTE above).
								if ((gridPos.x == gridPos2.x) && (gridPos.y == gridPos2.y))
								{
									if ((gridPos.x == 0) && (gridPos.y == 0))
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 7, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
									if ((gridPos.x == par->NXC - 1) && (gridPos.y == 0))
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 6, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
									if ((gridPos.x == 0) && (gridPos.y == par->NYC - 1))
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 8, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
									if ((gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1))
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 5, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
								}
							}
						}
					}
				}
			}
		}
		// Horizontally periodic domains: also scan the wrapped column of cells.
		if ((par->T_BOUNDARY_PERIODICITY > 0) && ((gridPos.x == 0) || (gridPos.x == par->NXC - 1)))
		{
			for (int y = -1; y <= 1; y++)
			{
				int2 gridPos2;
				if (gridPos.x == 0) gridPos2.x = par->NXC - 1;
				if (gridPos.x == par->NXC - 1) gridPos2.x = 0;
				gridPos2.y = gridPos.y + y;
				if ((gridPos2.y < 0) || (gridPos2.y > par->NYC - 1)) continue;
				uint gridHash = calcGridHash(gridPos2, par);
				uint startIndex = cellStart[gridHash];
				if (startIndex != 0xffffffff)
				{
					uint endIndex = cellEnd[gridHash];
					for (uint j = startIndex; j < endIndex; j++)
					{
						real2 dpos, dvel;
						real2 pos1 = p[index].pos;
						// Shift the particle by one domain width before pairing.
						if (gridPos.x == 0) pos1.x += par->XCV;
						if (gridPos.x == par->NXC - 1) pos1.x -= par->XCV;
						real2 pos2 = pPDPF[j].pos;
						real2 vel1 = p[index].vel;
						real2 vel2 = pPDPF[j].vel;
						calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par);
						result += interaction(j, pPDPF, index, p, dpos, dvel, par);
					}
				}
			}
		}
		// Fully periodic domains: also scan the wrapped row of cells.
		if ((par->T_BOUNDARY_PERIODICITY == 1) && ((gridPos.y == 0) || (gridPos.y == par->NYC - 1)))
		{
			for (int x = -1; x <= 1; x++)
			{
				int2 gridPos2;
				if (gridPos.y == 0) gridPos2.y = par->NYC - 1;
				if (gridPos.y == par->NYC - 1) gridPos2.y = 0;
				gridPos2.x = gridPos.x + x;
				if ((gridPos2.x < 0) || (gridPos2.x > par->NXC - 1)) continue;
				uint gridHash = calcGridHash(gridPos2, par);
				uint startIndex = cellStart[gridHash];
				if (startIndex != 0xffffffff)
				{
					uint endIndex = cellEnd[gridHash];
					for (uint j = startIndex; j < endIndex; j++)
					{
						real2 dpos, dvel;
						real2 pos1 = p[index].pos;
						if (gridPos.y == 0) pos1.y += par->YCV;
						if (gridPos.y == par->NYC - 1) pos1.y -= par->YCV;
						real2 pos2 = pPDPF[j].pos;
						real2 vel1 = p[index].vel;
						real2 vel2 = pPDPF[j].vel;
						calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par);
						result += interaction(j, pPDPF, index, p, dpos, dvel, par);
					}
				}
			}
		}
		// Fully periodic domains: diagonally opposite corner cell.
		if (par->T_BOUNDARY_PERIODICITY == 1)
		{
			if (((gridPos.x == 0) && (gridPos.y == 0))
				|| ((gridPos.x == 0) && (gridPos.y == par->NYC - 1))
				|| ((gridPos.x == par->NXC - 1) && (gridPos.y == 0))
				|| ((gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1)))
			{
				int2 gridPos2;
				if (gridPos.x == 0) gridPos2.x = par->NXC - 1;
				else gridPos2.x = 0;
				if (gridPos.y == 0) gridPos2.y = par->NYC - 1;
				else gridPos2.y = 0;
				uint gridHash = calcGridHash(gridPos2, par);
				uint startIndex = cellStart[gridHash];
				if (startIndex != 0xffffffff)
				{
					uint endIndex = cellEnd[gridHash];
					for (uint j = startIndex; j < endIndex; j++)
					{
						real2 dpos, dvel;
						real2 pos1 = p[index].pos;
						if (gridPos.x == 0) pos1.x += par->XCV;
						else pos1.x -= par->XCV;
						if (gridPos.y == 0) pos1.y += par->YCV;
						else pos1.y -= par->YCV;
						real2 pos2 = pPDPF[j].pos;
						real2 vel1 = p[index].vel;
						real2 vel2 = pPDPF[j].vel;
						calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par);
						result += interaction(j, pPDPF, index, p, dpos, dvel, par);
					}
				}
			}
		}
		// Periodic in x with walls in y: wrapped corner column, reflected in y.
		if (par->T_BOUNDARY_PERIODICITY == 2)
		{
			if (((gridPos.x == 0) && (gridPos.y == 0))
				|| ((gridPos.x == 0) && (gridPos.y == par->NYC - 1))
				|| ((gridPos.x == par->NXC - 1) && (gridPos.y == 0))
				|| ((gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1)))
			{
				int2 gridPos2;
				if (gridPos.x == 0) gridPos2.x = par->NXC - 1;
				else gridPos2.x = 0;
				gridPos2.y = gridPos.y;
				uint gridHash = calcGridHash(gridPos2, par);
				uint startIndex = cellStart[gridHash];
				if (startIndex != 0xffffffff)
				{
					uint endIndex = cellEnd[gridHash];
					for (uint j = startIndex; j < endIndex; j++)
					{
						real2 dpos, dvel;
						real2 pos1 = p[index].pos;
						if (gridPos.x == 0) pos1.x += par->XCV;
						else pos1.x -= par->XCV;
						real2 pos2 = pPDPF[j].pos;
						real2 vel1 = p[index].vel;
						real2 vel2 = pPDPF[j].vel;
						if (gridPos.y == par->NYC - 1)
						{
							calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 1, &dpos, &dvel, par);
							result += interaction(j, pPDPF, index, p, dpos, dvel, par);
						}
						if (gridPos.y == 0)
						{
							calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 3, &dpos, &dvel, par);
							result += interaction(j, pPDPF, index, p, dpos, dvel, par);
						}
					}
				}
			}
		}
		// Write back the accumulated right-hand-side terms.
		p[index].rh_vel.x = -( (result.x * p[index].p) + result.z) / p[index].d;
		p[index].rh_vel.y = -( (result.y * p[index].p) + result.u) / p[index].d;
		p[index].rh_m = -result.v / (pPDPF[0].di);
	}
}
| d028ff6d9fc37f6911af6f6adee5048debe70d0c.cu | /*
* calcInteractionParticlesOnFluid.cu
*
* Created on: 27-10-2015
* Author: Kamil Szewc (kamil.szewc@gmail.com)
*/
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/calcGridHash.cuh"
#include "../../methods/calcGridPos.cuh"
#include "../../methods/calcRelPosVelNoSlip.cuh"
#include "wcsphStandardDispersedPhase.h"
// Drag coupling coefficient between the liquid and dispersed phases,
// built from the relative velocity `dvel`, liquid density `dLiquid`,
// the two phase fractions `oLiquid`/`oDust`, particle diameter `d`
// and kinematic viscosity `visc`.
__device__ static float calcK(real2 dvel, real dLiquid, real oLiquid, real oDust, real d, real visc)
{
	// Magnitude of the relative velocity between the phases.
	real speed = sqrt(pow2(dvel.x) + pow2(dvel.y));
	// Particle Reynolds number for that relative motion.
	real re = d * speed / visc;
	// Drag coefficient; zero when there is no relative motion (re == 0).
	real cd = (re > 0.0) ? 24.0 * sqrt(1.0 + (3.0 * re / 16.0)) / re : 0.0;
	return (3.0 / 4.0) * dLiquid * oLiquid * oDust * cd * speed * pow(oLiquid, -2.65) / d;
}
// Pairwise contribution of dispersed-phase particle p[j] on fluid
// particle pFluid[i]: packs pressure-gradient terms (x, y), drag/viscous
// terms (z, u) and a density-rate term (v) into a real6.
__device__ static real6 interaction(uint j, Particle *p, uint i, Particle *pFluid, real2 dpos, real2 dvel, Parameters *par)
{
	real r = sqrt(pow2(dpos.x) + pow2(dpos.y));
	real q = r * par->I_H;
	// Outside the kernel support (q >= 2) there is no contribution.
	// Written as !(q < 2.0) so non-finite q also falls through to zero,
	// matching the original if/else structure exactly.
	if (!(q < 2.0))
	{
		return MAKE_REAL6(0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
	}
	real gkx = grad_of_kern(dpos.x, q, par->I_H);
	real gky = grad_of_kern(dpos.y, q, par->I_H);
	real k = kern_kwon_monaghan(q, par->I_H);
	real D = 0.0005;
	real K = calcK(dvel, pFluid[i].d, pFluid[i].o, p[j].o, D, pFluid[i].nu);
	real pres = p[j].m * p[j].o / p[j].d;
	real visc = 2.0 * p[j].m * K * (dvel.x*dpos.x + dvel.y*dpos.y) * k / ((pow2(r) + 0.01*pow2(par->H)) * p[j].d);
	real dens = dvel.x*gkx + dvel.y*gky;
	return MAKE_REAL6(pres * gkx, pres * gky, visc * dpos.x, visc * dpos.y, p[j].m * dens, 0.0);
}
// For every fluid particle p[index], accumulate the right-hand-side terms
// (pressure, drag/viscous coupling and a mass/density-rate term) produced
// by nearby dispersed-phase particles pPDPF, located via the uniform-grid
// cell lists cellStart/cellEnd.  Particles in boundary cells additionally
// interact with mirrored/shifted neighbour images, selected by
// par->T_BOUNDARY_PERIODICITY.  Expected launch: 1-D grid with at least
// par->N threads.
// NOTE(review): the original corner test for T_BOUNDARY_PERIODICITY == 0
// read (gridPos.y == gridPos.y), which is vacuously true; the parallel
// structure of the surrounding checks indicates (gridPos.y == gridPos2.y)
// was intended, and that is what is used below.  The unused local
// gridHash0 was also removed.
__global__ void calcInteractionParticlesOnFluidWSDP(Particle *p,
	uint *gridParticleIndex,
	uint *cellStart,
	uint *cellEnd,
	Particle *pPDPF,
	Parameters *par)
{
	uint index = threadIdx.x + blockIdx.x*blockDim.x;
	if (index < par->N) {
		real2 pos = MAKE_REAL2(p[index].pos.x, p[index].pos.y);
		int2 gridPos = calcGridPos(pos, par);
		real6 result = MAKE_REAL6(0.0, 0.0, 0.0, 0.0, 0.0, 0.0);
		// Scan the 3x3 block of grid cells around the particle's cell.
		for (int y = -1; y <= 1; y++) {
			for (int x = -1; x <= 1; x++) {
				int2 gridPos2;
				gridPos2.x = gridPos.x + x;
				gridPos2.y = gridPos.y + y;
				if ((gridPos2.x < 0) || (gridPos2.x > par->NXC - 1) || (gridPos2.y < 0) || (gridPos2.y > par->NYC - 1)) continue;
				uint gridHash = calcGridHash(gridPos2, par);
				uint startIndex = cellStart[gridHash];
				if (startIndex != 0xffffffff)
				{
					uint endIndex = cellEnd[gridHash];
					for (uint j = startIndex; j < endIndex; j++)
					{
						real2 dpos, dvel;
						real2 pos1 = p[index].pos;
						real2 pos2 = pPDPF[j].pos;
						real2 vel1 = p[index].vel;
						real2 vel2 = pPDPF[j].vel;
						calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par);
						result += interaction(j, pPDPF, index, p, dpos, dvel, par);
						// Extra mirror-image interactions for boundary cells.
						if (((gridPos.x == 0) && (gridPos2.x == 0)) || ((gridPos.x == par->NXC - 1) && (gridPos2.x == par->NXC - 1)) ||
							((gridPos.y == 0) && (gridPos2.y == 0)) || ((gridPos.y == par->NYC - 1) && (gridPos2.y == par->NYC - 1)))
						{
							if (par->T_BOUNDARY_PERIODICITY != 1)
							{
								// Top/bottom wall reflections.
								if (gridPos.y == gridPos2.y)
								{
									if (gridPos.y == par->NYC - 1)
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 1, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
									if (gridPos.y == 0)
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 3, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
								}
							}
							if (par->T_BOUNDARY_PERIODICITY == 0)
							{
								// Left/right wall reflections.
								if (gridPos.x == gridPos2.x)
								{
									if (gridPos2.x == 0)
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 4, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
									if (gridPos2.x == par->NXC - 1)
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 2, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
								}
								// Corner reflections (fixed condition, see NOTE above).
								if ((gridPos.x == gridPos2.x) && (gridPos.y == gridPos2.y))
								{
									if ((gridPos.x == 0) && (gridPos.y == 0))
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 7, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
									if ((gridPos.x == par->NXC - 1) && (gridPos.y == 0))
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 6, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
									if ((gridPos.x == 0) && (gridPos.y == par->NYC - 1))
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 8, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
									if ((gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1))
									{
										calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 5, &dpos, &dvel, par);
										result += interaction(j, pPDPF, index, p, dpos, dvel, par);
									}
								}
							}
						}
					}
				}
			}
		}
		// Horizontally periodic domains: also scan the wrapped column of cells.
		if ((par->T_BOUNDARY_PERIODICITY > 0) && ((gridPos.x == 0) || (gridPos.x == par->NXC - 1)))
		{
			for (int y = -1; y <= 1; y++)
			{
				int2 gridPos2;
				if (gridPos.x == 0) gridPos2.x = par->NXC - 1;
				if (gridPos.x == par->NXC - 1) gridPos2.x = 0;
				gridPos2.y = gridPos.y + y;
				if ((gridPos2.y < 0) || (gridPos2.y > par->NYC - 1)) continue;
				uint gridHash = calcGridHash(gridPos2, par);
				uint startIndex = cellStart[gridHash];
				if (startIndex != 0xffffffff)
				{
					uint endIndex = cellEnd[gridHash];
					for (uint j = startIndex; j < endIndex; j++)
					{
						real2 dpos, dvel;
						real2 pos1 = p[index].pos;
						// Shift the particle by one domain width before pairing.
						if (gridPos.x == 0) pos1.x += par->XCV;
						if (gridPos.x == par->NXC - 1) pos1.x -= par->XCV;
						real2 pos2 = pPDPF[j].pos;
						real2 vel1 = p[index].vel;
						real2 vel2 = pPDPF[j].vel;
						calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par);
						result += interaction(j, pPDPF, index, p, dpos, dvel, par);
					}
				}
			}
		}
		// Fully periodic domains: also scan the wrapped row of cells.
		if ((par->T_BOUNDARY_PERIODICITY == 1) && ((gridPos.y == 0) || (gridPos.y == par->NYC - 1)))
		{
			for (int x = -1; x <= 1; x++)
			{
				int2 gridPos2;
				if (gridPos.y == 0) gridPos2.y = par->NYC - 1;
				if (gridPos.y == par->NYC - 1) gridPos2.y = 0;
				gridPos2.x = gridPos.x + x;
				if ((gridPos2.x < 0) || (gridPos2.x > par->NXC - 1)) continue;
				uint gridHash = calcGridHash(gridPos2, par);
				uint startIndex = cellStart[gridHash];
				if (startIndex != 0xffffffff)
				{
					uint endIndex = cellEnd[gridHash];
					for (uint j = startIndex; j < endIndex; j++)
					{
						real2 dpos, dvel;
						real2 pos1 = p[index].pos;
						if (gridPos.y == 0) pos1.y += par->YCV;
						if (gridPos.y == par->NYC - 1) pos1.y -= par->YCV;
						real2 pos2 = pPDPF[j].pos;
						real2 vel1 = p[index].vel;
						real2 vel2 = pPDPF[j].vel;
						calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par);
						result += interaction(j, pPDPF, index, p, dpos, dvel, par);
					}
				}
			}
		}
		// Fully periodic domains: diagonally opposite corner cell.
		if (par->T_BOUNDARY_PERIODICITY == 1)
		{
			if (((gridPos.x == 0) && (gridPos.y == 0))
				|| ((gridPos.x == 0) && (gridPos.y == par->NYC - 1))
				|| ((gridPos.x == par->NXC - 1) && (gridPos.y == 0))
				|| ((gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1)))
			{
				int2 gridPos2;
				if (gridPos.x == 0) gridPos2.x = par->NXC - 1;
				else gridPos2.x = 0;
				if (gridPos.y == 0) gridPos2.y = par->NYC - 1;
				else gridPos2.y = 0;
				uint gridHash = calcGridHash(gridPos2, par);
				uint startIndex = cellStart[gridHash];
				if (startIndex != 0xffffffff)
				{
					uint endIndex = cellEnd[gridHash];
					for (uint j = startIndex; j < endIndex; j++)
					{
						real2 dpos, dvel;
						real2 pos1 = p[index].pos;
						if (gridPos.x == 0) pos1.x += par->XCV;
						else pos1.x -= par->XCV;
						if (gridPos.y == 0) pos1.y += par->YCV;
						else pos1.y -= par->YCV;
						real2 pos2 = pPDPF[j].pos;
						real2 vel1 = p[index].vel;
						real2 vel2 = pPDPF[j].vel;
						calcRelPosVelNoSlip(pos1, pos2, vel1, vel2, 0, &dpos, &dvel, par);
						result += interaction(j, pPDPF, index, p, dpos, dvel, par);
					}
				}
			}
		}
		// Periodic in x with walls in y: wrapped corner column, reflected in y.
		if (par->T_BOUNDARY_PERIODICITY == 2)
		{
			if (((gridPos.x == 0) && (gridPos.y == 0))
				|| ((gridPos.x == 0) && (gridPos.y == par->NYC - 1))
				|| ((gridPos.x == par->NXC - 1) && (gridPos.y == 0))
				|| ((gridPos.x == par->NXC - 1) && (gridPos.y == par->NYC - 1)))
			{
				int2 gridPos2;
				if (gridPos.x == 0) gridPos2.x = par->NXC - 1;
				else gridPos2.x = 0;
				gridPos2.y = gridPos.y;
				uint gridHash = calcGridHash(gridPos2, par);
				uint startIndex = cellStart[gridHash];
				if (startIndex != 0xffffffff)
				{
					uint endIndex = cellEnd[gridHash];
					for (uint j = startIndex; j < endIndex; j++)
					{
						real2 dpos, dvel;
						real2 pos1 = p[index].pos;
						if (gridPos.x == 0) pos1.x += par->XCV;
						else pos1.x -= par->XCV;
						real2 pos2 = pPDPF[j].pos;
						real2 vel1 = p[index].vel;
						real2 vel2 = pPDPF[j].vel;
						if (gridPos.y == par->NYC - 1)
						{
							calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 1, &dpos, &dvel, par);
							result += interaction(j, pPDPF, index, p, dpos, dvel, par);
						}
						if (gridPos.y == 0)
						{
							calcRelPosVelNoSlip(pos2, pos1, vel2, vel1, 3, &dpos, &dvel, par);
							result += interaction(j, pPDPF, index, p, dpos, dvel, par);
						}
					}
				}
			}
		}
		// Write back the accumulated right-hand-side terms.
		p[index].rh_vel.x = -( (result.x * p[index].p) + result.z) / p[index].d;
		p[index].rh_vel.y = -( (result.y * p[index].p) + result.u) / p[index].d;
		p[index].rh_m = -result.v / (pPDPF[0].di);
	}
}
|
5d1d95c7bf30d2ed499bb89988f0224c3b931751.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Kernel: each launched GPU thread prints one greeting line.
__global__ void HelloFromGPU()
{
    printf("Hello World from GPU!\n");
}
// Prints from the host, launches 10 GPU threads that print from the
// device, then waits for the kernel before tearing the device down.
int main()
{
    printf("Hello World from CPU!\n");
    hipLaunchKernelGGL(( HelloFromGPU), dim3(1), dim3(10), 0, 0, );
    // Kernel launches are asynchronous: block until the kernel has
    // finished (this also flushes the device-side printf buffer)
    // before resetting the device.
    hipDeviceSynchronize();
    hipDeviceReset();
    return 0;
}
} | 5d1d95c7bf30d2ed499bb89988f0224c3b931751.cu | #include <stdio.h>
// Kernel: each launched GPU thread prints one greeting line.
__global__ void HelloFromGPU()
{
    printf("Hello World from GPU!\n");
}
// Prints from the host, launches 10 GPU threads that print from the
// device, then waits for the kernel before tearing the device down.
int main()
{
    printf("Hello World from CPU!\n");
    HelloFromGPU<<<1, 10>>>();
    // Kernel launches are asynchronous: block until the kernel has
    // finished (this also flushes the device-side printf buffer)
    // before resetting the device.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
3c8ca0ebc134ff1a7d18a5776d669b04215621b5.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/blob_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void BlobDataLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Take the next prefetched batch (blocks when the loader is behind).
  BlobBatch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
  int top_index = 0;
  // Forward every prefetched data blob into the corresponding top blob.
  for (size_t i = 0; i < batch->data_.size(); ++i, ++top_index) {
    Blob<Dtype>* src = batch->data_[i];
    top[top_index]->ReshapeLike(*src);
    caffe_copy(src->count(), src->cpu_data(),
               top[top_index]->mutable_cpu_data());
  }
  // Label blobs (if any) follow the data blobs in the top vector.
  if (this->output_labels_) {
    for (size_t i = 0; i < batch->labels_.size(); ++i, ++top_index) {
      Blob<Dtype>* src = batch->labels_[i];
      top[top_index]->ReshapeLike(*src);
      caffe_copy(src->count(), src->cpu_data(),
                 top[top_index]->mutable_cpu_data());
    }
  }
  // Ensure the copy is synchronous wrt the host, so that the next batch
  // isn't copied in meanwhile.
  CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
  prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BlobDataLayer);
} // namespace caffe
| 3c8ca0ebc134ff1a7d18a5776d669b04215621b5.cu | #include <vector>
#include "caffe/layers/blob_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void BlobDataLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // Take the next prefetched batch (blocks when the loader is behind).
  BlobBatch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
  int top_index = 0;
  // Forward every prefetched data blob into the corresponding top blob.
  for (size_t i = 0; i < batch->data_.size(); ++i, ++top_index) {
    Blob<Dtype>* src = batch->data_[i];
    top[top_index]->ReshapeLike(*src);
    caffe_copy(src->count(), src->cpu_data(),
               top[top_index]->mutable_cpu_data());
  }
  // Label blobs (if any) follow the data blobs in the top vector.
  if (this->output_labels_) {
    for (size_t i = 0; i < batch->labels_.size(); ++i, ++top_index) {
      Blob<Dtype>* src = batch->labels_[i];
      top[top_index]->ReshapeLike(*src);
      caffe_copy(src->count(), src->cpu_data(),
                 top[top_index]->mutable_cpu_data());
    }
  }
  // Ensure the copy is synchronous wrt the host, so that the next batch
  // isn't copied in meanwhile.
  CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
  prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BlobDataLayer);
} // namespace caffe
|
c42048c8bb3f9a69f23596828d39400f3375e9bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "transformer.h"
// Source image sampled by all kernels below (bound by the host wrappers).
texture<uint8_t, 2, hipReadModeElementType> tex_ref; // TEXTURE REFERENCE MUST BE A GLOBAL VARIABLE
// Upper bound on threads per block used when choosing launch configs.
const int THREAD_LIMIT = 960;
// Floats per 3x3 matrix / per cube-face record.
const int PARAM_NUM = 9;
// Rotation matrices uploaded from the host via hipMemcpyToSymbol
// (two slots; the BEP kernel selects one with its `s` offset).
__constant__ float mat[2 * PARAM_NUM];
// Six cube faces, 9 floats each: face origin (3), u-axis (3), v-axis (3);
// indexed as cube[9 * (3 * fv + fu) + k] by the cubemap kernels.
__constant__ float cube[6 * PARAM_NUM] = {0.5, 0.5, 0.5, 0.0, 0.0, -1.0, 0.0, -1.0, 0.0,
0.5, 0.5, -0.5, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0,
-0.5, 0.5, -0.5, 0.0, 0.0, 1.0, 0.0, -1.0, 0.0,
-0.5, -0.5, -0.5, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0,
-0.5, -0.5, 0.5, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0,
-0.5, 0.5, 0.5, 0.0, 0.0, -1.0, 1.0, 0.0, 0.0};
// Multiply by 1/pi (precomputed reciprocal avoids a division).
__device__ float CUDADividedByPi(float val) {return 0.3183098861837907 * val;}
// Multiply by pi.
__device__ float CUDATimesPi(float val) {return 3.141592653589793 * val;}
// Resamples the bound texture into an output image of width `w` with
// `n` pixels total; rw/rh are the reciprocal output dimensions.
// One thread per output pixel, sampling at the pixel center in
// normalized texture coordinates.
__global__ void CUDAScaleKernel(uint8_t* transformed_data,
                                int w,
                                float rw,
                                float rh,
                                int n) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= n) return;
  float u = (__int2float_rn(idx % w) + 0.5) * rw;
  float v = (__int2float_rn(idx / w) + 0.5) * rh;
  transformed_data[idx] = tex2D(tex_ref, u, v);
}
// Equirectangular rotation: maps each output pixel to a unit direction
// (the su/cu, sv/cv construction yields a unit vector), rotates it by
// the constant matrix `mat`, and projects back to normalized texture
// coordinates via atan2 (horizontal) and acos (vertical).
__global__ void CUDARotateKernel(uint8_t* transformed_data,
int w,
float rw,
float rh,
int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n) {
float su, cu, sv, cv;
// Angles derived from the pixel's normalized (u, v) position.
sincospif(__fmaf_rn(__fmaf_rn(2.0, __int2float_rn(i % w), 1.0), rw, -1.0), &su, &cu);
sincospif(-__fmaf_rn(__fmaf_rn(1.0, __int2float_rn(i / w), 0.5), rh, -0.5), &sv, &cv);
float x = -su * cv, y = sv, z = -cu * cv;
transformed_data[i] = tex2D(tex_ref,
CUDADividedByPi(0.5 * atan2f(__fmaf_rn(mat[0], x,
__fmaf_rn(mat[1], y,
mat[2] * z)),
__fmaf_rn(mat[6], x,
__fmaf_rn(mat[7], y,
mat[8] * z)))),
CUDADividedByPi(acosf(__fmaf_rn(mat[3], x,
__fmaf_rn(mat[4], y,
mat[5] * z)))));
}
}
// Offset-equirectangular projection: like the rotate kernel, but first
// shifts the view direction by the offset (dx, dy, dz) — solving for the
// ray/sphere intersection via `t` — before rotating with `mat` and
// sampling the texture.
__global__ void CUDATransformKernelOERP(uint8_t* transformed_data,
int w,
float rw,
float rh,
float dx,
float dy,
float dz,
int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n) {
float su, cu, sv, cv;
sincospif(__fmaf_rn(__fmaf_rn(2.0, __int2float_rn(i % w), 1.0), rw, -1.0), &su, &cu);
sincospif(-__fmaf_rn(__fmaf_rn(1.0, __int2float_rn(i / w), 0.5), rh, -0.5), &sv, &cv);
float x = -su * cv, y = sv, z = -cu * cv;
// Ray parameter from the offset origin to the unit sphere.
float m = __fmaf_rn(x, dx, __fmaf_rn(y, dy, z * dz));
float t = m + __fsqrt_rn(__fmaf_rn(m, m, 1.0 -
__fmaf_rn(dx, dx,
__fmaf_rn(dy, dy, dz * dz))));
x = __fmaf_rn(t, x, -dx);
y = __fmaf_rn(t, y, -dy);
z = __fmaf_rn(t, z, -dz);
// Squared length of the (no longer unit) direction; normalized inside acosf.
float l = __fmaf_rn(x, x, __fmaf_rn(y, y, z * z));
transformed_data[i] = tex2D(tex_ref,
CUDADividedByPi(0.5 * atan2f(__fmaf_rn(mat[0], x,
__fmaf_rn(mat[1], y,
mat[2] * z)),
__fmaf_rn(mat[6], x,
__fmaf_rn(mat[7], y,
mat[8] * z)))),
CUDADividedByPi(acosf(__frsqrt_rn(l) *
__fmaf_rn(mat[3], x,
__fmaf_rn(mat[4], y,
mat[5] * z)))));
}
}
// Cubemap projection (3x2 face layout): picks the face from the pixel's
// (fu, fv) cell, applies the expansion factor `ecoef` to the in-face
// coordinates, builds the 3-D direction from the `cube` face basis,
// normalizes it and samples the rotated equirect texture.
__global__ void CUDATransformKernelCMP(uint8_t* transformed_data,
int w,
float rw,
float rh,
float ecoef,
int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n) {
float u = (__int2float_rn(i % w) + 0.5) * rw,
v = (__int2float_rn(i / w) + 0.5) * rh;
// Face cell: fu in [0,3), fv in [0,2).
int fu = __float2int_rd(3.0 * u),
fv = __float2int_rd(2.0 * v);
u = __fmaf_rn(__fmaf_rn(3.0, u, -__int2float_rn(fu)), ecoef, -__fmaf_rn(0.5, ecoef, -0.5));
v = __fmaf_rn(__fmaf_rn(2.0, v, -__int2float_rn(fv)), ecoef, -__fmaf_rn(0.5, ecoef, -0.5));
// Offset of this face's record in the constant `cube` table.
int s = 9 * (3 * fv + fu);
float x = __fmaf_rn(cube[s + 6], v,
__fmaf_rn(cube[s + 3], u,
cube[s])),
y = __fmaf_rn(cube[s + 7], v,
__fmaf_rn(cube[s + 4], u,
cube[s + 1])),
z = __fmaf_rn(cube[s + 8], v,
__fmaf_rn(cube[s + 5], u,
cube[s + 2]));
float l = __frsqrt_rn(__fmaf_rn(x, x, __fmaf_rn(y, y, z * z)));
x *= l; y *= l; z *= l;
transformed_data[i] = tex2D(tex_ref,
CUDADividedByPi(0.5 * atan2f(__fmaf_rn(mat[0], x,
__fmaf_rn(mat[1], y,
mat[2] * z)),
__fmaf_rn(mat[6], x,
__fmaf_rn(mat[7], y,
mat[8] * z)))),
CUDADividedByPi(acosf(__fmaf_rn(mat[3], x,
__fmaf_rn(mat[4], y,
mat[5] * z)))));
}
}
// Offset cubemap projection: cubemap face lookup as in the CMP kernel,
// followed by the (dx, dy, dz) view-offset ray/sphere adjustment used by
// the OERP kernel, then rotation by `mat` and texture sampling.
__global__ void CUDATransformKernelOCMP(uint8_t* transformed_data,
int w,
float rw,
float rh,
float ecoef,
float dx,
float dy,
float dz,
int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n) {
float u = (__int2float_rn(i % w) + 0.5) * rw,
v = (__int2float_rn(i / w) + 0.5) * rh;
int fu = __float2int_rd(3.0 * u),
fv = __float2int_rd(2.0 * v);
u = __fmaf_rn(__fmaf_rn(3.0, u, -__int2float_rn(fu)), ecoef, -__fmaf_rn(0.5, ecoef, -0.5));
v = __fmaf_rn(__fmaf_rn(2.0, v, -__int2float_rn(fv)), ecoef, -__fmaf_rn(0.5, ecoef, -0.5));
int s = 9 * (3 * fv + fu);
float x = __fmaf_rn(cube[s + 6], v,
__fmaf_rn(cube[s + 3], u,
cube[s])),
y = __fmaf_rn(cube[s + 7], v,
__fmaf_rn(cube[s + 4], u,
cube[s + 1])),
z = __fmaf_rn(cube[s + 8], v,
__fmaf_rn(cube[s + 5], u,
cube[s + 2]));
float l = __frsqrt_rn(__fmaf_rn(x, x, __fmaf_rn(y, y, z * z)));
x *= l; y *= l; z *= l;
// Ray parameter from the offset origin to the unit sphere.
float m = __fmaf_rn(x, dx, __fmaf_rn(y, dy, z * dz));
float t = m + __fsqrt_rn(__fmaf_rn(m, m, 1.0 -
__fmaf_rn(dx, dx,
__fmaf_rn(dy, dy, dz * dz))));
x = __fmaf_rn(t, x, -dx);
y = __fmaf_rn(t, y, -dy);
z = __fmaf_rn(t, z, -dz);
l = __fmaf_rn(x, x, __fmaf_rn(y, y, z * z));
transformed_data[i] = tex2D(tex_ref,
CUDADividedByPi(0.5 * atan2f(__fmaf_rn(mat[0], x,
__fmaf_rn(mat[1], y,
mat[2] * z)),
__fmaf_rn(mat[6], x,
__fmaf_rn(mat[7], y,
mat[8] * z)))),
CUDADividedByPi(acosf(__frsqrt_rn(l) *
__fmaf_rn(mat[3], x,
__fmaf_rn(mat[4], y,
mat[5] * z)))));
}
}
// Equi-angular cubemap (EAC): like the CMP kernel, but the in-face
// coordinates are remapped through tanf so samples are spaced equally
// by angle rather than by position on the face.
__global__ void CUDATransformKernelEAC(uint8_t* transformed_data,
int w,
float rw,
float rh,
float ecoef,
int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n) {
float u = (__int2float_rn(i % w) + 0.5) * rw,
v = (__int2float_rn(i / w) + 0.5) * rh;
int fu = __float2int_rd(3.0 * u),
fv = __float2int_rd(2.0 * v);
// Equi-angular remap of the in-face coordinates.
u = __fmaf_rn(0.5 * ecoef, tanf(CUDATimesPi(__fmaf_rn(1.5, u, -__fmaf_rn(0.5, __int2float_rn(fu), 0.25)))), 0.5);
v = __fmaf_rn(0.5 * ecoef, tanf(CUDATimesPi(__fmaf_rn(1.0, v, -__fmaf_rn(0.5, __int2float_rn(fv), 0.25)))), 0.5);
int s = 9 * (3 * fv + fu);
float x = __fmaf_rn(cube[s + 6], v,
__fmaf_rn(cube[s + 3], u,
cube[s])),
y = __fmaf_rn(cube[s + 7], v,
__fmaf_rn(cube[s + 4], u,
cube[s + 1])),
z = __fmaf_rn(cube[s + 8], v,
__fmaf_rn(cube[s + 5], u,
cube[s + 2]));
float l = __frsqrt_rn(__fmaf_rn(x, x, __fmaf_rn(y, y, z * z)));
x *= l; y *= l; z *= l;
transformed_data[i] = tex2D(tex_ref,
CUDADividedByPi(0.5 * atan2f(__fmaf_rn(mat[0], x,
__fmaf_rn(mat[1], y,
mat[2] * z)),
__fmaf_rn(mat[6], x,
__fmaf_rn(mat[7], y,
mat[8] * z)))),
CUDADividedByPi(acosf(__fmaf_rn(mat[3], x,
__fmaf_rn(mat[4], y,
mat[5] * z)))));
}
}
// Offset equi-angular cubemap: EAC face lookup plus the (dx, dy, dz)
// view-offset ray/sphere adjustment, then rotation by `mat` and
// texture sampling.
__global__ void CUDATransformKernelOEAC(uint8_t* transformed_data,
int w,
float rw,
float rh,
float ecoef,
float dx,
float dy,
float dz,
int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n) {
float u = (__int2float_rn(i % w) + 0.5) * rw,
v = (__int2float_rn(i / w) + 0.5) * rh;
int fu = __float2int_rd(3.0 * u),
fv = __float2int_rd(2.0 * v);
// Equi-angular remap of the in-face coordinates.
u = __fmaf_rn(0.5 * ecoef, tanf(CUDATimesPi(__fmaf_rn(1.5, u, -__fmaf_rn(0.5, __int2float_rn(fu), 0.25)))), 0.5);
v = __fmaf_rn(0.5 * ecoef, tanf(CUDATimesPi(__fmaf_rn(1.0, v, -__fmaf_rn(0.5, __int2float_rn(fv), 0.25)))), 0.5);
int s = 9 * (3 * fv + fu);
float x = __fmaf_rn(cube[s + 6], v,
__fmaf_rn(cube[s + 3], u,
cube[s])),
y = __fmaf_rn(cube[s + 7], v,
__fmaf_rn(cube[s + 4], u,
cube[s + 1])),
z = __fmaf_rn(cube[s + 8], v,
__fmaf_rn(cube[s + 5], u,
cube[s + 2]));
float l = __frsqrt_rn(__fmaf_rn(x, x, __fmaf_rn(y, y, z * z)));
x *= l; y *= l; z *= l;
// Ray parameter from the offset origin to the unit sphere.
float m = __fmaf_rn(x, dx, __fmaf_rn(y, dy, z * dz));
float t = m + __fsqrt_rn(__fmaf_rn(m, m, 1.0 -
__fmaf_rn(dx, dx,
__fmaf_rn(dy, dy, dz * dz))));
x = __fmaf_rn(t, x, -dx);
y = __fmaf_rn(t, y, -dy);
z = __fmaf_rn(t, z, -dz);
l = __fmaf_rn(x, x, __fmaf_rn(y, y, z * z));
transformed_data[i] = tex2D(tex_ref,
CUDADividedByPi(0.5 * atan2f(__fmaf_rn(mat[0], x,
__fmaf_rn(mat[1], y,
mat[2] * z)),
__fmaf_rn(mat[6], x,
__fmaf_rn(mat[7], y,
mat[8] * z)))),
CUDADividedByPi(acosf(__frsqrt_rn(l) *
__fmaf_rn(mat[3], x,
__fmaf_rn(mat[4], y,
mat[5] * z)))));
}
}
// Boosted/partial equirectangular projection: like the rotate kernel but
// with reduced angular ranges (1.5x horizontal, ecoef-scaled vertical)
// and with `s` selecting which of the two constant matrices in `mat`
// to rotate by (0 or PARAM_NUM).
__global__ void CUDATransformKernelBEP(uint8_t* transformed_data,
int w,
float rw,
float rh,
float ecoef,
int s,
int n) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < n) {
float su, cu, sv, cv;
sincospif(__fmaf_rn(__fmaf_rn(1.5, __int2float_rn(i % w), 0.75), rw, -0.75), &su, &cu);
sincospif(__fmaf_rn(__fmaf_rn(0.5, __int2float_rn(i / w), 0.25), rh, -0.25) * -ecoef, &sv, &cv);
float x = -su * cv, y = sv, z = -cu * cv;
transformed_data[i] = tex2D(tex_ref,
CUDADividedByPi(0.5 * atan2f(__fmaf_rn(mat[s], x,
__fmaf_rn(mat[s + 1], y,
mat[s + 2] * z)),
__fmaf_rn(mat[s + 6], x,
__fmaf_rn(mat[s + 7], y,
mat[s + 8] * z)))),
CUDADividedByPi(acosf(__fmaf_rn(mat[s + 3], x,
__fmaf_rn(mat[s + 4], y,
mat[s + 5] * z)))));
}
}
__global__ void CUDARenderKernel() {}
float Radians(float degrees) {return 0.017453292519943295 * degrees;}
// Fills `out` (9 floats, row-major 3x3) with the rotation matrix for the
// yaw (y), pitch (p) and roll (r) angles, all in radians.
void RotationMatrix(float y, float p, float r, float* out) {
    const float sy = sin(y), cy = cos(y);
    const float sp = sin(p), cp = cos(p);
    const float sr = sin(r), cr = cos(r);
    out[0] = sy * sp * sr + cy * cr;
    out[1] = sy * sp * cr - cy * sr;
    out[2] = sy * cp;
    out[3] = cp * sr;
    out[4] = cp * cr;
    out[5] = -sp;
    out[6] = cy * sp * sr - sy * cr;
    out[7] = cy * sp * cr + sy * sr;
    out[8] = cy * cp;
}
// Uploads an 8-bit source image of width x height, binds it to the global
// texture, and resamples it into out_data at target_width x target_height.
// Returns 0.
// Fixes relative to the original:
//  - texture-reference attributes are now set BEFORE binding (attributes
//    assigned after the bind do not apply to the current binding);
//  - the grid size uses ceiling division so the last partial block of
//    pixels is not dropped when n is not a multiple of the block size
//    (the kernel's i < n guard already handles the over-coverage).
int CUDAScaleWrapper(const uint8_t* data,
                     int width,
                     int height,
                     int target_width,
                     int target_height,
                     uint8_t* out_data) {
    hipArray* cuArray;
    hipMallocArray(&cuArray, &tex_ref.channelDesc, width, height);
    hipMemcpyToArray(cuArray, 0, 0, data, width * height, hipMemcpyHostToDevice);
    // Configure the texture reference before binding.
    tex_ref.addressMode[0] = hipAddressModeWrap;
    tex_ref.addressMode[1] = hipAddressModeWrap;
    tex_ref.filterMode = hipFilterModePoint; // "hipFilterModeLinear" ONLY USED WITH FLOAT TEXTURE
    tex_ref.normalized = true;
    hipBindTextureToArray(tex_ref, cuArray);
    int n = target_width * target_height;
    float rw = 1.0 / target_width, rh = 1.0 / target_height;
    uint8_t* cuda_data = NULL;
    hipMalloc((void**)&cuda_data, n);
    int nt = min(max(target_width >> 1, target_height), THREAD_LIMIT);
    hipLaunchKernelGGL(( CUDAScaleKernel), dim3((n + nt - 1) / nt), dim3(nt), 0, 0, cuda_data,
        target_width,
        rw, rh, n);
    hipDeviceSynchronize();
    // fprintf(stderr, "%s\n", hipGetErrorName(hipGetLastError())); // DEBUG
    hipMemcpy(out_data, cuda_data, n, hipMemcpyDeviceToHost);
    hipFree(cuda_data);
    hipUnbindTexture(tex_ref);
    hipFreeArray(cuArray);
    return 0;
}
// Uploads the source equirect image, uploads the yaw/pitch/roll rotation
// matrix to constant memory, and renders the rotated view into out_data
// at target_width x target_height.  Returns 0.
// Fixes relative to the original:
//  - texture-reference attributes are now set BEFORE binding (attributes
//    assigned after the bind do not apply to the current binding);
//  - the grid size uses ceiling division so the last partial block of
//    pixels is not dropped when n is not a multiple of the block size.
int CUDARotateWrapper(const uint8_t* data,
                      int width,
                      int height,
                      int target_width,
                      int target_height,
                      float yaw,
                      float pitch,
                      float roll,
                      uint8_t* out_data) {
    hipArray* cuArray;
    hipMallocArray(&cuArray, &tex_ref.channelDesc, width, height);
    hipMemcpyToArray(cuArray, 0, 0, data, width * height, hipMemcpyHostToDevice);
    // Configure the texture reference before binding.
    tex_ref.addressMode[0] = hipAddressModeWrap;
    tex_ref.addressMode[1] = hipAddressModeWrap;
    tex_ref.filterMode = hipFilterModePoint; // "hipFilterModeLinear" ONLY USED WITH FLOAT TEXTURE
    tex_ref.normalized = true;
    hipBindTextureToArray(tex_ref, cuArray);
    // Upload the view rotation matrix to constant memory.
    float* host_mat = (float*)malloc(PARAM_NUM * sizeof(float));
    RotationMatrix(Radians(yaw), Radians(pitch), Radians(-roll), host_mat);
    hipMemcpyToSymbol(mat, host_mat, PARAM_NUM * sizeof(float));
    int n = target_width * target_height;
    float rw = 1.0 / target_width, rh = 1.0 / target_height;
    uint8_t* cuda_data = NULL;
    hipMalloc((void**)&cuda_data, n);
    int nt = min(max(target_width >> 1, target_height), THREAD_LIMIT);
    hipLaunchKernelGGL(( CUDARotateKernel), dim3((n + nt - 1) / nt), dim3(nt), 0, 0, cuda_data,
        target_width,
        rw, rh, n);
    hipDeviceSynchronize();
    // fprintf(stderr, "%s\n", hipGetErrorName(hipGetLastError())); // DEBUG
    hipMemcpy(out_data, cuda_data, n, hipMemcpyDeviceToHost);
    hipFree(cuda_data);
    free(host_mat);
    hipUnbindTexture(tex_ref);
    hipFreeArray(cuArray);
    return 0;
}
// ==== file-local helpers shared by the CUDATransformWrapper cases (HIP) ====

// Configures tex_ref and binds `data` (8-bit image, width x height) to a
// freshly allocated hipArray.  Attributes are assigned BEFORE the bind call:
// the original code set them after hipBindTextureToArray, where texture
// reference state does not reliably apply to the existing binding.
static hipArray* HipBindSourceTexture(const uint8_t* data, int width, int height) {
    tex_ref.addressMode[0] = hipAddressModeWrap;
    tex_ref.addressMode[1] = hipAddressModeWrap;
    tex_ref.filterMode = hipFilterModePoint; // "hipFilterModeLinear" ONLY USED WITH FLOAT TEXTURE
    tex_ref.normalized = true;
    hipArray* cuArray = NULL;
    hipMallocArray(&cuArray, &tex_ref.channelDesc, width, height);
    hipMemcpyToArray(cuArray, 0, 0, data, width * height, hipMemcpyHostToDevice);
    hipBindTextureToArray(tex_ref, cuArray);
    return cuArray;
}

// Builds one rotation matrix (or two for the "baseball" layouts when `dual`
// is true), uploads them to the constant array `mat`, and returns the host
// copy for the caller to free().
static float* HipUploadRotation(float yaw, float pitch, float roll, bool dual) {
    int count = dual ? 2 * PARAM_NUM : PARAM_NUM;
    float* host_mat = (float*)malloc(count * sizeof(float));
    RotationMatrix(Radians(yaw), Radians(pitch), Radians(-roll), host_mat);
    if (dual)
        RotationMatrix(Radians(180.0 + yaw), Radians(-pitch), Radians(-90.0 + roll), PARAM_NUM + host_mat);
    hipMemcpyToSymbol(mat, host_mat, count * sizeof(float));
    return host_mat;
}

// Waits for outstanding kernels, copies `bytes` of output back to the host,
// and releases every per-call resource.
static void HipFinishTransform(uint8_t* out_data, uint8_t* cuda_data, int bytes,
                               float* host_mat, hipArray* cuArray) {
    hipDeviceSynchronize();
    // fprintf(stderr, "%s\n", hipGetErrorName(hipGetLastError())); // DEBUG
    hipMemcpy(out_data, cuda_data, bytes, hipMemcpyDeviceToHost);
    hipFree(cuda_data);
    free(host_mat);
    hipUnbindTexture(tex_ref);
    hipFreeArray(cuArray);
}

// Reprojects the equirectangular source image into the layout selected by
// `flag` (offset equirect, cubemap, EAC, baseball variants) with view
// rotation (yaw/pitch/roll), optional view offset (x/y/z) and expansion
// coefficient `ecoef`.  Returns 0 on success, 1 for an unknown flag.
// NOTE: grids use ceil-division; the original `n / nt` truncated and could
// leave up to nt-1 trailing pixels unrendered (kernels bounds-check on n).
int CUDATransformWrapper(const uint8_t* data,
                         int width,
                         int height,
                         int target_width,
                         int target_height,
                         float yaw,
                         float pitch,
                         float roll,
                         float x,
                         float y,
                         float z,
                         float ecoef,
                         uint8_t* out_data,
                         int flag) {
    switch (flag) {
        case OFFSET_EQUIRECT_21:
        case OFFSET_EQUIRECT:
        {
            hipArray* cuArray = HipBindSourceTexture(data, width, height);
            float* host_mat = HipUploadRotation(yaw, pitch, roll, false);
            int n = target_width * target_height;
            float rw = 1.0 / target_width, rh = 1.0 / target_height;
            uint8_t* cuda_data = NULL;
            hipMalloc((void**)&cuda_data, n);
            int nt = min(max(target_width >> 1, target_height), THREAD_LIMIT);
            hipLaunchKernelGGL(CUDATransformKernelOERP, dim3((n + nt - 1) / nt), dim3(nt), 0, 0,
                               cuda_data, target_width, rw, rh, x, y, z, n);
            HipFinishTransform(out_data, cuda_data, n, host_mat, cuArray);
            break;
        }
        case CUBEMAP_32:
        case CUBEMAP:
        {
            hipArray* cuArray = HipBindSourceTexture(data, width, height);
            float* host_mat = HipUploadRotation(yaw, pitch, roll, false);
            int n = target_width * target_height;
            float rw = 1.0 / target_width, rh = 1.0 / target_height;
            uint8_t* cuda_data = NULL;
            hipMalloc((void**)&cuda_data, n);
            int nt = min(max(target_width / 3, target_height >> 1), THREAD_LIMIT);
            hipLaunchKernelGGL(CUDATransformKernelCMP, dim3((n + nt - 1) / nt), dim3(nt), 0, 0,
                               cuda_data, target_width, rw, rh, ecoef, n);
            HipFinishTransform(out_data, cuda_data, n, host_mat, cuArray);
            break;
        }
        case OFFSET_CUBEMAP_32:
        case OFFSET_CUBEMAP:
        {
            hipArray* cuArray = HipBindSourceTexture(data, width, height);
            float* host_mat = HipUploadRotation(yaw, pitch, roll, false);
            int n = target_width * target_height;
            float rw = 1.0 / target_width, rh = 1.0 / target_height;
            uint8_t* cuda_data = NULL;
            hipMalloc((void**)&cuda_data, n);
            int nt = min(max(target_width / 3, target_height >> 1), THREAD_LIMIT);
            hipLaunchKernelGGL(CUDATransformKernelOCMP, dim3((n + nt - 1) / nt), dim3(nt), 0, 0,
                               cuda_data, target_width, rw, rh, ecoef, x, y, z, n);
            HipFinishTransform(out_data, cuda_data, n, host_mat, cuArray);
            break;
        }
        case EAC_32:
        case EAC:
        {
            hipArray* cuArray = HipBindSourceTexture(data, width, height);
            float* host_mat = HipUploadRotation(yaw, pitch, roll, false);
            int n = target_width * target_height;
            float rw = 1.0 / target_width, rh = 1.0 / target_height;
            uint8_t* cuda_data = NULL;
            hipMalloc((void**)&cuda_data, n);
            int nt = min(max(target_width / 3, target_height >> 1), THREAD_LIMIT);
            hipLaunchKernelGGL(CUDATransformKernelEAC, dim3((n + nt - 1) / nt), dim3(nt), 0, 0,
                               cuda_data, target_width, rw, rh, ecoef, n);
            HipFinishTransform(out_data, cuda_data, n, host_mat, cuArray);
            break;
        }
        case OFFSET_EAC_32:
        case OFFSET_EAC:
        {
            hipArray* cuArray = HipBindSourceTexture(data, width, height);
            float* host_mat = HipUploadRotation(yaw, pitch, roll, false);
            int n = target_width * target_height;
            float rw = 1.0 / target_width, rh = 1.0 / target_height;
            uint8_t* cuda_data = NULL;
            hipMalloc((void**)&cuda_data, n);
            int nt = min(max(target_width / 3, target_height >> 1), THREAD_LIMIT);
            hipLaunchKernelGGL(CUDATransformKernelOEAC, dim3((n + nt - 1) / nt), dim3(nt), 0, 0,
                               cuda_data, target_width, rw, rh, ecoef, x, y, z, n);
            HipFinishTransform(out_data, cuda_data, n, host_mat, cuArray);
            break;
        }
        case BASEBALL_EQUIRECT_32:
        case BASEBALL_EQUIRECT:
        {
            hipArray* cuArray = HipBindSourceTexture(data, width, height);
            float* host_mat = HipUploadRotation(yaw, pitch, roll, true);
            int nh = target_width * target_height >> 1; // pixels per half
            float rw = 1.0 / target_width, rhh = 2.0 / target_height;
            uint8_t* cuda_data = NULL;
            hipMalloc((void**)&cuda_data, nh << 1);
            int nt = min(max(target_width / 3, target_height >> 1), THREAD_LIMIT);
            // Front half uses matrix 0; back half the matrix at offset PARAM_NUM.
            hipLaunchKernelGGL(CUDATransformKernelBEP, dim3((nh + nt - 1) / nt), dim3(nt), 0, 0,
                               cuda_data, target_width, rw, rhh, ecoef, 0, nh);
            hipLaunchKernelGGL(CUDATransformKernelBEP, dim3((nh + nt - 1) / nt), dim3(nt), 0, 0,
                               nh + cuda_data, target_width, rw, rhh, ecoef, PARAM_NUM, nh);
            HipFinishTransform(out_data, cuda_data, nh << 1, host_mat, cuArray);
            break;
        }
        case SHIFT_BASEBALL_EQUIRECT_21:
        {
            hipArray* cuArray = HipBindSourceTexture(data, width, height);
            float* host_mat = HipUploadRotation(yaw, pitch, roll, true);
            // Unequal halves: the upper band is target_width/3 rows tall.
            int target_height_up = target_width / 3;
            int nu = target_width * target_height_up;
            int target_height_dn = target_height - target_height_up;
            int nd = target_width * target_height_dn;
            float rw = 1.0 / target_width, rhu = 1.0 / target_height_up, rhd = 1.0 / target_height_dn;
            uint8_t* cuda_data = NULL;
            hipMalloc((void**)&cuda_data, nu + nd);
            int nt = min(max(target_width / 3, target_height_up), THREAD_LIMIT);
            hipLaunchKernelGGL(CUDATransformKernelBEP, dim3((nu + nt - 1) / nt), dim3(nt), 0, 0,
                               cuda_data, target_width, rw, rhu, ecoef, 0, nu);
            hipLaunchKernelGGL(CUDATransformKernelBEP, dim3((nd + nt - 1) / nt), dim3(nt), 0, 0,
                               nu + cuda_data, target_width, rw, rhd, ecoef, PARAM_NUM, nd);
            HipFinishTransform(out_data, cuda_data, nu + nd, host_mat, cuArray);
            break;
        }
        default:
            fprintf(stderr, "CUDATransformWrapper(): Invalid Flag\n");
            return 1;
    }
    return 0;
}
// Placeholder for a future rendering entry point: every parameter is
// currently ignored and the function always reports success (0).
// Kept so the public API is stable while the implementation lands.
int CUDARenderWrapper(const uint8_t* data,
                      int width,
                      int height,
                      int target_width,
                      int target_height,
                      float yaw,
                      float pitch,
                      float roll,
                      float x,
                      float y,
                      float z,
                      float ecoef,
                      uint8_t* out_data,
                      int flag) {
  return 0;
}
| c42048c8bb3f9a69f23596828d39400f3375e9bd.cu | #include "transformer.h"
texture<uint8_t, 2, cudaReadModeElementType> tex_ref; // TEXTURE REFERENCE MUST BE A GLOBAL VARIABLE
// Upper bound on threads per block used by every launch in this file.
const int THREAD_LIMIT = 960;
// Floats per 3x3 rotation matrix.
const int PARAM_NUM = 9;
// Rotation matrices in constant memory: one matrix for most layouts, two
// (front/back) for the "baseball" layouts.
__constant__ float mat[2 * PARAM_NUM];
// Per-face cube parameters: for each of the 6 faces, 9 floats — the face
// origin corner followed by its u-axis and v-axis direction vectors.
__constant__ float cube[6 * PARAM_NUM] = {0.5, 0.5, 0.5, 0.0, 0.0, -1.0, 0.0, -1.0, 0.0,
                                          0.5, 0.5, -0.5, -1.0, 0.0, 0.0, 0.0, -1.0, 0.0,
                                          -0.5, 0.5, -0.5, 0.0, 0.0, 1.0, 0.0, -1.0, 0.0,
                                          -0.5, -0.5, -0.5, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0,
                                          -0.5, -0.5, 0.5, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0,
                                          -0.5, 0.5, 0.5, 0.0, 0.0, -1.0, 1.0, 0.0, 0.0};
// Multiplies by 1/pi (maps an angle in radians to a normalized coordinate).
__device__ float CUDADividedByPi(float val) {return 0.3183098861837907 * val;}
// Multiplies by pi.
__device__ float CUDATimesPi(float val) {return 3.141592653589793 * val;}
// Point-sampled rescale: each thread writes one output pixel by fetching the
// source texture at the pixel centre's normalized coordinates.
// Launch: 1D grid, one thread per pixel; w = output width, rw/rh = 1/size,
// n = total output pixels.
__global__ void CUDAScaleKernel(uint8_t* transformed_data,
                                int w,
                                float rw,
                                float rh,
                                int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;
    int col = idx % w;
    int row = idx / w;
    // Sample at the pixel centre (+0.5) in normalized texture space.
    float u = (__int2float_rn(col) + 0.5) * rw;
    float v = (__int2float_rn(row) + 0.5) * rh;
    transformed_data[idx] = tex2D(tex_ref, u, v);
}
// Rotates an equirectangular panorama: each output pixel is mapped to a unit
// direction vector, rotated by the 3x3 matrix in constant memory (`mat`),
// and converted back to equirectangular texture coordinates.
// Launch: 1D grid, one thread per pixel; w = output width, rw/rh = 1/size.
__global__ void CUDARotateKernel(uint8_t* transformed_data,
                                 int w,
                                 float rw,
                                 float rh,
                                 int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;
    // sin/cos of the pixel centre's longitude (u) and latitude (v).
    float su, cu, sv, cv;
    sincospif(__fmaf_rn(__fmaf_rn(2.0, __int2float_rn(idx % w), 1.0), rw, -1.0), &su, &cu);
    sincospif(-__fmaf_rn(__fmaf_rn(1.0, __int2float_rn(idx / w), 0.5), rh, -0.5), &sv, &cv);
    // Unit direction for the pixel.
    float x = -su * cv, y = sv, z = -cu * cv;
    // Rotate and convert back to (u, v) texture coordinates.
    float rx = __fmaf_rn(mat[0], x, __fmaf_rn(mat[1], y, mat[2] * z));
    float ry = __fmaf_rn(mat[3], x, __fmaf_rn(mat[4], y, mat[5] * z));
    float rz = __fmaf_rn(mat[6], x, __fmaf_rn(mat[7], y, mat[8] * z));
    transformed_data[idx] = tex2D(tex_ref,
                                  CUDADividedByPi(0.5 * atan2f(rx, rz)),
                                  CUDADividedByPi(acosf(ry)));
}
// Offset equirectangular projection: maps each output pixel to a direction,
// re-projects it through an eye offset (dx, dy, dz) onto the unit sphere,
// rotates by `mat`, and samples the equirectangular source.
// Launch: 1D grid, one thread per pixel; w = output width, rw/rh = 1/size.
__global__ void CUDATransformKernelOERP(uint8_t* transformed_data,
                                        int w,
                                        float rw,
                                        float rh,
                                        float dx,
                                        float dy,
                                        float dz,
                                        int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;
    // sin/cos of the pixel centre's longitude and latitude.
    float su, cu, sv, cv;
    sincospif(__fmaf_rn(__fmaf_rn(2.0, __int2float_rn(idx % w), 1.0), rw, -1.0), &su, &cu);
    sincospif(-__fmaf_rn(__fmaf_rn(1.0, __int2float_rn(idx / w), 0.5), rh, -0.5), &sv, &cv);
    float x = -su * cv, y = sv, z = -cu * cv;
    // Intersect the ray from the offset eye with the unit sphere.
    float m = __fmaf_rn(x, dx, __fmaf_rn(y, dy, z * dz));
    float t = m + __fsqrt_rn(__fmaf_rn(m, m, 1.0 -
                                       __fmaf_rn(dx, dx,
                                                 __fmaf_rn(dy, dy, dz * dz))));
    x = __fmaf_rn(t, x, -dx);
    y = __fmaf_rn(t, y, -dy);
    z = __fmaf_rn(t, z, -dz);
    // Squared length of the (no longer unit) offset direction.
    float l2 = __fmaf_rn(x, x, __fmaf_rn(y, y, z * z));
    // Rotate; the latitude term needs the 1/length correction.
    float rx = __fmaf_rn(mat[0], x, __fmaf_rn(mat[1], y, mat[2] * z));
    float ry = __fmaf_rn(mat[3], x, __fmaf_rn(mat[4], y, mat[5] * z));
    float rz = __fmaf_rn(mat[6], x, __fmaf_rn(mat[7], y, mat[8] * z));
    transformed_data[idx] = tex2D(tex_ref,
                                  CUDADividedByPi(0.5 * atan2f(rx, rz)),
                                  CUDADividedByPi(acosf(__frsqrt_rn(l2) * ry)));
}
// Cubemap projection (3x2 face layout): locates the pixel's face, builds the
// corresponding direction from the `cube` face parameters, normalizes it,
// rotates by `mat`, and samples the equirectangular source.
// `ecoef` expands the face coordinates around each face centre.
__global__ void CUDATransformKernelCMP(uint8_t* transformed_data,
                                       int w,
                                       float rw,
                                       float rh,
                                       float ecoef,
                                       int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;
    // Normalized position of the pixel centre in the 3x2 layout.
    float u = (__int2float_rn(idx % w) + 0.5) * rw;
    float v = (__int2float_rn(idx / w) + 0.5) * rh;
    // Face column (0..2) and face row (0..1).
    int fu = __float2int_rd(3.0 * u);
    int fv = __float2int_rd(2.0 * v);
    // Local face coordinates, expanded by ecoef about the face centre.
    u = __fmaf_rn(__fmaf_rn(3.0, u, -__int2float_rn(fu)), ecoef, -__fmaf_rn(0.5, ecoef, -0.5));
    v = __fmaf_rn(__fmaf_rn(2.0, v, -__int2float_rn(fv)), ecoef, -__fmaf_rn(0.5, ecoef, -0.5));
    // Offset of this face's parameters (origin, u-axis, v-axis).
    int s = 9 * (3 * fv + fu);
    float x = __fmaf_rn(cube[s + 6], v, __fmaf_rn(cube[s + 3], u, cube[s]));
    float y = __fmaf_rn(cube[s + 7], v, __fmaf_rn(cube[s + 4], u, cube[s + 1]));
    float z = __fmaf_rn(cube[s + 8], v, __fmaf_rn(cube[s + 5], u, cube[s + 2]));
    // Normalize, rotate, then convert to equirect texture coordinates.
    float l = __frsqrt_rn(__fmaf_rn(x, x, __fmaf_rn(y, y, z * z)));
    x *= l; y *= l; z *= l;
    float rx = __fmaf_rn(mat[0], x, __fmaf_rn(mat[1], y, mat[2] * z));
    float ry = __fmaf_rn(mat[3], x, __fmaf_rn(mat[4], y, mat[5] * z));
    float rz = __fmaf_rn(mat[6], x, __fmaf_rn(mat[7], y, mat[8] * z));
    transformed_data[idx] = tex2D(tex_ref,
                                  CUDADividedByPi(0.5 * atan2f(rx, rz)),
                                  CUDADividedByPi(acosf(ry)));
}
// Offset cubemap projection: cubemap face lookup as in the CMP kernel, then
// the unit direction is re-projected through the eye offset (dx, dy, dz)
// before rotation and equirectangular sampling.
__global__ void CUDATransformKernelOCMP(uint8_t* transformed_data,
                                        int w,
                                        float rw,
                                        float rh,
                                        float ecoef,
                                        float dx,
                                        float dy,
                                        float dz,
                                        int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;
    // Face lookup in the 3x2 layout (see CUDATransformKernelCMP).
    float u = (__int2float_rn(idx % w) + 0.5) * rw;
    float v = (__int2float_rn(idx / w) + 0.5) * rh;
    int fu = __float2int_rd(3.0 * u);
    int fv = __float2int_rd(2.0 * v);
    u = __fmaf_rn(__fmaf_rn(3.0, u, -__int2float_rn(fu)), ecoef, -__fmaf_rn(0.5, ecoef, -0.5));
    v = __fmaf_rn(__fmaf_rn(2.0, v, -__int2float_rn(fv)), ecoef, -__fmaf_rn(0.5, ecoef, -0.5));
    int s = 9 * (3 * fv + fu);
    float x = __fmaf_rn(cube[s + 6], v, __fmaf_rn(cube[s + 3], u, cube[s]));
    float y = __fmaf_rn(cube[s + 7], v, __fmaf_rn(cube[s + 4], u, cube[s + 1]));
    float z = __fmaf_rn(cube[s + 8], v, __fmaf_rn(cube[s + 5], u, cube[s + 2]));
    float l = __frsqrt_rn(__fmaf_rn(x, x, __fmaf_rn(y, y, z * z)));
    x *= l; y *= l; z *= l;
    // Intersect the ray from the offset eye with the unit sphere.
    float m = __fmaf_rn(x, dx, __fmaf_rn(y, dy, z * dz));
    float t = m + __fsqrt_rn(__fmaf_rn(m, m, 1.0 -
                                       __fmaf_rn(dx, dx,
                                                 __fmaf_rn(dy, dy, dz * dz))));
    x = __fmaf_rn(t, x, -dx);
    y = __fmaf_rn(t, y, -dy);
    z = __fmaf_rn(t, z, -dz);
    // Squared length of the offset direction (latitude needs 1/length).
    float l2 = __fmaf_rn(x, x, __fmaf_rn(y, y, z * z));
    float rx = __fmaf_rn(mat[0], x, __fmaf_rn(mat[1], y, mat[2] * z));
    float ry = __fmaf_rn(mat[3], x, __fmaf_rn(mat[4], y, mat[5] * z));
    float rz = __fmaf_rn(mat[6], x, __fmaf_rn(mat[7], y, mat[8] * z));
    transformed_data[idx] = tex2D(tex_ref,
                                  CUDADividedByPi(0.5 * atan2f(rx, rz)),
                                  CUDADividedByPi(acosf(__frsqrt_rn(l2) * ry)));
}
// Equi-angular cubemap (EAC) projection: like the plain cubemap kernel, but
// the local face coordinates are remapped through tan() so texels cover
// equal angles rather than equal areas on the face plane.
__global__ void CUDATransformKernelEAC(uint8_t* transformed_data,
                                       int w,
                                       float rw,
                                       float rh,
                                       float ecoef,
                                       int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;
    float u = (__int2float_rn(idx % w) + 0.5) * rw;
    float v = (__int2float_rn(idx / w) + 0.5) * rh;
    int fu = __float2int_rd(3.0 * u);
    int fv = __float2int_rd(2.0 * v);
    // Equal-angle remap of the local face coordinates (scaled by ecoef).
    u = __fmaf_rn(0.5 * ecoef, tanf(CUDATimesPi(__fmaf_rn(1.5, u, -__fmaf_rn(0.5, __int2float_rn(fu), 0.25)))), 0.5);
    v = __fmaf_rn(0.5 * ecoef, tanf(CUDATimesPi(__fmaf_rn(1.0, v, -__fmaf_rn(0.5, __int2float_rn(fv), 0.25)))), 0.5);
    int s = 9 * (3 * fv + fu);
    float x = __fmaf_rn(cube[s + 6], v, __fmaf_rn(cube[s + 3], u, cube[s]));
    float y = __fmaf_rn(cube[s + 7], v, __fmaf_rn(cube[s + 4], u, cube[s + 1]));
    float z = __fmaf_rn(cube[s + 8], v, __fmaf_rn(cube[s + 5], u, cube[s + 2]));
    // Normalize, rotate by `mat`, sample the equirectangular source.
    float l = __frsqrt_rn(__fmaf_rn(x, x, __fmaf_rn(y, y, z * z)));
    x *= l; y *= l; z *= l;
    float rx = __fmaf_rn(mat[0], x, __fmaf_rn(mat[1], y, mat[2] * z));
    float ry = __fmaf_rn(mat[3], x, __fmaf_rn(mat[4], y, mat[5] * z));
    float rz = __fmaf_rn(mat[6], x, __fmaf_rn(mat[7], y, mat[8] * z));
    transformed_data[idx] = tex2D(tex_ref,
                                  CUDADividedByPi(0.5 * atan2f(rx, rz)),
                                  CUDADividedByPi(acosf(ry)));
}
// Offset equi-angular cubemap projection: EAC face lookup (tan remap), then
// the direction is re-projected through the eye offset (dx, dy, dz) before
// rotation and equirectangular sampling.
__global__ void CUDATransformKernelOEAC(uint8_t* transformed_data,
                                        int w,
                                        float rw,
                                        float rh,
                                        float ecoef,
                                        float dx,
                                        float dy,
                                        float dz,
                                        int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;
    float u = (__int2float_rn(idx % w) + 0.5) * rw;
    float v = (__int2float_rn(idx / w) + 0.5) * rh;
    int fu = __float2int_rd(3.0 * u);
    int fv = __float2int_rd(2.0 * v);
    // Equal-angle remap of the local face coordinates.
    u = __fmaf_rn(0.5 * ecoef, tanf(CUDATimesPi(__fmaf_rn(1.5, u, -__fmaf_rn(0.5, __int2float_rn(fu), 0.25)))), 0.5);
    v = __fmaf_rn(0.5 * ecoef, tanf(CUDATimesPi(__fmaf_rn(1.0, v, -__fmaf_rn(0.5, __int2float_rn(fv), 0.25)))), 0.5);
    int s = 9 * (3 * fv + fu);
    float x = __fmaf_rn(cube[s + 6], v, __fmaf_rn(cube[s + 3], u, cube[s]));
    float y = __fmaf_rn(cube[s + 7], v, __fmaf_rn(cube[s + 4], u, cube[s + 1]));
    float z = __fmaf_rn(cube[s + 8], v, __fmaf_rn(cube[s + 5], u, cube[s + 2]));
    float l = __frsqrt_rn(__fmaf_rn(x, x, __fmaf_rn(y, y, z * z)));
    x *= l; y *= l; z *= l;
    // Intersect the ray from the offset eye with the unit sphere.
    float m = __fmaf_rn(x, dx, __fmaf_rn(y, dy, z * dz));
    float t = m + __fsqrt_rn(__fmaf_rn(m, m, 1.0 -
                                       __fmaf_rn(dx, dx,
                                                 __fmaf_rn(dy, dy, dz * dz))));
    x = __fmaf_rn(t, x, -dx);
    y = __fmaf_rn(t, y, -dy);
    z = __fmaf_rn(t, z, -dz);
    float l2 = __fmaf_rn(x, x, __fmaf_rn(y, y, z * z));
    float rx = __fmaf_rn(mat[0], x, __fmaf_rn(mat[1], y, mat[2] * z));
    float ry = __fmaf_rn(mat[3], x, __fmaf_rn(mat[4], y, mat[5] * z));
    float rz = __fmaf_rn(mat[6], x, __fmaf_rn(mat[7], y, mat[8] * z));
    transformed_data[idx] = tex2D(tex_ref,
                                  CUDADividedByPi(0.5 * atan2f(rx, rz)),
                                  CUDADividedByPi(acosf(__frsqrt_rn(l2) * ry)));
}
// "Baseball" equirectangular half: renders one half of the output using the
// rotation matrix stored at constant-memory offset `s` (0 for the first
// half, PARAM_NUM for the second).  `ecoef` scales the latitude range;
// rw/rh are the reciprocal width/height of this half.
__global__ void CUDATransformKernelBEP(uint8_t* transformed_data,
                                       int w,
                                       float rw,
                                       float rh,
                                       float ecoef,
                                       int s,
                                       int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n) return;
    float su, cu, sv, cv;
    sincospif(__fmaf_rn(__fmaf_rn(1.5, __int2float_rn(idx % w), 0.75), rw, -0.75), &su, &cu);
    sincospif(__fmaf_rn(__fmaf_rn(0.5, __int2float_rn(idx / w), 0.25), rh, -0.25) * -ecoef, &sv, &cv);
    float x = -su * cv, y = sv, z = -cu * cv;
    // Rotate by the matrix at offset s and sample the equirect source.
    float rx = __fmaf_rn(mat[s], x, __fmaf_rn(mat[s + 1], y, mat[s + 2] * z));
    float ry = __fmaf_rn(mat[s + 3], x, __fmaf_rn(mat[s + 4], y, mat[s + 5] * z));
    float rz = __fmaf_rn(mat[s + 6], x, __fmaf_rn(mat[s + 7], y, mat[s + 8] * z));
    transformed_data[idx] = tex2D(tex_ref,
                                  CUDADividedByPi(0.5 * atan2f(rx, rz)),
                                  CUDADividedByPi(acosf(ry)));
}
// Placeholder kernel for the (unimplemented) render path; intentionally empty.
__global__ void CUDARenderKernel() {}
// Converts an angle from degrees to radians.
float Radians(float degrees) {
    // pi / 180, kept as a double so the promote-then-truncate arithmetic
    // matches the original expression exactly.
    const double kDegreesToRadians = 0.017453292519943295;
    return kDegreesToRadians * degrees;
}
// Fills `out` with the row-major 3x3 rotation matrix for the given yaw (y),
// pitch (p) and roll (r) angles, all in radians.  `out` must hold 9 floats.
void RotationMatrix(float y, float p, float r, float* out) {
    float sy = sin(y), cy = cos(y);
    float sp = sin(p), cp = cos(p);
    float sr = sin(r), cr = cos(r);
    out[0] = sy * sp * sr + cy * cr;
    out[1] = sy * sp * cr - cy * sr;
    out[2] = sy * cp;
    out[3] = cp * sr;
    out[4] = cp * cr;
    out[5] = -sp;
    out[6] = cy * sp * sr - sy * cr;
    out[7] = cy * sp * cr + sy * sr;
    out[8] = cy * cp;
}
// Rescales an 8-bit width x height image to target_width x target_height by
// point-sampling through the texture unit.  Always returns 0.
int CUDAScaleWrapper(const uint8_t* data,
                     int width,
                     int height,
                     int target_width,
                     int target_height,
                     uint8_t* out_data) {
    // Texture-reference attributes must be set BEFORE binding to take effect;
    // the original code configured them after cudaBindTextureToArray.
    tex_ref.addressMode[0] = cudaAddressModeWrap;
    tex_ref.addressMode[1] = cudaAddressModeWrap;
    tex_ref.filterMode = cudaFilterModePoint; // "cudaFilterModeLinear" ONLY USED WITH FLOAT TEXTURE
    tex_ref.normalized = true;
    cudaArray* cuArray;
    cudaMallocArray(&cuArray, &tex_ref.channelDesc, width, height);
    cudaMemcpyToArray(cuArray, 0, 0, data, width * height, cudaMemcpyHostToDevice);
    cudaBindTextureToArray(tex_ref, cuArray);
    int n = target_width * target_height;
    float rw = 1.0 / target_width, rh = 1.0 / target_height;
    uint8_t* cuda_data = NULL;
    cudaMalloc((void**)&cuda_data, n);
    int nt = min(max(target_width >> 1, target_height), THREAD_LIMIT);
    // Ceil-divide so a trailing partial block is launched too; the original
    // n / nt truncated and could leave up to nt-1 pixels unwritten (the
    // kernel bounds-checks against n, so the extra block is safe).
    CUDAScaleKernel<<<(n + nt - 1) / nt, nt>>>(cuda_data,
                                               target_width,
                                               rw, rh, n);
    cudaDeviceSynchronize();
    // fprintf(stderr, "%s\n", cudaGetErrorName(cudaGetLastError())); // DEBUG
    cudaMemcpy(out_data, cuda_data, n, cudaMemcpyDeviceToHost);
    cudaFree(cuda_data);
    cudaUnbindTexture(tex_ref);
    cudaFreeArray(cuArray);
    return 0;
}
// Rotates the equirectangular source image by (yaw, pitch, roll) into an
// output of target_width x target_height.  Always returns 0.
int CUDARotateWrapper(const uint8_t* data,
                      int width,
                      int height,
                      int target_width,
                      int target_height,
                      float yaw,
                      float pitch,
                      float roll,
                      uint8_t* out_data) {
    // Texture-reference attributes must be set BEFORE binding to take effect;
    // the original code configured them after cudaBindTextureToArray.
    tex_ref.addressMode[0] = cudaAddressModeWrap;
    tex_ref.addressMode[1] = cudaAddressModeWrap;
    tex_ref.filterMode = cudaFilterModePoint; // "cudaFilterModeLinear" ONLY USED WITH FLOAT TEXTURE
    tex_ref.normalized = true;
    cudaArray* cuArray;
    cudaMallocArray(&cuArray, &tex_ref.channelDesc, width, height);
    cudaMemcpyToArray(cuArray, 0, 0, data, width * height, cudaMemcpyHostToDevice);
    cudaBindTextureToArray(tex_ref, cuArray);
    // Build the rotation matrix on the host and upload to constant memory.
    float* host_mat = (float*)malloc(PARAM_NUM * sizeof(float));
    RotationMatrix(Radians(yaw), Radians(pitch), Radians(-roll), host_mat);
    cudaMemcpyToSymbol(mat, host_mat, PARAM_NUM * sizeof(float));
    int n = target_width * target_height;
    float rw = 1.0 / target_width, rh = 1.0 / target_height;
    uint8_t* cuda_data = NULL;
    cudaMalloc((void**)&cuda_data, n);
    int nt = min(max(target_width >> 1, target_height), THREAD_LIMIT);
    // Ceil-divide: the original n / nt dropped the tail pixels whenever n is
    // not a multiple of nt (the kernel bounds-checks against n).
    CUDARotateKernel<<<(n + nt - 1) / nt, nt>>>(cuda_data,
                                                target_width,
                                                rw, rh, n);
    cudaDeviceSynchronize();
    // fprintf(stderr, "%s\n", cudaGetErrorName(cudaGetLastError())); // DEBUG
    cudaMemcpy(out_data, cuda_data, n, cudaMemcpyDeviceToHost);
    cudaFree(cuda_data);
    free(host_mat);
    cudaUnbindTexture(tex_ref);
    cudaFreeArray(cuArray);
    return 0;
}
// ==== file-local helpers shared by the CUDATransformWrapper cases ====

// Configures tex_ref and binds `data` (8-bit image, width x height) to a
// freshly allocated cudaArray.  Attributes are assigned BEFORE the bind
// call: the original code set them after cudaBindTextureToArray, where they
// do not apply to the binding just made.
static cudaArray* CudaBindSourceTexture(const uint8_t* data, int width, int height) {
    tex_ref.addressMode[0] = cudaAddressModeWrap;
    tex_ref.addressMode[1] = cudaAddressModeWrap;
    tex_ref.filterMode = cudaFilterModePoint; // "cudaFilterModeLinear" ONLY USED WITH FLOAT TEXTURE
    tex_ref.normalized = true;
    cudaArray* cuArray = NULL;
    cudaMallocArray(&cuArray, &tex_ref.channelDesc, width, height);
    cudaMemcpyToArray(cuArray, 0, 0, data, width * height, cudaMemcpyHostToDevice);
    cudaBindTextureToArray(tex_ref, cuArray);
    return cuArray;
}

// Builds one rotation matrix (or two for the "baseball" layouts when `dual`
// is true), uploads them to the constant array `mat`, and returns the host
// copy for the caller to free().
static float* CudaUploadRotation(float yaw, float pitch, float roll, bool dual) {
    int count = dual ? 2 * PARAM_NUM : PARAM_NUM;
    float* host_mat = (float*)malloc(count * sizeof(float));
    RotationMatrix(Radians(yaw), Radians(pitch), Radians(-roll), host_mat);
    if (dual)
        RotationMatrix(Radians(180.0 + yaw), Radians(-pitch), Radians(-90.0 + roll), PARAM_NUM + host_mat);
    cudaMemcpyToSymbol(mat, host_mat, count * sizeof(float));
    return host_mat;
}

// Waits for outstanding kernels, copies `bytes` of output back to the host,
// and releases every per-call resource.
static void CudaFinishTransform(uint8_t* out_data, uint8_t* cuda_data, int bytes,
                                float* host_mat, cudaArray* cuArray) {
    cudaDeviceSynchronize();
    // fprintf(stderr, "%s\n", cudaGetErrorName(cudaGetLastError())); // DEBUG
    cudaMemcpy(out_data, cuda_data, bytes, cudaMemcpyDeviceToHost);
    cudaFree(cuda_data);
    free(host_mat);
    cudaUnbindTexture(tex_ref);
    cudaFreeArray(cuArray);
}

// Reprojects the equirectangular source image into the layout selected by
// `flag` (offset equirect, cubemap, EAC, baseball variants) with view
// rotation (yaw/pitch/roll), optional view offset (x/y/z) and expansion
// coefficient `ecoef`.  Returns 0 on success, 1 for an unknown flag.
// NOTE: grids use ceil-division; the original `n / nt` truncated and could
// leave up to nt-1 trailing pixels unrendered (kernels bounds-check on n).
int CUDATransformWrapper(const uint8_t* data,
                         int width,
                         int height,
                         int target_width,
                         int target_height,
                         float yaw,
                         float pitch,
                         float roll,
                         float x,
                         float y,
                         float z,
                         float ecoef,
                         uint8_t* out_data,
                         int flag) {
    switch (flag) {
        case OFFSET_EQUIRECT_21:
        case OFFSET_EQUIRECT:
        {
            cudaArray* cuArray = CudaBindSourceTexture(data, width, height);
            float* host_mat = CudaUploadRotation(yaw, pitch, roll, false);
            int n = target_width * target_height;
            float rw = 1.0 / target_width, rh = 1.0 / target_height;
            uint8_t* cuda_data = NULL;
            cudaMalloc((void**)&cuda_data, n);
            int nt = min(max(target_width >> 1, target_height), THREAD_LIMIT);
            CUDATransformKernelOERP<<<(n + nt - 1) / nt, nt>>>(cuda_data, target_width,
                                                               rw, rh, x, y, z, n);
            CudaFinishTransform(out_data, cuda_data, n, host_mat, cuArray);
            break;
        }
        case CUBEMAP_32:
        case CUBEMAP:
        {
            cudaArray* cuArray = CudaBindSourceTexture(data, width, height);
            float* host_mat = CudaUploadRotation(yaw, pitch, roll, false);
            int n = target_width * target_height;
            float rw = 1.0 / target_width, rh = 1.0 / target_height;
            uint8_t* cuda_data = NULL;
            cudaMalloc((void**)&cuda_data, n);
            int nt = min(max(target_width / 3, target_height >> 1), THREAD_LIMIT);
            CUDATransformKernelCMP<<<(n + nt - 1) / nt, nt>>>(cuda_data, target_width,
                                                              rw, rh, ecoef, n);
            CudaFinishTransform(out_data, cuda_data, n, host_mat, cuArray);
            break;
        }
        case OFFSET_CUBEMAP_32:
        case OFFSET_CUBEMAP:
        {
            cudaArray* cuArray = CudaBindSourceTexture(data, width, height);
            float* host_mat = CudaUploadRotation(yaw, pitch, roll, false);
            int n = target_width * target_height;
            float rw = 1.0 / target_width, rh = 1.0 / target_height;
            uint8_t* cuda_data = NULL;
            cudaMalloc((void**)&cuda_data, n);
            int nt = min(max(target_width / 3, target_height >> 1), THREAD_LIMIT);
            CUDATransformKernelOCMP<<<(n + nt - 1) / nt, nt>>>(cuda_data, target_width,
                                                               rw, rh, ecoef,
                                                               x, y, z, n);
            CudaFinishTransform(out_data, cuda_data, n, host_mat, cuArray);
            break;
        }
        case EAC_32:
        case EAC:
        {
            cudaArray* cuArray = CudaBindSourceTexture(data, width, height);
            float* host_mat = CudaUploadRotation(yaw, pitch, roll, false);
            int n = target_width * target_height;
            float rw = 1.0 / target_width, rh = 1.0 / target_height;
            uint8_t* cuda_data = NULL;
            cudaMalloc((void**)&cuda_data, n);
            int nt = min(max(target_width / 3, target_height >> 1), THREAD_LIMIT);
            CUDATransformKernelEAC<<<(n + nt - 1) / nt, nt>>>(cuda_data, target_width,
                                                              rw, rh, ecoef, n);
            CudaFinishTransform(out_data, cuda_data, n, host_mat, cuArray);
            break;
        }
        case OFFSET_EAC_32:
        case OFFSET_EAC:
        {
            cudaArray* cuArray = CudaBindSourceTexture(data, width, height);
            float* host_mat = CudaUploadRotation(yaw, pitch, roll, false);
            int n = target_width * target_height;
            float rw = 1.0 / target_width, rh = 1.0 / target_height;
            uint8_t* cuda_data = NULL;
            cudaMalloc((void**)&cuda_data, n);
            int nt = min(max(target_width / 3, target_height >> 1), THREAD_LIMIT);
            CUDATransformKernelOEAC<<<(n + nt - 1) / nt, nt>>>(cuda_data, target_width,
                                                               rw, rh, ecoef,
                                                               x, y, z, n);
            CudaFinishTransform(out_data, cuda_data, n, host_mat, cuArray);
            break;
        }
        case BASEBALL_EQUIRECT_32:
        case BASEBALL_EQUIRECT:
        {
            cudaArray* cuArray = CudaBindSourceTexture(data, width, height);
            float* host_mat = CudaUploadRotation(yaw, pitch, roll, true);
            int nh = target_width * target_height >> 1; // pixels per half
            float rw = 1.0 / target_width, rhh = 2.0 / target_height;
            uint8_t* cuda_data = NULL;
            cudaMalloc((void**)&cuda_data, nh << 1);
            int nt = min(max(target_width / 3, target_height >> 1), THREAD_LIMIT);
            // Front half uses matrix 0; back half the matrix at offset PARAM_NUM.
            CUDATransformKernelBEP<<<(nh + nt - 1) / nt, nt>>>(cuda_data, target_width,
                                                               rw, rhh, ecoef, 0, nh);
            CUDATransformKernelBEP<<<(nh + nt - 1) / nt, nt>>>(nh + cuda_data, target_width,
                                                               rw, rhh, ecoef, PARAM_NUM, nh);
            CudaFinishTransform(out_data, cuda_data, nh << 1, host_mat, cuArray);
            break;
        }
        case SHIFT_BASEBALL_EQUIRECT_21:
        {
            cudaArray* cuArray = CudaBindSourceTexture(data, width, height);
            float* host_mat = CudaUploadRotation(yaw, pitch, roll, true);
            // Unequal halves: the upper band is target_width/3 rows tall.
            int target_height_up = target_width / 3;
            int nu = target_width * target_height_up;
            int target_height_dn = target_height - target_height_up;
            int nd = target_width * target_height_dn;
            float rw = 1.0 / target_width, rhu = 1.0 / target_height_up, rhd = 1.0 / target_height_dn;
            uint8_t* cuda_data = NULL;
            cudaMalloc((void**)&cuda_data, nu + nd);
            int nt = min(max(target_width / 3, target_height_up), THREAD_LIMIT);
            CUDATransformKernelBEP<<<(nu + nt - 1) / nt, nt>>>(cuda_data, target_width,
                                                               rw, rhu, ecoef, 0, nu);
            CUDATransformKernelBEP<<<(nd + nt - 1) / nt, nt>>>(nu + cuda_data, target_width,
                                                               rw, rhd, ecoef, PARAM_NUM, nd);
            CudaFinishTransform(out_data, cuda_data, nu + nd, host_mat, cuArray);
            break;
        }
        default:
            fprintf(stderr, "CUDATransformWrapper(): Invalid Flag\n");
            return 1;
    }
    return 0;
}
// Placeholder for a future rendering entry point: every parameter is
// currently ignored and the function always reports success (0).
// Kept so the public API is stable while the implementation lands.
int CUDARenderWrapper(const uint8_t* data,
                      int width,
                      int height,
                      int target_width,
                      int target_height,
                      float yaw,
                      float pitch,
                      float roll,
                      float x,
                      float y,
                      float z,
                      float ecoef,
                      uint8_t* out_data,
                      int flag) {
  return 0;
}
|
93bf716c90324f60af454841438f9ae4037e6f4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "config.h"
texture<float,1,hipReadModeElementType> tex_vec;
texture<int,1,hipReadModeElementType> tex_cols;
texture<float,1,hipReadModeElementType> tex_val;
// Sparse matrix-vector product (CSR format): one warp per matrix row.  Each
// lane strides over the row's nonzeros, accumulating val[j] * vec[cols[j]]
// fetched through the 1D textures tex_val / tex_cols / tex_vec, then the
// warp reduces its partial sums in volatile shared memory.
// Launch assumption: blockDim.x is a multiple of WARP_SIZE and matches
// BLOCK_SIZE (size of partialSums).
// NOTE(review): the tree reduction relies on `volatile` shared memory and
// implicit warp-synchronous execution — unsafe under independent thread
// scheduling (Volta+); would need __syncwarp() or shuffle intrinsics there.
__global__ void
spmv_kernel(const float* __restrict__ val,
            const int* __restrict__ cols,
            const int * rowDelimiters,
            const float * vec,
            const int dim, float * out)
{
    // Thread ID in block
    int t = threadIdx.x;
    // Thread ID within warp
    int id = t & (WARP_SIZE-1);
    int warpsPerBlock = blockDim.x / WARP_SIZE;
    // One row per warp
    int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
    __shared__ volatile float partialSums[BLOCK_SIZE];
    if (myRow < dim)
    {
        // [warpStart, warpEnd) is this row's nonzero range in val/cols.
        int warpStart = rowDelimiters[myRow];
        int warpEnd = rowDelimiters[myRow+1];
        float mySum = 0;
        for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
        {
            int col = tex1Dfetch(tex_cols,j);
            mySum += tex1Dfetch(tex_val,j) * tex1Dfetch(tex_vec,col);//vec[col];
        }
        partialSums[t] = mySum;
        // Reduce partial sums
        if (id < 16) partialSums[t] += partialSums[t+16];
        if (id < 8) partialSums[t] += partialSums[t+ 8];
        if (id < 4) partialSums[t] += partialSums[t+ 4];
        if (id < 2) partialSums[t] += partialSums[t+ 2];
        if (id < 1) partialSums[t] += partialSums[t+ 1];
        // Write result
        if (id == 0)
        {
            out[myRow] = partialSums[t];
        }
    }
}
/**
 * SpMV (CSR) benchmark driver: builds a random sparse matrix, binds the
 * matrix/vector data to 1D textures, times ITERATIONS launches of
 * spmv_kernel with events and prints the mean kernel time in seconds.
 * Size macros (MSIZE, BLOCK_SIZE, WARP_SIZE, SFactor, PAD_FACTOR,
 * ITERATIONS) and the helpers fill / initRandomMatrix / convertToPadded /
 * spmvCpu come from config.h.
 */
int main(int argc, char **argv) {
hipSetDevice(1); // NOTE: GPU index 1 is hard-coded
srand(2013); // fixed seed -> reproducible matrix
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = MSIZE * (BLOCK_SIZE/WARP_SIZE);
spmv_nItems = spmv_numRows * (spmv_numRows / SFactor); // 1% of entries will be non-zero
float maxval = 200.0;
// Pinned host buffers for the CSR matrix.
hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float));
hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int));
hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float));
spmv_refOut = new float[spmv_numRows];
hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution on the CPU.
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice);
// The kernel reads the matrix/vector through these textures.
hipBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float));
hipBindTexture(0,tex_cols,d_spmv_cols,spmv_nItems * sizeof(int));
hipBindTexture(0,tex_val,d_spmv_val,spmv_nItems * sizeof(float));
// Setup thread configuration: one warp per row, BLOCK_SIZE/WARP_SIZE rows per block.
int spmv_grid = (int) ceil(spmv_numRows / (float)(BLOCK_SIZE / WARP_SIZE));
// Warm up the GPU with 5 untimed launches.
for (int i=0; i<5; i++)
{
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
// Timed runs.
for (int i=0; i<ITERATIONS; i++)
{
hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(BLOCK_SIZE), 0, 0,
d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// Get elapsed time and report the per-launch mean.
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost);
// spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
// Bug fix: release everything that was acquired (textures, events, device
// and pinned host memory) -- the original leaked all of it.
hipUnbindTexture(tex_vec);
hipUnbindTexture(tex_cols);
hipUnbindTexture(tex_val);
hipEventDestroy(kernel_start);
hipEventDestroy(kernel_stop);
hipFree(d_spmv_val);
hipFree(d_spmv_cols);
hipFree(d_spmv_vec);
hipFree(d_spmv_out);
hipFree(d_rowDelimiters);
hipHostFree(h_spmv_val);
hipHostFree(h_spmv_cols);
hipHostFree(h_rowDelimiters);
hipHostFree(h_spmv_vec);
hipHostFree(h_rowDelimitersPad);
hipHostFree(h_spmv_out);
delete[] spmv_refOut;
// h_spmv_valPad / h_spmv_colsPad are allocated inside convertToPadded();
// their allocator is not visible here, so they are deliberately left alone.
return 0;
}
| 93bf716c90324f60af454841438f9ae4037e6f4b.cu | #include "config.h"
texture<float,1,cudaReadModeElementType> tex_vec;
texture<int,1,cudaReadModeElementType> tex_cols;
texture<float,1,cudaReadModeElementType> tex_val;
// Sparse matrix-vector multiply (CSR layout): one warp of WARP_SIZE threads
// per matrix row; each lane accumulates a strided slice of the row, then the
// partial sums are reduced in shared memory and lane 0 writes the result.
// The matrix/vector data is read through the bound 1D textures
// (tex_val / tex_cols / tex_vec); the val, cols and vec pointer arguments
// are NOT dereferenced here -- they only mirror the non-texture variant.
__global__ void
spmv_kernel(const float* __restrict__ val,
const int* __restrict__ cols,
const int * rowDelimiters,
const float * vec,
const int dim, float * out)
{
// Thread ID in block
int t = threadIdx.x;
// Thread ID within warp
int id = t & (WARP_SIZE-1);
int warpsPerBlock = blockDim.x / WARP_SIZE;
// One row per warp
int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE);
// volatile: the reduction below has no explicit barriers and relies on
// implicit warp-synchronous execution (and on WARP_SIZE == 32, given the
// fixed 16/8/4/2/1 offsets).
// NOTE(review): this legacy pattern is unsafe under independent thread
// scheduling (Volta and newer) -- confirm the target architecture, or
// rewrite with __shfl_down_sync / __syncwarp.
__shared__ volatile float partialSums[BLOCK_SIZE];
if (myRow < dim)
{
// [warpStart, warpEnd) is this row's slice of the CSR value/column arrays.
int warpStart = rowDelimiters[myRow];
int warpEnd = rowDelimiters[myRow+1];
float mySum = 0;
for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE)
{
int col = tex1Dfetch(tex_cols,j);
mySum += tex1Dfetch(tex_val,j) * tex1Dfetch(tex_vec,col);//vec[col];
}
partialSums[t] = mySum;
// Reduce partial sums
if (id < 16) partialSums[t] += partialSums[t+16];
if (id < 8) partialSums[t] += partialSums[t+ 8];
if (id < 4) partialSums[t] += partialSums[t+ 4];
if (id < 2) partialSums[t] += partialSums[t+ 2];
if (id < 1) partialSums[t] += partialSums[t+ 1];
// Write result
if (id == 0)
{
out[myRow] = partialSums[t];
}
}
}
/**
 * SpMV (CSR) benchmark driver: builds a random sparse matrix, binds the
 * matrix/vector data to 1D textures, times ITERATIONS launches of
 * spmv_kernel with events and prints the mean kernel time in seconds.
 * Size macros (MSIZE, BLOCK_SIZE, WARP_SIZE, SFactor, PAD_FACTOR,
 * ITERATIONS) and the helpers fill / initRandomMatrix / convertToPadded /
 * spmvCpu come from config.h.
 */
int main(int argc, char **argv) {
cudaSetDevice(1); // NOTE: GPU index 1 is hard-coded
srand(2013); // fixed seed -> reproducible matrix
float *h_spmv_val, *h_spmv_valPad;
int *h_spmv_cols, *h_spmv_colsPad;
int *h_rowDelimiters, *h_rowDelimitersPad;
float *h_spmv_vec, *h_spmv_out, *spmv_refOut;
int spmv_nItems, nItemsPadded, spmv_numRows;
spmv_numRows = MSIZE * (BLOCK_SIZE/WARP_SIZE);
spmv_nItems = spmv_numRows * (spmv_numRows / SFactor); // 1% of entries will be non-zero
float maxval = 200.0;
// Pinned host buffers for the CSR matrix.
cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float));
cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int));
cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_val, spmv_nItems, maxval);
initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows);
// Set up remaining host data
int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR);
cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float));
spmv_refOut = new float[spmv_numRows];
cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int));
fill(h_spmv_vec, spmv_numRows, maxval);
cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float));
convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad,
&h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded);
// Compute reference solution on the CPU.
spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut);
float *d_spmv_val, *d_spmv_vec, *d_spmv_out;
int *d_spmv_cols, *d_rowDelimiters;
// Allocate device memory
cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float));
cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int));
cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float));
cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float));
cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int));
// Transfer data to device
cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice);
// The kernel reads the matrix/vector through these textures.
cudaBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float));
cudaBindTexture(0,tex_cols,d_spmv_cols,spmv_nItems * sizeof(int));
cudaBindTexture(0,tex_val,d_spmv_val,spmv_nItems * sizeof(float));
// Setup thread configuration: one warp per row, BLOCK_SIZE/WARP_SIZE rows per block.
int spmv_grid = (int) ceil(spmv_numRows / (float)(BLOCK_SIZE / WARP_SIZE));
// Warm up the GPU with 5 untimed launches.
for (int i=0; i<5; i++)
{
spmv_kernel <<<spmv_grid, BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
// Timed runs.
for (int i=0; i<ITERATIONS; i++)
{
spmv_kernel <<<spmv_grid, BLOCK_SIZE>>>
(d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out);
}
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// Get elapsed time and report the per-launch mean.
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time/ITERATIONS << endl;
cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost);
// spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows);
// Bug fix: release everything that was acquired (textures, events, device
// and pinned host memory) -- the original leaked all of it.
cudaUnbindTexture(tex_vec);
cudaUnbindTexture(tex_cols);
cudaUnbindTexture(tex_val);
cudaEventDestroy(kernel_start);
cudaEventDestroy(kernel_stop);
cudaFree(d_spmv_val);
cudaFree(d_spmv_cols);
cudaFree(d_spmv_vec);
cudaFree(d_spmv_out);
cudaFree(d_rowDelimiters);
cudaFreeHost(h_spmv_val);
cudaFreeHost(h_spmv_cols);
cudaFreeHost(h_rowDelimiters);
cudaFreeHost(h_spmv_vec);
cudaFreeHost(h_rowDelimitersPad);
cudaFreeHost(h_spmv_out);
delete[] spmv_refOut;
// h_spmv_valPad / h_spmv_colsPad are allocated inside convertToPadded();
// their allocator is not visible here, so they are deliberately left alone.
return 0;
}
|
937c20b4e2d2dc47911a432cad72c12387a7103d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <glew/glew.h>
#include <GL/freeglut.h>
#include <GL/GL.h>
#include <GL/GLU.h>
#include <GL/glut.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_gl_interop.h"
const int WIDTH = 1920;
const int HEIGHT = 1080;
const int COUNT = 30000000;
//float4* positions;
float2* velocity;
float2* xy;
// Advances every particle by one time step and writes its new (x, y, 0)
// position into the mapped vertex buffer (3 floats per particle).
// delta: elapsed time in milliseconds (converted to seconds below).
// Particles bounce elastically off the window borders, treating
// particleRadius as the collision distance from each wall.
__global__ void simulateFrame(float* positions, float2* velocity, float2* xy, float delta, float particleRadius)
{
delta /= 1000.0f; // ms -> s (float literal: avoid double math in the kernel)
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= COUNT) return;
xy[idx].x += velocity[idx].x * delta;
xy[idx].y += velocity[idx].y * delta;
if (xy[idx].x < 0 + particleRadius) {
velocity[idx].x *= -1;
// Bug fix: mirror the position about the wall at x == particleRadius.
// The old "xy[idx].x *= -1" ignored the radius and could push an
// in-bounds particle (0 < x < radius) out of the window.
xy[idx].x = 2.0f * particleRadius - xy[idx].x;
}
if (xy[idx].x + particleRadius >= WIDTH) {
velocity[idx].x *= -1;
xy[idx].x -= 2 * ((xy[idx].x + particleRadius) - WIDTH);
}
if (xy[idx].y < 0 + particleRadius) {
velocity[idx].y *= -1;
// Bug fix: same reflection fix as for x above.
xy[idx].y = 2.0f * particleRadius - xy[idx].y;
}
if (xy[idx].y + particleRadius >= HEIGHT) {
velocity[idx].y *= -1;
xy[idx].y -= 2 * ((xy[idx].y + particleRadius) - HEIGHT);
}
positions[idx * 3] = xy[idx].x;
positions[idx * 3 + 1] = xy[idx].y;
// Bug fix: the z component was never written, leaving uninitialized data
// in the vertex buffer (glVertexAttribPointer reads 3 floats per vertex).
positions[idx * 3 + 2] = 0.0f;
}
// Fills the first COUNT elements of arr with the constant val
// (one element per thread; excess threads do nothing).
__global__ void setVal(float2* arr, float2 val) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < COUNT) {
        arr[i] = val;
    }
}
void initFrame();
void displayFrame();
GLuint positionsVBO;
struct cudaGraphicsResource* positionsVBO_CUDA;
/**
 * Entry point: seeds per-particle velocities on the host, uploads them to
 * the GPU, initializes every particle position to (100, 100) and starts the
 * GLUT render loop (glutMainLoop does not return).
 */
int main(int argc, char ** argv)
{
// Per-axis speed of 10..209 px/s with a random sign.
// (rand() is intentionally unseeded here -> deterministic runs.)
float2* h_velocity = new float2[COUNT];
for (int idx = 0; idx < COUNT; idx++)
{
h_velocity[idx].x = rand() % 200 + 10;
if (rand() % 2)
h_velocity[idx].x *= -1;
h_velocity[idx].y = rand() % 200 + 10;
if (rand() % 2)
h_velocity[idx].y *= -1;
}
hipMalloc(&velocity, COUNT * sizeof(float2));
hipMemcpy(velocity, h_velocity, COUNT * sizeof(float2), hipMemcpyHostToDevice);
delete[] h_velocity; // bug fix: the host staging buffer was leaked
hipMalloc(&xy, COUNT * sizeof(float2));
hipLaunchKernelGGL(( setVal), dim3((COUNT+1023)/1024), dim3(1024), 0, 0, xy, make_float2(100, 100));
//hipGLSetGLDevice(0);
// Bug fix: the parameters were declared as (int argv, char **argc); the
// swapped names made this call read as glutInit(&argv, argc).
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA);
glutInitWindowPosition(0, 0);
glutInitWindowSize(WIDTH, HEIGHT);
glutCreateWindow("Particles");
glewInit();
initFrame();
glutDisplayFunc(displayFrame);
glutMainLoop();
return 0;
}
GLfloat* positions;
int lastTime = 0;
// One-time GL / HIP-interop setup: orthographic projection matching the
// window, a vertex buffer large enough for COUNT particles (3 floats each),
// and registration of that buffer for write-discard access from the GPU.
void initFrame(){
glClearColor(0.0, 0.0, 0.0, 0.0);
glPointSize(4.0);
glLineWidth(2.0);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, WIDTH, 0.0, HEIGHT);
glGenBuffers(1, &positionsVBO);
glBindBuffer(GL_ARRAY_BUFFER, positionsVBO);
// Bug fix: the buffer must hold COUNT vertices of 3 floats each. The old
// WIDTH * HEIGHT * 4 sizing (~8.3M floats) was far smaller than the
// COUNT * 3 (~90M) floats simulateFrame writes, so the kernel wrote past
// the end of the mapped buffer. Also pass NULL as the data pointer: the
// buffer is only allocated here, never pre-filled from the host.
size_t size = (size_t)COUNT * 3 * sizeof(GLfloat);
glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
hipGraphicsGLRegisterBuffer(&positionsVBO_CUDA, positionsVBO, hipGraphicsMapFlagsWriteDiscard);
lastTime = glutGet(GLUT_ELAPSED_TIME);
}
//void displayFrame() {
// glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// glColor3f(1.0, 0.0, 1.0);
//
// hipGraphicsMapResources(1, &positionsVBO_CUDA, 0);
// size_t num_bytes;
// hipGraphicsResourceGetMappedPointer((void**)&positions, &num_bytes, positionsVBO_CUDA);
// // Execute kernel
//hipLaunchKernelGGL(( simulateFrame) , dim3((COUNT + 1023) / 1024), dim3(1024) , 0, 0, positions, velocity, xy, glutGet(GLUT_ELAPSED_TIME), 1.0);
// // Unmap buffer object
// hipGraphicsUnmapResources(1, &positionsVBO_CUDA, 0);
// // Render from buffer object
// glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// glBindBuffer(GL_ARRAY_BUFFER, positionsVBO);
// glVertexPointer(4, GL_FLOAT, 0, 0);
// glEnableClientState(GL_VERTEX_ARRAY);
// glDrawArrays(GL_POINTS, 0, WIDTH * HEIGHT);
// glDisableClientState(GL_VERTEX_ARRAY);
// // Swap buffers
// glutSwapBuffers();
// glutPostRedisplay();
// /*
// glBegin(GL_POINTS);
// glVertex2f(10.0, 10.0);
// glVertex2f(10.0, 30.0);
// glEnd();
// glFlush();
// */
//}
// Per-frame callback: maps the shared VBO into device memory, runs one
// simulation step sized by the wall-clock time since the previous frame,
// then unmaps the buffer and draws all particles as GL points.
// NOTE(review): the window was created with GLUT_SINGLE, yet glutSwapBuffers
// is called here -- confirm whether double buffering was intended.
void displayFrame() {
// Milliseconds elapsed since the last simulated frame.
int ms = glutGet(GLUT_ELAPSED_TIME) - lastTime;
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glColor3f(1.0, 0.0, 1.0);
// Map the GL buffer so the kernel can write vertex positions directly.
hipGraphicsMapResources(1, &positionsVBO_CUDA, 0);
size_t num_bytes;
hipGraphicsResourceGetMappedPointer((void**)&positions, &num_bytes, positionsVBO_CUDA);
// Execute kernel
lastTime = glutGet(GLUT_ELAPSED_TIME);
hipLaunchKernelGGL(( simulateFrame) , dim3((COUNT + 1023) / 1024), dim3(1024) , 0, 0, positions, velocity, xy, ms, 4.0);
// Unmap buffer object
hipGraphicsUnmapResources(1, &positionsVBO_CUDA, 0);
// Render from buffer object
glBindBuffer(GL_ARRAY_BUFFER, positionsVBO);
glVertexPointer(3, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(1.0, 1.0, 1.0);
glDrawArrays(GL_POINTS, 0, COUNT);
glDisableClientState(GL_VERTEX_ARRAY);
// Swap buffers
glutSwapBuffers();
// Request another frame immediately (continuous animation).
glutPostRedisplay();
/*
glBegin(GL_POINTS);
glVertex2f(10.0, 10.0);
glVertex2f(10.0, 30.0);
glEnd();
glFlush();
*/
}
#include <glew/glew.h>
#include <GL/freeglut.h>
#include <GL/GL.h>
#include <GL/GLU.h>
#include <GL/glut.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_gl_interop.h"
const int WIDTH = 1920;
const int HEIGHT = 1080;
const int COUNT = 30000000;
//float4* positions;
float2* velocity;
float2* xy;
// Advances every particle by one time step and writes its new (x, y, 0)
// position into the mapped vertex buffer (3 floats per particle).
// delta: elapsed time in milliseconds (converted to seconds below).
// Particles bounce elastically off the window borders, treating
// particleRadius as the collision distance from each wall.
__global__ void simulateFrame(float* positions, float2* velocity, float2* xy, float delta, float particleRadius)
{
delta /= 1000.0f; // ms -> s (float literal: avoid double math in the kernel)
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= COUNT) return;
xy[idx].x += velocity[idx].x * delta;
xy[idx].y += velocity[idx].y * delta;
if (xy[idx].x < 0 + particleRadius) {
velocity[idx].x *= -1;
// Bug fix: mirror the position about the wall at x == particleRadius.
// The old "xy[idx].x *= -1" ignored the radius and could push an
// in-bounds particle (0 < x < radius) out of the window.
xy[idx].x = 2.0f * particleRadius - xy[idx].x;
}
if (xy[idx].x + particleRadius >= WIDTH) {
velocity[idx].x *= -1;
xy[idx].x -= 2 * ((xy[idx].x + particleRadius) - WIDTH);
}
if (xy[idx].y < 0 + particleRadius) {
velocity[idx].y *= -1;
// Bug fix: same reflection fix as for x above.
xy[idx].y = 2.0f * particleRadius - xy[idx].y;
}
if (xy[idx].y + particleRadius >= HEIGHT) {
velocity[idx].y *= -1;
xy[idx].y -= 2 * ((xy[idx].y + particleRadius) - HEIGHT);
}
positions[idx * 3] = xy[idx].x;
positions[idx * 3 + 1] = xy[idx].y;
// Bug fix: the z component was never written, leaving uninitialized data
// in the vertex buffer (glVertexAttribPointer reads 3 floats per vertex).
positions[idx * 3 + 2] = 0.0f;
}
// Fills the first COUNT elements of arr with the constant val
// (one element per thread; excess threads do nothing).
__global__ void setVal(float2* arr, float2 val) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < COUNT) {
        arr[i] = val;
    }
}
void initFrame();
void displayFrame();
GLuint positionsVBO;
struct cudaGraphicsResource* positionsVBO_CUDA;
/**
 * Entry point: seeds per-particle velocities on the host, uploads them to
 * the GPU, initializes every particle position to (100, 100) and starts the
 * GLUT render loop (glutMainLoop does not return).
 */
int main(int argc, char ** argv)
{
// Per-axis speed of 10..209 px/s with a random sign.
// (rand() is intentionally unseeded here -> deterministic runs.)
float2* h_velocity = new float2[COUNT];
for (int idx = 0; idx < COUNT; idx++)
{
h_velocity[idx].x = rand() % 200 + 10;
if (rand() % 2)
h_velocity[idx].x *= -1;
h_velocity[idx].y = rand() % 200 + 10;
if (rand() % 2)
h_velocity[idx].y *= -1;
}
cudaMalloc(&velocity, COUNT * sizeof(float2));
cudaMemcpy(velocity, h_velocity, COUNT * sizeof(float2), cudaMemcpyHostToDevice);
delete[] h_velocity; // bug fix: the host staging buffer was leaked
cudaMalloc(&xy, COUNT * sizeof(float2));
setVal<<<(COUNT+1023)/1024, 1024>>>(xy, make_float2(100, 100));
//cudaGLSetGLDevice(0);
// Bug fix: the parameters were declared as (int argv, char **argc); the
// swapped names made this call read as glutInit(&argv, argc).
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA);
glutInitWindowPosition(0, 0);
glutInitWindowSize(WIDTH, HEIGHT);
glutCreateWindow("Particles");
glewInit();
initFrame();
glutDisplayFunc(displayFrame);
glutMainLoop();
return 0;
}
GLfloat* positions;
int lastTime = 0;
// One-time GL / CUDA-interop setup: orthographic projection matching the
// window, a vertex buffer large enough for COUNT particles (3 floats each),
// and registration of that buffer for write-discard access from the GPU.
void initFrame(){
glClearColor(0.0, 0.0, 0.0, 0.0);
glPointSize(4.0);
glLineWidth(2.0);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, WIDTH, 0.0, HEIGHT);
glGenBuffers(1, &positionsVBO);
glBindBuffer(GL_ARRAY_BUFFER, positionsVBO);
// Bug fix: the buffer must hold COUNT vertices of 3 floats each. The old
// WIDTH * HEIGHT * 4 sizing (~8.3M floats) was far smaller than the
// COUNT * 3 (~90M) floats simulateFrame writes, so the kernel wrote past
// the end of the mapped buffer. Also pass NULL as the data pointer: the
// buffer is only allocated here, never pre-filled from the host.
size_t size = (size_t)COUNT * 3 * sizeof(GLfloat);
glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
cudaGraphicsGLRegisterBuffer(&positionsVBO_CUDA, positionsVBO, cudaGraphicsMapFlagsWriteDiscard);
lastTime = glutGet(GLUT_ELAPSED_TIME);
}
//void displayFrame() {
// glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// glColor3f(1.0, 0.0, 1.0);
//
// cudaGraphicsMapResources(1, &positionsVBO_CUDA, 0);
// size_t num_bytes;
// cudaGraphicsResourceGetMappedPointer((void**)&positions, &num_bytes, positionsVBO_CUDA);
// // Execute kernel
// simulateFrame <<<(COUNT + 1023) / 1024, 1024 >>>(positions, velocity, xy, glutGet(GLUT_ELAPSED_TIME), 1.0);
// // Unmap buffer object
// cudaGraphicsUnmapResources(1, &positionsVBO_CUDA, 0);
// // Render from buffer object
// glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// glBindBuffer(GL_ARRAY_BUFFER, positionsVBO);
// glVertexPointer(4, GL_FLOAT, 0, 0);
// glEnableClientState(GL_VERTEX_ARRAY);
// glDrawArrays(GL_POINTS, 0, WIDTH * HEIGHT);
// glDisableClientState(GL_VERTEX_ARRAY);
// // Swap buffers
// glutSwapBuffers();
// glutPostRedisplay();
// /*
// glBegin(GL_POINTS);
// glVertex2f(10.0, 10.0);
// glVertex2f(10.0, 30.0);
// glEnd();
// glFlush();
// */
//}
// Per-frame callback: maps the shared VBO into device memory, runs one
// simulation step sized by the wall-clock time since the previous frame,
// then unmaps the buffer and draws all particles as GL points.
// NOTE(review): the window was created with GLUT_SINGLE, yet glutSwapBuffers
// is called here -- confirm whether double buffering was intended.
void displayFrame() {
// Milliseconds elapsed since the last simulated frame.
int ms = glutGet(GLUT_ELAPSED_TIME) - lastTime;
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glColor3f(1.0, 0.0, 1.0);
// Map the GL buffer so the kernel can write vertex positions directly.
cudaGraphicsMapResources(1, &positionsVBO_CUDA, 0);
size_t num_bytes;
cudaGraphicsResourceGetMappedPointer((void**)&positions, &num_bytes, positionsVBO_CUDA);
// Execute kernel
lastTime = glutGet(GLUT_ELAPSED_TIME);
simulateFrame <<<(COUNT + 1023) / 1024, 1024 >>>(positions, velocity, xy, ms, 4.0);
// Unmap buffer object
cudaGraphicsUnmapResources(1, &positionsVBO_CUDA, 0);
// Render from buffer object
glBindBuffer(GL_ARRAY_BUFFER, positionsVBO);
glVertexPointer(3, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(1.0, 1.0, 1.0);
glDrawArrays(GL_POINTS, 0, COUNT);
glDisableClientState(GL_VERTEX_ARRAY);
// Swap buffers
glutSwapBuffers();
// Request another frame immediately (continuous animation).
glutPostRedisplay();
/*
glBegin(GL_POINTS);
glVertex2f(10.0, 10.0);
glVertex2f(10.0, 30.0);
glEnd();
glFlush();
*/
}
9bc2b0ba9b71782400bd4c35bd67faa9a39def1f.hip | // !!! This is a file automatically generated by hipify!!!
/**
* The MIT License (MIT)
*
* Copyright (c) 2016 Kyle Hollins Wray, University of Massachusetts
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nova/mdp/utilities/mdp_model_gpu.h>
#include <stdio.h>
#include <nova/error_codes.h>
#include <nova/constants.h>
namespace nova {
// Copies the full MDP model (successors, state transitions, rewards and,
// when present, goals) to the device. Returns the sum of the
// sub-initializers' status codes, so 0 (NOVA_SUCCESS) means all succeeded.
int mdp_initialize_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_initialize_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
int result = 0;
result += mdp_initialize_successors_gpu(mdp);
result += mdp_initialize_state_transitions_gpu(mdp);
result += mdp_initialize_rewards_gpu(mdp);
// Goals are optional; only copy them when some are defined.
if (mdp->ng > 0) {
result += mdp_initialize_goals_gpu(mdp);
}
return result;
}
// Frees every device-side buffer allocated by mdp_initialize_gpu.
// Returns the sum of the sub-uninitializers' status codes.
int mdp_uninitialize_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_uninitialize_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
int result = 0;
result += mdp_uninitialize_successors_gpu(mdp);
result += mdp_uninitialize_state_transitions_gpu(mdp);
result += mdp_uninitialize_rewards_gpu(mdp);
if (mdp->ng > 0) {
result += mdp_uninitialize_goals_gpu(mdp);
}
return result;
}
// Allocates mdp->d_S on the device and copies the n * m * ns successor
// state indices into it.
int mdp_initialize_successors_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp == nullptr || mdp->n == 0 || mdp->m == 0 || mdp->ns == 0 || mdp->S == nullptr) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&mdp->d_S, mdp->n * mdp->m * mdp->ns * sizeof(int)) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n",
"Failed to allocate device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(mdp->d_S, mdp->S, mdp->n * mdp->m * mdp->ns * sizeof(int),
hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n",
"Failed to copy memory from host to device for the successor states.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
// Frees mdp->d_S (if allocated) and resets the pointer to nullptr.
int mdp_uninitialize_successors_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_uninitialize_successors_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (mdp->d_S != nullptr) {
if (hipFree(mdp->d_S) != hipSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_successors_gpu]: %s\n",
"Failed to free device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_S = nullptr;
return NOVA_SUCCESS;
}
// Allocates mdp->d_T on the device and copies the n * m * ns state
// transition probabilities into it. Returns NOVA_SUCCESS, or an error code
// on invalid input, allocation failure, or copy failure.
int mdp_initialize_state_transitions_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp == nullptr || mdp->n == 0 || mdp->m == 0 || mdp->ns == 0 || mdp->T == nullptr) {
fprintf(stderr, "Error[mdp_initialize_state_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&mdp->d_T, mdp->n * mdp->m * mdp->ns * sizeof(float)) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_state_transitions_gpu]: %s\n",
"Failed to allocate device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(mdp->d_T, mdp->T, mdp->n * mdp->m * mdp->ns * sizeof(float),
hipMemcpyHostToDevice) != hipSuccess) {
// Bug fix: the error tag previously named an unrelated function
// ("nova_mdp_pbvi_initialize_state_transitions"); use this function's
// name, consistent with every other initializer in this file.
fprintf(stderr, "Error[mdp_initialize_state_transitions_gpu]: %s\n",
"Failed to copy memory from host to device for the state transitions.");
// Avoid leaking the allocation when the copy fails.
hipFree(mdp->d_T);
mdp->d_T = nullptr;
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
// Frees mdp->d_T (if allocated) and resets the pointer to nullptr.
int mdp_uninitialize_state_transitions_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_uninitialize_state_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (mdp->d_T != nullptr) {
if (hipFree(mdp->d_T) != hipSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_state_transitions_gpu]: %s\n",
"Failed to free device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_T = nullptr;
return NOVA_SUCCESS;
}
// Allocates mdp->d_R on the device and copies the n * m rewards into it.
int mdp_initialize_rewards_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp == nullptr || mdp->n == 0 || mdp->m == 0 || mdp->R == nullptr) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&mdp->d_R, mdp->n * mdp->m * sizeof(float)) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n",
"Failed to allocate device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(mdp->d_R, mdp->R, mdp->n * mdp->m * sizeof(float),
hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n",
"Failed to copy memory from host to device for the rewards.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
// Frees mdp->d_R (if allocated) and resets the pointer to nullptr.
int mdp_uninitialize_rewards_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_uninitialize_rewards_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (mdp->d_R != nullptr) {
if (hipFree(mdp->d_R) != hipSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_rewards_gpu]: %s\n",
"Failed to free device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_R = nullptr;
return NOVA_SUCCESS;
}
// Allocates mdp->d_goals on the device and copies the ng goal state
// indices into it. Callers only invoke this when mdp->ng > 0.
int mdp_initialize_goals_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp == nullptr || mdp->ng == 0 || mdp->goals == nullptr) {
fprintf(stderr, "Error[mdp_initialize_goals_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (hipMalloc(&mdp->d_goals, mdp->ng * sizeof(unsigned int)) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_goals_gpu]: %s\n",
"Failed to allocate device-side memory for the goals.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (hipMemcpy(mdp->d_goals, mdp->goals, mdp->ng * sizeof(unsigned int),
hipMemcpyHostToDevice) != hipSuccess) {
fprintf(stderr, "Error[mdp_initialize_goals_gpu]: %s\n",
"Failed to copy memory from host to device for the goals.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
// Frees mdp->d_goals (if allocated) and resets the pointer to nullptr.
int mdp_uninitialize_goals_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_uninitialize_goals_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (mdp->d_goals != nullptr) {
if (hipFree(mdp->d_goals) != hipSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_goals_gpu]: %s\n",
"Failed to free device-side memory for the goals.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_goals = nullptr;
return NOVA_SUCCESS;
}
}; // namespace nova
| 9bc2b0ba9b71782400bd4c35bd67faa9a39def1f.cu | /**
* The MIT License (MIT)
*
* Copyright (c) 2016 Kyle Hollins Wray, University of Massachusetts
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <nova/mdp/utilities/mdp_model_gpu.h>
#include <stdio.h>
#include <nova/error_codes.h>
#include <nova/constants.h>
namespace nova {
// Copies the full MDP model (successors, state transitions, rewards and,
// when present, goals) to the device. Returns the sum of the
// sub-initializers' status codes, so 0 (NOVA_SUCCESS) means all succeeded.
int mdp_initialize_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_initialize_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
int result = 0;
result += mdp_initialize_successors_gpu(mdp);
result += mdp_initialize_state_transitions_gpu(mdp);
result += mdp_initialize_rewards_gpu(mdp);
// Goals are optional; only copy them when some are defined.
if (mdp->ng > 0) {
result += mdp_initialize_goals_gpu(mdp);
}
return result;
}
// Frees every device-side buffer allocated by mdp_initialize_gpu.
// Returns the sum of the sub-uninitializers' status codes.
int mdp_uninitialize_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_uninitialize_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
int result = 0;
result += mdp_uninitialize_successors_gpu(mdp);
result += mdp_uninitialize_state_transitions_gpu(mdp);
result += mdp_uninitialize_rewards_gpu(mdp);
if (mdp->ng > 0) {
result += mdp_uninitialize_goals_gpu(mdp);
}
return result;
}
// Allocates mdp->d_S on the device and copies the n * m * ns successor
// state indices into it.
int mdp_initialize_successors_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp == nullptr || mdp->n == 0 || mdp->m == 0 || mdp->ns == 0 || mdp->S == nullptr) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&mdp->d_S, mdp->n * mdp->m * mdp->ns * sizeof(int)) != cudaSuccess) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n",
"Failed to allocate device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(mdp->d_S, mdp->S, mdp->n * mdp->m * mdp->ns * sizeof(int),
cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[mdp_initialize_successors_gpu]: %s\n",
"Failed to copy memory from host to device for the successor states.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
// Frees mdp->d_S (if allocated) and resets the pointer to nullptr.
int mdp_uninitialize_successors_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_uninitialize_successors_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (mdp->d_S != nullptr) {
if (cudaFree(mdp->d_S) != cudaSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_successors_gpu]: %s\n",
"Failed to free device-side memory for the successor states.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_S = nullptr;
return NOVA_SUCCESS;
}
// Allocates mdp->d_T on the device and copies the n * m * ns state
// transition probabilities into it. Returns NOVA_SUCCESS, or an error code
// on invalid input, allocation failure, or copy failure.
int mdp_initialize_state_transitions_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp == nullptr || mdp->n == 0 || mdp->m == 0 || mdp->ns == 0 || mdp->T == nullptr) {
fprintf(stderr, "Error[mdp_initialize_state_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&mdp->d_T, mdp->n * mdp->m * mdp->ns * sizeof(float)) != cudaSuccess) {
fprintf(stderr, "Error[mdp_initialize_state_transitions_gpu]: %s\n",
"Failed to allocate device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(mdp->d_T, mdp->T, mdp->n * mdp->m * mdp->ns * sizeof(float),
cudaMemcpyHostToDevice) != cudaSuccess) {
// Bug fix: the error tag previously named an unrelated function
// ("nova_mdp_pbvi_initialize_state_transitions"); use this function's
// name, consistent with every other initializer in this file.
fprintf(stderr, "Error[mdp_initialize_state_transitions_gpu]: %s\n",
"Failed to copy memory from host to device for the state transitions.");
// Avoid leaking the allocation when the copy fails.
cudaFree(mdp->d_T);
mdp->d_T = nullptr;
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
// Frees mdp->d_T (if allocated) and resets the pointer to nullptr.
int mdp_uninitialize_state_transitions_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_uninitialize_state_transitions_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (mdp->d_T != nullptr) {
if (cudaFree(mdp->d_T) != cudaSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_state_transitions_gpu]: %s\n",
"Failed to free device-side memory for the state transitions.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_T = nullptr;
return NOVA_SUCCESS;
}
int mdp_initialize_rewards_gpu(MDP *mdp)
{
// Ensure the data is valid.
if (mdp == nullptr || mdp->n == 0 || mdp->m == 0 || mdp->R == nullptr) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
// Allocate the memory on the device.
if (cudaMalloc(&mdp->d_R, mdp->n * mdp->m * sizeof(float)) != cudaSuccess) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n",
"Failed to allocate device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_MALLOC;
}
// Copy the data from the host to the device.
if (cudaMemcpy(mdp->d_R, mdp->R, mdp->n * mdp->m * sizeof(float),
cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Error[mdp_initialize_rewards_gpu]: %s\n",
"Failed to copy memory from host to device for the rewards.");
return NOVA_ERROR_MEMCPY_TO_DEVICE;
}
return NOVA_SUCCESS;
}
int mdp_uninitialize_rewards_gpu(MDP *mdp)
{
if (mdp == nullptr) {
fprintf(stderr, "Error[mdp_uninitialize_rewards_gpu]: %s\n", "Invalid input.");
return NOVA_ERROR_INVALID_DATA;
}
if (mdp->d_R != nullptr) {
if (cudaFree(mdp->d_R) != cudaSuccess) {
fprintf(stderr, "Error[mdp_uninitialize_rewards_gpu]: %s\n",
"Failed to free device-side memory for the rewards.");
return NOVA_ERROR_DEVICE_FREE;
}
}
mdp->d_R = nullptr;
return NOVA_SUCCESS;
}
// Allocate device-side memory for the goal state indices (ng unsigned ints)
// and copy the host-side values into it. Returns NOVA_SUCCESS or NOVA_ERROR_*.
int mdp_initialize_goals_gpu(MDP *mdp)
{
    // Ensure the data is valid.
    if (mdp == nullptr || mdp->ng == 0 || mdp->goals == nullptr) {
        fprintf(stderr, "Error[mdp_initialize_goals_gpu]: %s\n", "Invalid input.");
        return NOVA_ERROR_INVALID_DATA;
    }
    // Allocate the memory on the device.
    if (cudaMalloc(&mdp->d_goals, mdp->ng * sizeof(unsigned int)) != cudaSuccess) {
        fprintf(stderr, "Error[mdp_initialize_goals_gpu]: %s\n",
                "Failed to allocate device-side memory for the goals.");
        return NOVA_ERROR_DEVICE_MALLOC;
    }
    // Copy the data from the host to the device.
    if (cudaMemcpy(mdp->d_goals, mdp->goals, mdp->ng * sizeof(unsigned int),
                cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "Error[mdp_initialize_goals_gpu]: %s\n",
                "Failed to copy memory from host to device for the goals.");
        // Free the allocation made above so a failed copy does not leak it.
        cudaFree(mdp->d_goals);
        mdp->d_goals = nullptr;
        return NOVA_ERROR_MEMCPY_TO_DEVICE;
    }
    return NOVA_SUCCESS;
}
// Release the device-side goals buffer, if one was allocated, and clear
// the pointer. Returns NOVA_SUCCESS or a NOVA_ERROR_* code.
int mdp_uninitialize_goals_gpu(MDP *mdp)
{
    if (mdp == nullptr) {
        fprintf(stderr, "Error[mdp_uninitialize_goals_gpu]: %s\n", "Invalid input.");
        return NOVA_ERROR_INVALID_DATA;
    }
    // Only attempt the free when an allocation is actually present.
    if (mdp->d_goals != nullptr &&
            cudaFree(mdp->d_goals) != cudaSuccess) {
        fprintf(stderr, "Error[mdp_uninitialize_goals_gpu]: %s\n",
                "Failed to free device-side memory for the goals.");
        return NOVA_ERROR_DEVICE_FREE;
    }
    mdp->d_goals = nullptr;
    return NOVA_SUCCESS;
}
}; // namespace nova
|
7161892a46ccf8b123e4a90553bc369d1b887906.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <rocblas.h>
#include <hiprand/hiprand.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include "rbm.h"
#include "rbm_kernels.h"
#include "utils.h"
#define INFO true
#define DEBUG true
#define BLOCK_SIZE 1024
// Construct an RBM: allocate the (visible+1) x (hidden+1) device weight
// matrix (the extra row/column holds the biases) and seed it with small
// normally distributed values, then create the BLAS handle used for GEMMs.
RBM::RBM(int visible, int hidden, float rate) {
    numVisible = visible;
    numHidden = hidden;
    learningRate = rate;

    const int nWeights = (numVisible + 1) * (numHidden + 1); // +1 for the bias units
    checkCudaError(__LINE__, hipMalloc(&dWeights, nWeights * sizeof(float)));
    checkCuRandError(__LINE__, hiprandCreateGenerator(&generator, HIPRAND_RNG_QUASI_DEFAULT));
    // N(0, 0.1) initial weights.
    checkCuRandError(__LINE__, hiprandGenerateNormal(generator, dWeights, nWeights, 0.0, 0.1));
    if (INFO) {
        std::cout << "Initial weights:" << std::endl;
        printDeviceColumnMajorMatrix(dWeights, numVisible + 1, numHidden + 1);
    }
    checkCuBlasError(__LINE__, hipblasCreate(&handle));
    std::cout << "RBM initialized" << std::endl;
}
// Tear down all device-side resources owned by this RBM. The three
// teardown calls are independent of one another.
RBM::~RBM() {
    hiprandDestroyGenerator(generator);          // random number generator
    hipblasDestroy(handle);                      // BLAS handle
    checkCudaError(__LINE__, hipFree(dWeights)); // device weight matrix
    std::cout << "RBM destroyed" << std::endl;
}
// Compute the hidden-unit activation probabilities for a batch of visible
// states: energy = V * W, then an element-wise sigmoid.
//
// dVisibleUnitsStates: column-major (examplesNumber x (numVisible+1)) device
//   matrix; callers put the bias column of ones first (see train()).
// Returns a freshly hipMalloc'd column-major (examplesNumber x (numHidden+1))
//   matrix; the caller owns it and must hipFree it.
// NOTE(review): the returned first column is sigmoid(energy), not a bias of
//   ones — callers overwrite it afterwards (see train()); confirm if reused.
float *RBM::hiddenActivationProbabilities(float *dVisibleUnitsStates, int examplesNumber) {
    float *dHiddenUnitsActivationEnergy; // matrix of float values of dim exh
    float *dHiddenUnitsActivationProbabilities; // matrix of [0,1] values of dim exh
    int hiddenBufferSize = (numHidden + 1) * examplesNumber;
    checkCudaError(__LINE__, hipMalloc(&dHiddenUnitsActivationEnergy, hiddenBufferSize * sizeof(float)));
    checkCudaError(__LINE__, hipMalloc(&dHiddenUnitsActivationProbabilities, hiddenBufferSize * sizeof(float)));
    if (DEBUG) std::cout << "Calculating hidden units activation energies" << std::endl;
    const float alpha = 1;
    const float beta = 0;
    // energy(e x h+1) = V(e x v+1) * W(v+1 x h+1); hipBLAS is column-major,
    // hence the leading dimensions below.
    checkCuBlasError(__LINE__, hipblasSgemm(
        handle,
        HIPBLAS_OP_N,
        HIPBLAS_OP_N,
        examplesNumber,
        numHidden + 1,
        numVisible + 1,
        &alpha,
        dVisibleUnitsStates,
        examplesNumber, // lda
        dWeights,
        numVisible + 1, // ldb
        &beta,
        dHiddenUnitsActivationEnergy,
        examplesNumber)); // ldc
    if (DEBUG) printDeviceColumnMajorMatrix(dHiddenUnitsActivationEnergy, examplesNumber, numHidden + 1);
    // One extra block covers the remainder; assumes the sigmoid kernel
    // bounds-checks against hiddenBufferSize — TODO confirm in rbm_kernels.
    int blockNumber = hiddenBufferSize / BLOCK_SIZE + 1;
    if (DEBUG) std::cout << "Calculating hidden probabilities " << BLOCK_SIZE << std::endl;
    hipLaunchKernelGGL(( sigmoid), dim3(blockNumber), dim3(BLOCK_SIZE), 0, 0, dHiddenUnitsActivationEnergy, dHiddenUnitsActivationProbabilities, hiddenBufferSize);
    checkCudaError(__LINE__);
    if (DEBUG) printDeviceColumnMajorMatrix(dHiddenUnitsActivationProbabilities, examplesNumber, numHidden + 1);
    checkCudaError(__LINE__, hipFree(dHiddenUnitsActivationEnergy));
    return dHiddenUnitsActivationProbabilities;
}
// Compute the visible-unit activation probabilities for a batch of hidden
// states: energy = H * W^T, then an element-wise sigmoid.
//
// dHiddenUnitsStates: column-major (examplesNumber x (numHidden+1)) device
//   matrix with the bias column first.
// Returns a freshly hipMalloc'd column-major (examplesNumber x (numVisible+1))
//   matrix; the caller owns it and must hipFree it.
float *RBM::visibleActivationProbabilities(float *dHiddenUnitsStates, int examplesNumber) {
    float *dVisibleUnitsActivationEnergy; // matrix of float values of dim exv
    float *dVisibleUnitsActivationProbabilities; // matrix of [0,1] values of dim exv
    int visibleBufferSize = (numVisible + 1) * examplesNumber;
    checkCudaError(__LINE__, hipMalloc(&dVisibleUnitsActivationEnergy, visibleBufferSize * sizeof(float)));
    checkCudaError(__LINE__, hipMalloc(&dVisibleUnitsActivationProbabilities, visibleBufferSize * sizeof(float)));
    if (DEBUG) std::cout << "Calculating visible units activation energies" << std::endl;
    const float alpha = 1;
    const float beta = 0;
    // energy(e x v+1) = H(e x h+1) * W^T(h+1 x v+1); column-major GEMM.
    checkCuBlasError(__LINE__, hipblasSgemm(
        handle,
        HIPBLAS_OP_N,
        HIPBLAS_OP_T,
        examplesNumber,
        numVisible + 1,
        numHidden + 1,
        &alpha,
        dHiddenUnitsStates,
        examplesNumber, // lda
        dWeights,
        numVisible + 1, // ldb
        &beta,
        dVisibleUnitsActivationEnergy,
        examplesNumber)); // ldc
    if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsActivationEnergy, examplesNumber, numVisible + 1);
    int blockNumber = visibleBufferSize / BLOCK_SIZE + 1;
    if (DEBUG) std::cout << "Calculating visible probabilities" << std::endl;
    hipLaunchKernelGGL(( sigmoid), dim3(blockNumber), dim3(BLOCK_SIZE), 0, 0, dVisibleUnitsActivationEnergy, dVisibleUnitsActivationProbabilities, visibleBufferSize);
    checkCudaError(__LINE__);
    // BUGFIX: print the probabilities only after the sigmoid kernel has filled
    // them. The original also printed this buffer before the launch, dumping
    // uninitialized device memory (the hidden-side method never did that).
    if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsActivationProbabilities, examplesNumber, numVisible + 1);
    checkCudaError(__LINE__, hipFree(dVisibleUnitsActivationEnergy));
    return dVisibleUnitsActivationProbabilities;
}
// Compute the association matrix A = V^T * H, where V is the column-major
// (examplesNumber x (numVisible+1)) visible matrix and H the column-major
// (examplesNumber x (numHidden+1)) hidden matrix (both with a leading bias
// column). Used for both the positive and negative CD statistics.
// Returns a freshly hipMalloc'd (numVisible+1) x (numHidden+1) buffer; the
// caller owns it and must hipFree it.
float *RBM::computeAssociations(float *dVisibleUnitsActivationProbabilities, float *dHiddenUnitsActivationProbabilities, int examplesNumber) {
    float *dAssociations; // vxh matrix
    checkCudaError(__LINE__, hipMalloc(&dAssociations, (numVisible + 1) * (numHidden + 1) * sizeof(float))); // +1 because of bias
    const float alpha = 1;
    const float beta = 0;
    // A = alpha * V^T * H + beta * A; OP_T on V gives the (v+1 x e)(e x h+1)
    // product in column-major layout.
    checkCuBlasError(__LINE__, hipblasSgemm(
        handle,
        HIPBLAS_OP_T,
        HIPBLAS_OP_N,
        numVisible + 1,
        numHidden + 1,
        examplesNumber,
        &alpha,
        dVisibleUnitsActivationProbabilities,
        examplesNumber, // lda
        dHiddenUnitsActivationProbabilities,
        examplesNumber, // ldb
        &beta,
        dAssociations,
        numVisible + 1)); // ldc
    if (DEBUG) std::cout << "Associations:" << std::endl;
    if (DEBUG) printDeviceColumnMajorMatrix(dAssociations, numVisible + 1, numHidden + 1);
    return dAssociations;
}
// a contrastive divergence (CD) learning algorithm with k=1; batched version
//
// hTrainingData: host-side, column-major (examplesNumber x numVisible) batch.
// Performs maxEpochs full-batch CD-1 updates of dWeights and prints the
// reconstruction error after every epoch.
void RBM::train(float *hTrainingData, int examplesNumber, int maxEpochs) {
    // Column of ones, copied in as the first (bias) column of the visible and
    // hidden matrices. Heap-allocated: the original used a variable-length
    // array (`float hBias[examplesNumber]`), a non-standard C++ extension
    // that can overflow the stack for large batches.
    float *hBias = new float[examplesNumber];
    std::fill_n(hBias, examplesNumber, 1.0f);
    float *dVisibleUnitsStates; // device copy of training data
    float *dVisibleUnitsActivationProbabilities; // matrix of [0,1] of dimensions exv
    float *dHiddenUnitsStates; // matrix of boolean values of dimensions exh
    float *dPositiveHiddenUnitsActivationProbabilities; // matrix of [0,1] of dimensions exh
    float *dNegativeHiddenUnitsActivationProbabilities; // matrix of [0,1] of dimensions exh
    float *dPositiveAssociations; // matrix of dimensions vxh
    float *dNegativeAssociations; // matrix of dimensions vxh
    float *dRandom; // matrix of dimensions exh of random values [0,1]
    int visibleBufferSize = (numVisible + 1) * examplesNumber;
    int hiddenBufferSize = (numHidden + 1) * examplesNumber;
    checkCudaError(__LINE__, hipMalloc(&dVisibleUnitsStates, visibleBufferSize * sizeof(float)));
    checkCudaError(__LINE__, hipMalloc(&dHiddenUnitsStates, hiddenBufferSize * sizeof(float)));
    checkCudaError(__LINE__, hipMalloc(&dRandom, hiddenBufferSize * sizeof(float)));
    for (int e = 0; e < maxEpochs; e++) {
        // a positive phase of the contrastive divergence
        // copy bias to the first column
        checkCudaError(__LINE__, hipMemcpy(dVisibleUnitsStates, hBias, examplesNumber * sizeof(float), hipMemcpyHostToDevice));
        // copy training data to remaining cells
        checkCudaError(__LINE__, hipMemcpy(&dVisibleUnitsStates[examplesNumber],
                hTrainingData,
                numVisible * examplesNumber * sizeof(float),
                hipMemcpyHostToDevice));
        if (DEBUG) std::cout << "Visible units states:" << std::endl;
        if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsStates, examplesNumber, numVisible + 1);
        // calculate positive hidden activation probabilities
        dPositiveHiddenUnitsActivationProbabilities = hiddenActivationProbabilities(dVisibleUnitsStates, examplesNumber);
        if (DEBUG) std::cout << "Fixing hidden units activation probabilities by setting bias to the first column" << std::endl;
        checkCudaError(__LINE__, hipMemcpy(dPositiveHiddenUnitsActivationProbabilities, hBias, examplesNumber * sizeof(float), hipMemcpyHostToDevice));
        if (DEBUG) printDeviceColumnMajorMatrix(dPositiveHiddenUnitsActivationProbabilities, examplesNumber, numHidden + 1);
        if (DEBUG) std::cout << "Calculating hidden unit states by sampling" << std::endl;
        checkCuRandError(__LINE__, hiprandGenerateUniform(generator, dRandom, hiddenBufferSize));
        int blockNumber = hiddenBufferSize / BLOCK_SIZE + 1;
        // a hidden unit fires iff its probability exceeds the uniform draw
        hipLaunchKernelGGL(( greaterThan), dim3(blockNumber), dim3(BLOCK_SIZE), 0, 0, dPositiveHiddenUnitsActivationProbabilities, dRandom, dHiddenUnitsStates, examplesNumber * (numHidden + 1));
        checkCudaError(__LINE__);
        if (DEBUG) printDeviceColumnMajorMatrix(dHiddenUnitsStates, examplesNumber, numHidden + 1);
        dPositiveAssociations = computeAssociations(dVisibleUnitsStates, dPositiveHiddenUnitsActivationProbabilities, examplesNumber);
        // a negative (reconstruction) phase of the contrastive divergence
        // calculate negative visible probabilities
        dVisibleUnitsActivationProbabilities = visibleActivationProbabilities(dHiddenUnitsStates, examplesNumber);
        if (DEBUG) std::cout << "Visible Units Activation Probabilities:" << std::endl;
        if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsActivationProbabilities, examplesNumber, numVisible + 1);
        if (DEBUG) std::cout << "Fixing visible units activation probabilities by setting bias to the first column" << std::endl;
        checkCudaError(__LINE__, hipMemcpy(dVisibleUnitsActivationProbabilities, hBias, examplesNumber * sizeof(float), hipMemcpyHostToDevice));
        if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsActivationProbabilities, examplesNumber, numVisible + 1);
        // negative hidden probabilities
        dNegativeHiddenUnitsActivationProbabilities = hiddenActivationProbabilities(dVisibleUnitsActivationProbabilities, examplesNumber);
        if (DEBUG) std::cout << "Negative Hidden units activation probabilities:" << std::endl;
        if (DEBUG) printDeviceColumnMajorMatrix(dNegativeHiddenUnitsActivationProbabilities, examplesNumber, numHidden + 1);
        if (DEBUG) std::cout << "Calculating negative associations" << std::endl;
        dNegativeAssociations = computeAssociations(dVisibleUnitsActivationProbabilities, dNegativeHiddenUnitsActivationProbabilities, examplesNumber);
        if (DEBUG) std::cout << "Updating weights" << std::endl;
        int weightsNumber = (numHidden + 1) * (numVisible + 1);
        blockNumber = weightsNumber / BLOCK_SIZE + 1;
        // presumably W += rate * (positive - negative) / examples; the kernel
        // lives in rbm_kernels — confirm there.
        hipLaunchKernelGGL(( updateWeight), dim3(blockNumber), dim3(BLOCK_SIZE), 0, 0, dWeights, dPositiveAssociations, dNegativeAssociations, weightsNumber, examplesNumber, learningRate);
        checkCudaError(__LINE__);
        if (DEBUG) printDeviceColumnMajorMatrix(dWeights, numVisible + 1, numHidden + 1);
        blockNumber = visibleBufferSize / BLOCK_SIZE + 1;
        if (DEBUG) std::cout << "Calculating error - squares of subtractions: " << std::endl;
        // for memory efficiency we will write subtraction result to one of the input matrices (dVisibleUnitsStates)
        hipLaunchKernelGGL(( subAndSquare), dim3(blockNumber), dim3(BLOCK_SIZE), 0, 0, dVisibleUnitsStates, dVisibleUnitsActivationProbabilities, visibleBufferSize);
        checkCudaError(__LINE__);
        if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsStates, examplesNumber, numVisible + 1);
        if (DEBUG) std::cout << "Calculation error - reducing sum:" << std::endl;
        thrust::device_ptr<float> dVisibleUnitsStatesPtr(dVisibleUnitsStates);
        // 0.0f (not 0.0): a double initial value would route a float reduction
        // through implicit float<->double conversions.
        float hError = thrust::reduce(dVisibleUnitsStatesPtr, dVisibleUnitsStatesPtr + visibleBufferSize, 0.0f, thrust::plus<float>());
        if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsStates, examplesNumber, numVisible + 1);
        std::cout << "Reconstruction error after epoch " << e + 1 << " is " << hError << std::endl;
        checkCudaError(__LINE__, hipFree(dVisibleUnitsActivationProbabilities));
        checkCudaError(__LINE__, hipFree(dPositiveHiddenUnitsActivationProbabilities));
        checkCudaError(__LINE__, hipFree(dNegativeHiddenUnitsActivationProbabilities));
        checkCudaError(__LINE__, hipFree(dPositiveAssociations));
        checkCudaError(__LINE__, hipFree(dNegativeAssociations));
    }
    checkCudaError(__LINE__, hipFree(dRandom));
    checkCudaError(__LINE__, hipFree(dVisibleUnitsStates));
    checkCudaError(__LINE__, hipFree(dHiddenUnitsStates));
    delete[] hBias;
    if (INFO) std::cout << "Learned weights:" << std::endl;
    if (INFO) printDeviceColumnMajorMatrix(dWeights, numVisible + 1, numHidden + 1);
}
// Sample a binary hidden-state vector for a single visible example.
// hVisible: host array of numVisible floats.
// Returns a malloc'd host array of numHidden 0/1 floats; the caller frees it.
float *RBM::hiddenStates(float *hVisible) {
    float *dVisible;
    float *dHidden;
    float *hHidden;
    float *dRandom;
    checkCudaError(__LINE__, hipMalloc(&dVisible, (numVisible + 1) * sizeof(float)));
    float bias = 1.0;
    // Element 0 is the bias unit; the real visible units follow it.
    checkCudaError(__LINE__, hipMemcpy(dVisible, &bias, sizeof(float), hipMemcpyHostToDevice)); // set bias
    checkCudaError(__LINE__, hipMemcpy(&dVisible[1], hVisible, numVisible * sizeof(float), hipMemcpyHostToDevice));
    // hiddenActivationProbabilities() allocates and returns its own buffer;
    // the original also hipMalloc'd dHidden beforehand and leaked that
    // allocation when the pointer was overwritten here.
    dHidden = hiddenActivationProbabilities(dVisible, 1);
    // sampling: a unit is on iff its probability exceeds the uniform draw
    checkCudaError(__LINE__, hipMalloc(&dRandom, (numHidden + 1) * sizeof(float)));
    checkCuRandError(__LINE__, hiprandGenerateUniform(generator, dRandom, numHidden + 1));
    int blockNumber = (numHidden + 1) / BLOCK_SIZE + 1;
    hipLaunchKernelGGL(( greaterThan), dim3(blockNumber), dim3(BLOCK_SIZE), 0, 0, dHidden, dRandom, dHidden, numHidden + 1);
    checkCudaError(__LINE__);
    hHidden = (float *) malloc(numHidden * sizeof(float));
    // Skip element 0 (bias) when copying back; the copy is now checked too.
    checkCudaError(__LINE__, hipMemcpy(hHidden, &dHidden[1], numHidden * sizeof(float), hipMemcpyDeviceToHost));
    checkCudaError(__LINE__, hipFree(dHidden));
    checkCudaError(__LINE__, hipFree(dVisible));
    checkCudaError(__LINE__, hipFree(dRandom));
    return hHidden;
}
| 7161892a46ccf8b123e4a90553bc369d1b887906.cu | #include <iostream>
#include <cublas_v2.h>
#include <curand.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include "rbm.h"
#include "rbm_kernels.h"
#include "utils.h"
#define INFO true
#define DEBUG true
#define BLOCK_SIZE 1024
// Construct an RBM: allocate the (visible+1) x (hidden+1) device weight
// matrix (the extra row/column holds the biases) and seed it with small
// normally distributed values, then create the cuBLAS handle used for GEMMs.
RBM::RBM(int visible, int hidden, float rate) {
    numVisible = visible;
    numHidden = hidden;
    learningRate = rate;

    const int nWeights = (numVisible + 1) * (numHidden + 1); // +1 for the bias units
    checkCudaError(__LINE__, cudaMalloc(&dWeights, nWeights * sizeof(float)));
    checkCuRandError(__LINE__, curandCreateGenerator(&generator, CURAND_RNG_QUASI_DEFAULT));
    // N(0, 0.1) initial weights.
    checkCuRandError(__LINE__, curandGenerateNormal(generator, dWeights, nWeights, 0.0, 0.1));
    if (INFO) {
        std::cout << "Initial weights:" << std::endl;
        printDeviceColumnMajorMatrix(dWeights, numVisible + 1, numHidden + 1);
    }
    checkCuBlasError(__LINE__, cublasCreate(&handle));
    std::cout << "RBM initialized" << std::endl;
}
// Tear down all device-side resources owned by this RBM. The three
// teardown calls are independent of one another.
RBM::~RBM() {
    curandDestroyGenerator(generator);            // random number generator
    cublasDestroy(handle);                        // cuBLAS handle
    checkCudaError(__LINE__, cudaFree(dWeights)); // device weight matrix
    std::cout << "RBM destroyed" << std::endl;
}
// Compute the hidden-unit activation probabilities for a batch of visible
// states: energy = V * W, then an element-wise sigmoid.
//
// dVisibleUnitsStates: column-major (examplesNumber x (numVisible+1)) device
//   matrix; callers put the bias column of ones first (see train()).
// Returns a freshly cudaMalloc'd column-major (examplesNumber x (numHidden+1))
//   matrix; the caller owns it and must cudaFree it.
// NOTE(review): the returned first column is sigmoid(energy), not a bias of
//   ones — callers overwrite it afterwards (see train()); confirm if reused.
float *RBM::hiddenActivationProbabilities(float *dVisibleUnitsStates, int examplesNumber) {
    float *dHiddenUnitsActivationEnergy; // matrix of float values of dim exh
    float *dHiddenUnitsActivationProbabilities; // matrix of [0,1] values of dim exh
    int hiddenBufferSize = (numHidden + 1) * examplesNumber;
    checkCudaError(__LINE__, cudaMalloc(&dHiddenUnitsActivationEnergy, hiddenBufferSize * sizeof(float)));
    checkCudaError(__LINE__, cudaMalloc(&dHiddenUnitsActivationProbabilities, hiddenBufferSize * sizeof(float)));
    if (DEBUG) std::cout << "Calculating hidden units activation energies" << std::endl;
    const float alpha = 1;
    const float beta = 0;
    // energy(e x h+1) = V(e x v+1) * W(v+1 x h+1); cuBLAS is column-major,
    // hence the leading dimensions below.
    checkCuBlasError(__LINE__, cublasSgemm(
        handle,
        CUBLAS_OP_N,
        CUBLAS_OP_N,
        examplesNumber,
        numHidden + 1,
        numVisible + 1,
        &alpha,
        dVisibleUnitsStates,
        examplesNumber, // lda
        dWeights,
        numVisible + 1, // ldb
        &beta,
        dHiddenUnitsActivationEnergy,
        examplesNumber)); // ldc
    if (DEBUG) printDeviceColumnMajorMatrix(dHiddenUnitsActivationEnergy, examplesNumber, numHidden + 1);
    // One extra block covers the remainder; assumes the sigmoid kernel
    // bounds-checks against hiddenBufferSize — TODO confirm in rbm_kernels.
    int blockNumber = hiddenBufferSize / BLOCK_SIZE + 1;
    if (DEBUG) std::cout << "Calculating hidden probabilities " << BLOCK_SIZE << std::endl;
    sigmoid<<<blockNumber, BLOCK_SIZE>>>(dHiddenUnitsActivationEnergy, dHiddenUnitsActivationProbabilities, hiddenBufferSize);
    checkCudaError(__LINE__);
    if (DEBUG) printDeviceColumnMajorMatrix(dHiddenUnitsActivationProbabilities, examplesNumber, numHidden + 1);
    checkCudaError(__LINE__, cudaFree(dHiddenUnitsActivationEnergy));
    return dHiddenUnitsActivationProbabilities;
}
// Compute the visible-unit activation probabilities for a batch of hidden
// states: energy = H * W^T, then an element-wise sigmoid.
//
// dHiddenUnitsStates: column-major (examplesNumber x (numHidden+1)) device
//   matrix with the bias column first.
// Returns a freshly cudaMalloc'd column-major (examplesNumber x (numVisible+1))
//   matrix; the caller owns it and must cudaFree it.
float *RBM::visibleActivationProbabilities(float *dHiddenUnitsStates, int examplesNumber) {
    float *dVisibleUnitsActivationEnergy; // matrix of float values of dim exv
    float *dVisibleUnitsActivationProbabilities; // matrix of [0,1] values of dim exv
    int visibleBufferSize = (numVisible + 1) * examplesNumber;
    checkCudaError(__LINE__, cudaMalloc(&dVisibleUnitsActivationEnergy, visibleBufferSize * sizeof(float)));
    checkCudaError(__LINE__, cudaMalloc(&dVisibleUnitsActivationProbabilities, visibleBufferSize * sizeof(float)));
    if (DEBUG) std::cout << "Calculating visible units activation energies" << std::endl;
    const float alpha = 1;
    const float beta = 0;
    // energy(e x v+1) = H(e x h+1) * W^T(h+1 x v+1); column-major GEMM.
    checkCuBlasError(__LINE__, cublasSgemm(
        handle,
        CUBLAS_OP_N,
        CUBLAS_OP_T,
        examplesNumber,
        numVisible + 1,
        numHidden + 1,
        &alpha,
        dHiddenUnitsStates,
        examplesNumber, // lda
        dWeights,
        numVisible + 1, // ldb
        &beta,
        dVisibleUnitsActivationEnergy,
        examplesNumber)); // ldc
    if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsActivationEnergy, examplesNumber, numVisible + 1);
    int blockNumber = visibleBufferSize / BLOCK_SIZE + 1;
    if (DEBUG) std::cout << "Calculating visible probabilities" << std::endl;
    sigmoid<<<blockNumber, BLOCK_SIZE>>>(dVisibleUnitsActivationEnergy, dVisibleUnitsActivationProbabilities, visibleBufferSize);
    checkCudaError(__LINE__);
    // BUGFIX: print the probabilities only after the sigmoid kernel has filled
    // them. The original also printed this buffer before the launch, dumping
    // uninitialized device memory (the hidden-side method never did that).
    if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsActivationProbabilities, examplesNumber, numVisible + 1);
    checkCudaError(__LINE__, cudaFree(dVisibleUnitsActivationEnergy));
    return dVisibleUnitsActivationProbabilities;
}
// Compute the association matrix A = V^T * H, where V is the column-major
// (examplesNumber x (numVisible+1)) visible matrix and H the column-major
// (examplesNumber x (numHidden+1)) hidden matrix (both with a leading bias
// column). Used for both the positive and negative CD statistics.
// Returns a freshly cudaMalloc'd (numVisible+1) x (numHidden+1) buffer; the
// caller owns it and must cudaFree it.
float *RBM::computeAssociations(float *dVisibleUnitsActivationProbabilities, float *dHiddenUnitsActivationProbabilities, int examplesNumber) {
    float *dAssociations; // vxh matrix
    checkCudaError(__LINE__, cudaMalloc(&dAssociations, (numVisible + 1) * (numHidden + 1) * sizeof(float))); // +1 because of bias
    const float alpha = 1;
    const float beta = 0;
    // A = alpha * V^T * H + beta * A; OP_T on V gives the (v+1 x e)(e x h+1)
    // product in column-major layout.
    checkCuBlasError(__LINE__, cublasSgemm(
        handle,
        CUBLAS_OP_T,
        CUBLAS_OP_N,
        numVisible + 1,
        numHidden + 1,
        examplesNumber,
        &alpha,
        dVisibleUnitsActivationProbabilities,
        examplesNumber, // lda
        dHiddenUnitsActivationProbabilities,
        examplesNumber, // ldb
        &beta,
        dAssociations,
        numVisible + 1)); // ldc
    if (DEBUG) std::cout << "Associations:" << std::endl;
    if (DEBUG) printDeviceColumnMajorMatrix(dAssociations, numVisible + 1, numHidden + 1);
    return dAssociations;
}
// a contrastive divergence (CD) learning algorithm with k=1; batched version
//
// hTrainingData: host-side, column-major (examplesNumber x numVisible) batch.
// Performs maxEpochs full-batch CD-1 updates of dWeights and prints the
// reconstruction error after every epoch.
void RBM::train(float *hTrainingData, int examplesNumber, int maxEpochs) {
    // Column of ones, copied in as the first (bias) column of the visible and
    // hidden matrices. Heap-allocated: the original used a variable-length
    // array (`float hBias[examplesNumber]`), a non-standard C++ extension
    // that can overflow the stack for large batches.
    float *hBias = new float[examplesNumber];
    std::fill_n(hBias, examplesNumber, 1.0f);
    float *dVisibleUnitsStates; // device copy of training data
    float *dVisibleUnitsActivationProbabilities; // matrix of [0,1] of dimensions exv
    float *dHiddenUnitsStates; // matrix of boolean values of dimensions exh
    float *dPositiveHiddenUnitsActivationProbabilities; // matrix of [0,1] of dimensions exh
    float *dNegativeHiddenUnitsActivationProbabilities; // matrix of [0,1] of dimensions exh
    float *dPositiveAssociations; // matrix of dimensions vxh
    float *dNegativeAssociations; // matrix of dimensions vxh
    float *dRandom; // matrix of dimensions exh of random values [0,1]
    int visibleBufferSize = (numVisible + 1) * examplesNumber;
    int hiddenBufferSize = (numHidden + 1) * examplesNumber;
    checkCudaError(__LINE__, cudaMalloc(&dVisibleUnitsStates, visibleBufferSize * sizeof(float)));
    checkCudaError(__LINE__, cudaMalloc(&dHiddenUnitsStates, hiddenBufferSize * sizeof(float)));
    checkCudaError(__LINE__, cudaMalloc(&dRandom, hiddenBufferSize * sizeof(float)));
    for (int e = 0; e < maxEpochs; e++) {
        // a positive phase of the contrastive divergence
        // copy bias to the first column
        checkCudaError(__LINE__, cudaMemcpy(dVisibleUnitsStates, hBias, examplesNumber * sizeof(float), cudaMemcpyHostToDevice));
        // copy training data to remaining cells
        checkCudaError(__LINE__, cudaMemcpy(&dVisibleUnitsStates[examplesNumber],
                hTrainingData,
                numVisible * examplesNumber * sizeof(float),
                cudaMemcpyHostToDevice));
        if (DEBUG) std::cout << "Visible units states:" << std::endl;
        if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsStates, examplesNumber, numVisible + 1);
        // calculate positive hidden activation probabilities
        dPositiveHiddenUnitsActivationProbabilities = hiddenActivationProbabilities(dVisibleUnitsStates, examplesNumber);
        if (DEBUG) std::cout << "Fixing hidden units activation probabilities by setting bias to the first column" << std::endl;
        checkCudaError(__LINE__, cudaMemcpy(dPositiveHiddenUnitsActivationProbabilities, hBias, examplesNumber * sizeof(float), cudaMemcpyHostToDevice));
        if (DEBUG) printDeviceColumnMajorMatrix(dPositiveHiddenUnitsActivationProbabilities, examplesNumber, numHidden + 1);
        if (DEBUG) std::cout << "Calculating hidden unit states by sampling" << std::endl;
        checkCuRandError(__LINE__, curandGenerateUniform(generator, dRandom, hiddenBufferSize));
        int blockNumber = hiddenBufferSize / BLOCK_SIZE + 1;
        // a hidden unit fires iff its probability exceeds the uniform draw
        greaterThan<<<blockNumber, BLOCK_SIZE>>>(dPositiveHiddenUnitsActivationProbabilities, dRandom, dHiddenUnitsStates, examplesNumber * (numHidden + 1));
        checkCudaError(__LINE__);
        if (DEBUG) printDeviceColumnMajorMatrix(dHiddenUnitsStates, examplesNumber, numHidden + 1);
        dPositiveAssociations = computeAssociations(dVisibleUnitsStates, dPositiveHiddenUnitsActivationProbabilities, examplesNumber);
        // a negative (reconstruction) phase of the contrastive divergence
        // calculate negative visible probabilities
        dVisibleUnitsActivationProbabilities = visibleActivationProbabilities(dHiddenUnitsStates, examplesNumber);
        if (DEBUG) std::cout << "Visible Units Activation Probabilities:" << std::endl;
        if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsActivationProbabilities, examplesNumber, numVisible + 1);
        if (DEBUG) std::cout << "Fixing visible units activation probabilities by setting bias to the first column" << std::endl;
        checkCudaError(__LINE__, cudaMemcpy(dVisibleUnitsActivationProbabilities, hBias, examplesNumber * sizeof(float), cudaMemcpyHostToDevice));
        if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsActivationProbabilities, examplesNumber, numVisible + 1);
        // negative hidden probabilities
        dNegativeHiddenUnitsActivationProbabilities = hiddenActivationProbabilities(dVisibleUnitsActivationProbabilities, examplesNumber);
        if (DEBUG) std::cout << "Negative Hidden units activation probabilities:" << std::endl;
        if (DEBUG) printDeviceColumnMajorMatrix(dNegativeHiddenUnitsActivationProbabilities, examplesNumber, numHidden + 1);
        if (DEBUG) std::cout << "Calculating negative associations" << std::endl;
        dNegativeAssociations = computeAssociations(dVisibleUnitsActivationProbabilities, dNegativeHiddenUnitsActivationProbabilities, examplesNumber);
        if (DEBUG) std::cout << "Updating weights" << std::endl;
        int weightsNumber = (numHidden + 1) * (numVisible + 1);
        blockNumber = weightsNumber / BLOCK_SIZE + 1;
        // presumably W += rate * (positive - negative) / examples; the kernel
        // lives in rbm_kernels — confirm there.
        updateWeight<<<blockNumber, BLOCK_SIZE>>>(dWeights, dPositiveAssociations, dNegativeAssociations, weightsNumber, examplesNumber, learningRate);
        checkCudaError(__LINE__);
        if (DEBUG) printDeviceColumnMajorMatrix(dWeights, numVisible + 1, numHidden + 1);
        blockNumber = visibleBufferSize / BLOCK_SIZE + 1;
        if (DEBUG) std::cout << "Calculating error - squares of subtractions: " << std::endl;
        // for memory efficiency we will write subtraction result to one of the input matrices (dVisibleUnitsStates)
        subAndSquare<<<blockNumber, BLOCK_SIZE>>>(dVisibleUnitsStates, dVisibleUnitsActivationProbabilities, visibleBufferSize);
        checkCudaError(__LINE__);
        if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsStates, examplesNumber, numVisible + 1);
        if (DEBUG) std::cout << "Calculation error - reducing sum:" << std::endl;
        thrust::device_ptr<float> dVisibleUnitsStatesPtr(dVisibleUnitsStates);
        // 0.0f (not 0.0): a double initial value would route a float reduction
        // through implicit float<->double conversions.
        float hError = thrust::reduce(dVisibleUnitsStatesPtr, dVisibleUnitsStatesPtr + visibleBufferSize, 0.0f, thrust::plus<float>());
        if (DEBUG) printDeviceColumnMajorMatrix(dVisibleUnitsStates, examplesNumber, numVisible + 1);
        std::cout << "Reconstruction error after epoch " << e + 1 << " is " << hError << std::endl;
        checkCudaError(__LINE__, cudaFree(dVisibleUnitsActivationProbabilities));
        checkCudaError(__LINE__, cudaFree(dPositiveHiddenUnitsActivationProbabilities));
        checkCudaError(__LINE__, cudaFree(dNegativeHiddenUnitsActivationProbabilities));
        checkCudaError(__LINE__, cudaFree(dPositiveAssociations));
        checkCudaError(__LINE__, cudaFree(dNegativeAssociations));
    }
    checkCudaError(__LINE__, cudaFree(dRandom));
    checkCudaError(__LINE__, cudaFree(dVisibleUnitsStates));
    checkCudaError(__LINE__, cudaFree(dHiddenUnitsStates));
    delete[] hBias;
    if (INFO) std::cout << "Learned weights:" << std::endl;
    if (INFO) printDeviceColumnMajorMatrix(dWeights, numVisible + 1, numHidden + 1);
}
// Sample a binary hidden-state vector for a single visible example.
// hVisible: host array of numVisible floats.
// Returns a malloc'd host array of numHidden 0/1 floats; the caller frees it.
float *RBM::hiddenStates(float *hVisible) {
    float *dVisible;
    float *dHidden;
    float *hHidden;
    float *dRandom;
    checkCudaError(__LINE__, cudaMalloc(&dVisible, (numVisible + 1) * sizeof(float)));
    float bias = 1.0;
    // Element 0 is the bias unit; the real visible units follow it.
    checkCudaError(__LINE__, cudaMemcpy(dVisible, &bias, sizeof(float), cudaMemcpyHostToDevice)); // set bias
    checkCudaError(__LINE__, cudaMemcpy(&dVisible[1], hVisible, numVisible * sizeof(float), cudaMemcpyHostToDevice));
    // hiddenActivationProbabilities() allocates and returns its own buffer;
    // the original also cudaMalloc'd dHidden beforehand and leaked that
    // allocation when the pointer was overwritten here.
    dHidden = hiddenActivationProbabilities(dVisible, 1);
    // sampling: a unit is on iff its probability exceeds the uniform draw
    checkCudaError(__LINE__, cudaMalloc(&dRandom, (numHidden + 1) * sizeof(float)));
    checkCuRandError(__LINE__, curandGenerateUniform(generator, dRandom, numHidden + 1));
    int blockNumber = (numHidden + 1) / BLOCK_SIZE + 1;
    greaterThan<<<blockNumber, BLOCK_SIZE>>>(dHidden, dRandom, dHidden, numHidden + 1);
    checkCudaError(__LINE__);
    hHidden = (float *) malloc(numHidden * sizeof(float));
    // Skip element 0 (bias) when copying back; the copy is now checked too.
    checkCudaError(__LINE__, cudaMemcpy(hHidden, &dHidden[1], numHidden * sizeof(float), cudaMemcpyDeviceToHost));
    checkCudaError(__LINE__, cudaFree(dHidden));
    checkCudaError(__LINE__, cudaFree(dVisible));
    checkCudaError(__LINE__, cudaFree(dRandom));
    return hHidden;
}
|
fde27e8489d11946eda93dabd7b642392d6f0298.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Element-wise addition c[i] = a[i] + b[i], one block per element
// (this sample launches a single 1-thread block, so only c[0] is written).
__global__ void add(int *a, int *b, int *c)
{
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
/*
 * Host driver: add two scalars on the GPU and print the result.
 * Every HIP call is now checked, so failures are reported instead of
 * silently printing the -99 sentinel.
 */
int main()
{
    int a, b, c;
    int *d_a, *d_b, *d_c;
    int size = sizeof( int );

    /* allocate space for device copies of a, b, c */
    if (hipMalloc( (void **) &d_a, size ) != hipSuccess ||
        hipMalloc( (void **) &d_b, size ) != hipSuccess ||
        hipMalloc( (void **) &d_c, size ) != hipSuccess) {
        fprintf(stderr, "hipMalloc failed\n");
        return 1;
    }

    /* setup initial values */
    a = 2;
    b = 7;
    c = -99; /* sentinel: still -99 on output would mean the copy-back failed */

    /* copy inputs to device */
    if (hipMemcpy( d_a, &a, size, hipMemcpyHostToDevice) != hipSuccess ||
        hipMemcpy( d_b, &b, size, hipMemcpyHostToDevice) != hipSuccess) {
        fprintf(stderr, "hipMemcpy host-to-device failed\n");
        return 1;
    }

    /* launch the kernel on the GPU: 1 block per grid, 1 thread per block */
    hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a,d_b,d_c);
    if (hipGetLastError() != hipSuccess) {
        fprintf(stderr, "kernel launch failed\n");
        return 1;
    }

    /* copy result back to host (hipMemcpy synchronizes with the kernel) */
    if (hipMemcpy( &c, d_c, size, hipMemcpyDeviceToHost) != hipSuccess) {
        fprintf(stderr, "hipMemcpy device-to-host failed\n");
        return 1;
    }
    printf("value of c after kernel is %d\n",c);

    /* clean up */
    hipFree( d_a );
    hipFree( d_b );
    hipFree( d_c );
    return 0;
} /* end main */
| fde27e8489d11946eda93dabd7b642392d6f0298.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Element-wise addition c[i] = a[i] + b[i], one block per element
// (this sample launches a single 1-thread block, so only c[0] is written).
__global__ void add(int *a, int *b, int *c)
{
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
/*
 * Host driver: add two scalars on the GPU and print the result.
 * Every CUDA call is now checked, so failures are reported instead of
 * silently printing the -99 sentinel.
 */
int main()
{
    int a, b, c;
    int *d_a, *d_b, *d_c;
    int size = sizeof( int );

    /* allocate space for device copies of a, b, c */
    if (cudaMalloc( (void **) &d_a, size ) != cudaSuccess ||
        cudaMalloc( (void **) &d_b, size ) != cudaSuccess ||
        cudaMalloc( (void **) &d_c, size ) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }

    /* setup initial values */
    a = 2;
    b = 7;
    c = -99; /* sentinel: still -99 on output would mean the copy-back failed */

    /* copy inputs to device */
    if (cudaMemcpy( d_a, &a, size, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy( d_b, &b, size, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy host-to-device failed\n");
        return 1;
    }

    /* launch the kernel on the GPU: 1 block per grid, 1 thread per block */
    add<<<1,1>>>(d_a,d_b,d_c);
    if (cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "kernel launch failed\n");
        return 1;
    }

    /* copy result back to host (cudaMemcpy synchronizes with the kernel) */
    if (cudaMemcpy( &c, d_c, size, cudaMemcpyDeviceToHost) != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy device-to-host failed\n");
        return 1;
    }
    printf("value of c after kernel is %d\n",c);

    /* clean up */
    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );
    return 0;
} /* end main */
|
1290c91904210368c9269c0e374ebda92a69f126.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
// Per-pixel Mandelbrot escape-time kernel: each thread handles one pixel of
// the width x height image; threads past the edge return immediately, so the
// launch grid may be rounded up.
__global__ void mandelKernel(int* device_img, float lowerX, float lowerY, float stepX, float stepY, int width, int height, int maxIterations)
{
    // To avoid error caused by the floating number, use the following pseudo code
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    unsigned int px = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= width || py >= height)
        return;

    float c_re = lowerX + px * stepX;
    float c_im = lowerY + py * stepY;
    float z_re = c_re;
    float z_im = c_im;

    // Iterate z <- z^2 + c until |z|^2 escapes past 4 or the budget runs out.
    // The negated comparison matches the original break condition exactly
    // (including its behavior when the magnitude becomes NaN).
    int iter = 0;
    while (iter < maxIterations && !(z_re * z_re + z_im * z_im > 4.f)) {
        float t_re = z_re * z_re - z_im * z_im;
        float t_im = 2.f * z_re * z_im;
        z_re = c_re + t_re;
        z_im = c_im + t_im;
        ++iter;
    }
    device_img[py * width + px] = iter; // row-major flattening
}
// Host front-end function that allocates the memory and launches the GPU kernel.
//
// Fixes relative to the original:
//  - `cudaHosAllocDefault` was a typo that did not compile; the pinned
//    staging buffer is kept (faster device-to-host copy) with the correct
//    hipHostMallocDefault flag.
//  - hipMallocPitch's pitch was ignored by both the kernel (which indexes
//    rows by `width`) and the linear copy, so rows were corrupted whenever
//    pitch != row size; a plain linear allocation matches the kernel.
//  - the image buffer was sized with sizeof(float) instead of sizeof(int).
//  - the grid now rounds up, so resolutions that are not multiples of
//    BLOCK_SIZE render their border pixels too (the kernel bounds-checks).
//  - both temporary buffers are freed (the pinned one leaked before).
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    int* device_img, *host_img;
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    hipMalloc((void **)&device_img, resX * resY * sizeof(int));
    hipHostMalloc((void **)&host_img, resX * resY * sizeof(int), hipHostMallocDefault);

    dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
    dim3 numBlock((resX + BLOCK_SIZE - 1) / BLOCK_SIZE, (resY + BLOCK_SIZE - 1) / BLOCK_SIZE);
    hipLaunchKernelGGL(( mandelKernel), dim3(numBlock), dim3(blockSize), 0, 0, device_img, lowerX, lowerY, stepX, stepY, resX, resY, maxIterations);

    // hipMemcpy synchronizes with the kernel before copying.
    hipMemcpy(host_img, device_img, resX * resY * sizeof(int), hipMemcpyDeviceToHost);
    memcpy(img, host_img, resX * resY * sizeof(int));

    hipFree(device_img);
    hipHostFree(host_img);
}
| 1290c91904210368c9269c0e374ebda92a69f126.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
// Per-pixel Mandelbrot escape-time kernel: each thread handles one pixel of
// the width x height image; threads past the edge return immediately, so the
// launch grid may be rounded up.
__global__ void mandelKernel(int* device_img, float lowerX, float lowerY, float stepX, float stepY, int width, int height, int maxIterations)
{
    // To avoid error caused by the floating number, use the following pseudo code
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    unsigned int px = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= width || py >= height)
        return;

    float c_re = lowerX + px * stepX;
    float c_im = lowerY + py * stepY;
    float z_re = c_re;
    float z_im = c_im;

    // Iterate z <- z^2 + c until |z|^2 escapes past 4 or the budget runs out.
    // The negated comparison matches the original break condition exactly
    // (including its behavior when the magnitude becomes NaN).
    int iter = 0;
    while (iter < maxIterations && !(z_re * z_re + z_im * z_im > 4.f)) {
        float t_re = z_re * z_re - z_im * z_im;
        float t_im = 2.f * z_re * z_im;
        z_re = c_re + t_re;
        z_im = c_im + t_im;
        ++iter;
    }
    device_img[py * width + px] = iter; // row-major flattening
}
// Host front-end function that allocates the memory and launches the GPU kernel.
//
// Fixes relative to the original:
//  - `cudaHosAllocDefault` was a typo that did not compile; the pinned
//    staging buffer is kept (faster device-to-host copy) with the correct
//    cudaHostAllocDefault flag.
//  - cudaMallocPitch's pitch was ignored by both the kernel (which indexes
//    rows by `width`) and the linear copy, so rows were corrupted whenever
//    pitch != row size; a plain linear allocation matches the kernel.
//  - the image buffer was sized with sizeof(float) instead of sizeof(int).
//  - the grid now rounds up, so resolutions that are not multiples of
//    BLOCK_SIZE render their border pixels too (the kernel bounds-checks).
//  - both temporary buffers are freed (the pinned one leaked before).
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    int* device_img, *host_img;
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    cudaMalloc((void **)&device_img, resX * resY * sizeof(int));
    cudaHostAlloc((void **)&host_img, resX * resY * sizeof(int), cudaHostAllocDefault);

    dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
    dim3 numBlock((resX + BLOCK_SIZE - 1) / BLOCK_SIZE, (resY + BLOCK_SIZE - 1) / BLOCK_SIZE);
    mandelKernel<<<numBlock, blockSize>>>(device_img, lowerX, lowerY, stepX, stepY, resX, resY, maxIterations);

    // cudaMemcpy synchronizes with the kernel before copying.
    cudaMemcpy(host_img, device_img, resX * resY * sizeof(int), cudaMemcpyDeviceToHost);
    memcpy(img, host_img, resX * resY * sizeof(int));

    cudaFree(device_img);
    cudaFreeHost(host_img);
}
|
1bf6d213829790dd639a4a92589a1de6c6e50eb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
__device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);
d_out[i].x = intensity;
d_out[i].y = intensity;
d_out[i].z = 0;
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
} | 1bf6d213829790dd639a4a92589a1de6c6e50eb0.cu | #include "kernel.h"
#define TX 32
#define TY 32
__device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);
d_out[i].x = intensity;
d_out[i].y = intensity;
d_out[i].z = 0;
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
} |
0abfe1817282d6044c59854fbb39fe932048a23e.hip | // !!! This is a file automatically generated by hipify!!!
#include "PixelDataCuda.h"
#include <iostream>
#include <memory>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include "misc/CudaTools.cuh"
#include "downsample.cuh"
#include <vector>
// explicit instantiation of handled types
template void downsampleMeanCuda(const PixelData<float>&, PixelData<float>&);
template void downsampleMaxCuda(const PixelData<float>&, PixelData<float>&);
template <typename T, typename S>
void downsampleMeanCuda(const PixelData<T> &input, PixelData<S> &output) {
ScopedCudaMemHandler<const PixelData<T>, H2D> in(input);
ScopedCudaMemHandler<PixelData<S>, D2H> out(output);
runDownsampleMean(in.get(), out.get(), input.x_num, input.y_num, input.z_num, 0);
};
template <typename T, typename S>
void downsampleMaxCuda(const PixelData<T> &input, PixelData<S> &output) {
ScopedCudaMemHandler<const PixelData<T>, H2D> in(input);
ScopedCudaMemHandler<PixelData<S>, D2H> out(output);
runDownsampleMax(in.get(), out.get(), input.x_num, input.y_num, input.z_num, 0);
};
| 0abfe1817282d6044c59854fbb39fe932048a23e.cu | #include "PixelDataCuda.h"
#include <iostream>
#include <memory>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include "misc/CudaTools.cuh"
#include "downsample.cuh"
#include <vector>
// explicit instantiation of handled types
template void downsampleMeanCuda(const PixelData<float>&, PixelData<float>&);
template void downsampleMaxCuda(const PixelData<float>&, PixelData<float>&);
template <typename T, typename S>
void downsampleMeanCuda(const PixelData<T> &input, PixelData<S> &output) {
ScopedCudaMemHandler<const PixelData<T>, H2D> in(input);
ScopedCudaMemHandler<PixelData<S>, D2H> out(output);
runDownsampleMean(in.get(), out.get(), input.x_num, input.y_num, input.z_num, 0);
};
template <typename T, typename S>
void downsampleMaxCuda(const PixelData<T> &input, PixelData<S> &output) {
ScopedCudaMemHandler<const PixelData<T>, H2D> in(input);
ScopedCudaMemHandler<PixelData<S>, D2H> out(output);
runDownsampleMax(in.get(), out.get(), input.x_num, input.y_num, input.z_num, 0);
};
|
2614693c1aca8c88117f583c02deb7d4fc2a144b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
static TexInt64 arrIn0_0;
extern "C" __global__ void scanl(const Int64 shIn0_0, const Int64 shOut_0, Int64* __restrict__ arrOut_0, const Int64 shBlk_0, Int64* __restrict__ arrBlk_0, Int64* __restrict__ arrSum_0)
{
extern volatile __shared__ Int64 sdata0[];
Int64 x0;
Int64 y0;
Int64 z0;
const Int64 sh0 = shIn0_0;
const int shapeSize = sh0;
const int intervalSize = (shapeSize + gridDim.x - 1) / gridDim.x;
int carryIn = 0;
if (threadIdx.x == 0) {
if (gridDim.x > 1) {
z0 = arrBlk_0[blockIdx.x];
} else {
z0 = (Int64) 0;
}
}
const int start = blockIdx.x * intervalSize;
const int end = min(start + intervalSize, shapeSize);
const int numElements = end - start;
int seg;
for (seg = threadIdx.x; seg < numElements; seg += blockDim.x) {
const int ix = start + seg;
x0 = indexArray(arrIn0_0, ix);
if (threadIdx.x == 0) {
x0 = z0 + x0;
}
sdata0[threadIdx.x] = x0;
__syncthreads();
if (blockDim.x > 1) {
if (threadIdx.x >= 1) {
y0 = sdata0[threadIdx.x - 1];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 2) {
if (threadIdx.x >= 2) {
y0 = sdata0[threadIdx.x - 2];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 4) {
if (threadIdx.x >= 4) {
y0 = sdata0[threadIdx.x - 4];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 8) {
if (threadIdx.x >= 8) {
y0 = sdata0[threadIdx.x - 8];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 16) {
if (threadIdx.x >= 16) {
y0 = sdata0[threadIdx.x - 16];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 32) {
if (threadIdx.x >= 32) {
y0 = sdata0[threadIdx.x - 32];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 64) {
if (threadIdx.x >= 64) {
y0 = sdata0[threadIdx.x - 64];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 128) {
if (threadIdx.x >= 128) {
y0 = sdata0[threadIdx.x - 128];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 256) {
if (threadIdx.x >= 256) {
y0 = sdata0[threadIdx.x - 256];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 512) {
if (threadIdx.x >= 512) {
y0 = sdata0[threadIdx.x - 512];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (1) {
if (threadIdx.x == 0) {
x0 = z0;
} else {
x0 = sdata0[threadIdx.x - 1];
}
}
arrOut_0[ix] = x0;
if (threadIdx.x == 0) {
const int last = min(numElements - seg, blockDim.x) - 1;
z0 = sdata0[last];
}
}
if (threadIdx.x == 0 && blockIdx.x == gridDim.x - 1) {
arrSum_0[0] = z0;
}
}
| 2614693c1aca8c88117f583c02deb7d4fc2a144b.cu | #include <accelerate_cuda.h>
static TexInt64 arrIn0_0;
extern "C" __global__ void scanl(const Int64 shIn0_0, const Int64 shOut_0, Int64* __restrict__ arrOut_0, const Int64 shBlk_0, Int64* __restrict__ arrBlk_0, Int64* __restrict__ arrSum_0)
{
extern volatile __shared__ Int64 sdata0[];
Int64 x0;
Int64 y0;
Int64 z0;
const Int64 sh0 = shIn0_0;
const int shapeSize = sh0;
const int intervalSize = (shapeSize + gridDim.x - 1) / gridDim.x;
int carryIn = 0;
if (threadIdx.x == 0) {
if (gridDim.x > 1) {
z0 = arrBlk_0[blockIdx.x];
} else {
z0 = (Int64) 0;
}
}
const int start = blockIdx.x * intervalSize;
const int end = min(start + intervalSize, shapeSize);
const int numElements = end - start;
int seg;
for (seg = threadIdx.x; seg < numElements; seg += blockDim.x) {
const int ix = start + seg;
x0 = indexArray(arrIn0_0, ix);
if (threadIdx.x == 0) {
x0 = z0 + x0;
}
sdata0[threadIdx.x] = x0;
__syncthreads();
if (blockDim.x > 1) {
if (threadIdx.x >= 1) {
y0 = sdata0[threadIdx.x - 1];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 2) {
if (threadIdx.x >= 2) {
y0 = sdata0[threadIdx.x - 2];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 4) {
if (threadIdx.x >= 4) {
y0 = sdata0[threadIdx.x - 4];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 8) {
if (threadIdx.x >= 8) {
y0 = sdata0[threadIdx.x - 8];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 16) {
if (threadIdx.x >= 16) {
y0 = sdata0[threadIdx.x - 16];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 32) {
if (threadIdx.x >= 32) {
y0 = sdata0[threadIdx.x - 32];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 64) {
if (threadIdx.x >= 64) {
y0 = sdata0[threadIdx.x - 64];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 128) {
if (threadIdx.x >= 128) {
y0 = sdata0[threadIdx.x - 128];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 256) {
if (threadIdx.x >= 256) {
y0 = sdata0[threadIdx.x - 256];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (blockDim.x > 512) {
if (threadIdx.x >= 512) {
y0 = sdata0[threadIdx.x - 512];
x0 = y0 + x0;
}
__syncthreads();
sdata0[threadIdx.x] = x0;
__syncthreads();
}
if (1) {
if (threadIdx.x == 0) {
x0 = z0;
} else {
x0 = sdata0[threadIdx.x - 1];
}
}
arrOut_0[ix] = x0;
if (threadIdx.x == 0) {
const int last = min(numElements - seg, blockDim.x) - 1;
z0 = sdata0[last];
}
}
if (threadIdx.x == 0 && blockIdx.x == gridDim.x - 1) {
arrSum_0[0] = z0;
}
}
|
13ea0d776a593612c7de7798e83652f32ccbf97d.hip | // !!! This is a file automatically generated by hipify!!!
//
// CUDA DCA Driver
//
//This file invokes all of the necessary function calls to prepare
//and simulate a compound pendulum system through the use of the
//recursive DCA algorithm. The majority of this algorithm is run
//on the gpu. Output is created in a format that is
//readable in python for answer checking and graphing purposes.
//Included Files
#include <malloc.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
#include "h_code/classes.h"
#include "d_code/deviceDisassemble.h"
#include "d_code/deviceAssemble.h"
#include "d_code/deviceInitialize.h"
#include "d_code/deviceFuncts.h"
#include "h_code/npy.h"
#include <math.h>
#include <fstream>
#include <limits>
//Function Prototypes
// Function found in RK45.cu
void RK_45(double state[], double step, int n, InitBody *bs, Joint *js,double Y[]);
// Functions found in Functs.cu
void pend_init(InitBody *bs,int n,double mass, double length);
void horizontal_drop(double x[],int n);
void set_up(double A[], double B[], double C[], int n , double h);
//Main function
int main()
{
int n=1200; //Number of bodies
//Variable Declarations
InitBody* bodies; //List of bodies used for initialization only
Joint* joints; //List of joints between bodies NOTE: This joint list is not used in this version
double *inits; //Initial conditions
double *Y; //Solution to each timestep
std::ofstream myfile;
std::ofstream myfile2;
myfile2.open("Vals.mtx");
myfile.open ("output.mtx");
//System Setup
bodies = new InitBody[n]; //List of initialization bodies is length n
joints = new Joint[n]; //List of joints is length n
inits = new double[2*n]; //Initial conditions are length 2*n
Y = new double[2*n]; //Timestep solution is length 2*n
pend_init(bodies,n,1.0,1.0); //Initialize mass, length, and inertia of all bodies
//Time Setup
double tstep= 0.001; //Length of a timestep [s]
double tfinal =1; //Final time [s]
int tlen = (int) floor(tfinal/tstep)+1; //Number of timesteps
//Matrix Output Setup
//int shape1[2] = { tlen , 2*n }, fortran_order = 0; //Shape of solution matrix
//int shape2[2] = { 2 , n+1 }; //Shape of matrix holding information to calculate the energy
//double Vals[2][n+1]; //Matrix holding information to calculate the energy
typedef std::numeric_limits< double > dbl;
std::cout.precision(dbl::digits10);
myfile2<<tstep<<" ";
//Vals[0][0]=tstep; //Save the length of a timestep for plotting purposes
//Vals[1][0]=tfinal; //Save the final time for plotting purposes
hipEvent_t beginEvent;
hipEvent_t endEvent;
hipEventCreate( &beginEvent );
hipEventCreate( &endEvent );
for(int i =1; i<n+1; i++) //Loop through all of the bodies
{
myfile2<<bodies[i-1].m<<" ";
//Vals[0][i]=bodies[i-1].m; //Save the mass of body[i]
//Vals[1][i]=bodies[i-1].l; //Save the length of body[i]
}
myfile2<<"\n"<<tfinal<<" ";
for(int i =1; i<n+1; i++) //Loop through all of the bodies
{
myfile2<<bodies[i-1].l<<" ";
//Vals[0][i]=bodies[i-1].m; //Save the mass of body[i]
//Vals[1][i]=bodies[i-1].l; //Save the length of body[i]
}
//System Initialization
horizontal_drop(inits,n); //Set the initial conditions
//Save the initial conditions in the solution matrix
for(int r=0; r<2*n; r++)
{
myfile << inits[r]<< " ";
}
myfile << "\n";
hipEventRecord( beginEvent, 0 );
//Numerical Integration
for(int t=1; t<tlen; t++) //Loop through every timestep
{
RK_45(inits,tstep,n,bodies,joints,Y); //Find the solution at that timestep
for(int i = 0; i<2*n;i++) //Loop through the solution
{
inits[i]=Y[i]; //Use the solution as the initial conditions for the next timestep
myfile << inits[i]<<" ";
}
myfile << "\n";
}
hipEventRecord( endEvent, 0 );
hipEventSynchronize( endEvent );
float timeValue;
hipEventElapsedTime( &timeValue, beginEvent, endEvent );
if ( hipSuccess != hipGetLastError() )
printf( "Error!\n" );
std::cout << "Time: " << timeValue << std::endl;
//Solution Output
//npy_save_double("Vals.npy",fortran_order,2,shape2,&Vals[0][0]); //Output values to find energy
//Free memory
delete[] inits;
delete[] Y;
delete[] bodies;
delete[] joints;
myfile.close();
myfile2.close();
return EXIT_SUCCESS; //Program completed successfully
}
| 13ea0d776a593612c7de7798e83652f32ccbf97d.cu | //
// CUDA DCA Driver
//
//This file invokes all of the necessary function calls to prepare
//and simulate a compound pendulum system through the use of the
//recursive DCA algorithm. The majority of this algorithm is run
//on the gpu. Output is created in a format that is
//readable in python for answer checking and graphing purposes.
//Included Files
#include <malloc.h>
#include <cuda.h>
#include <iostream>
#include <math.h>
#include "h_code/classes.h"
#include "d_code/deviceDisassemble.h"
#include "d_code/deviceAssemble.h"
#include "d_code/deviceInitialize.h"
#include "d_code/deviceFuncts.h"
#include "h_code/npy.h"
#include <math.h>
#include <fstream>
#include <limits>
//Function Prototypes
// Function found in RK45.cu
void RK_45(double state[], double step, int n, InitBody *bs, Joint *js,double Y[]);
// Functions found in Functs.cu
void pend_init(InitBody *bs,int n,double mass, double length);
void horizontal_drop(double x[],int n);
void set_up(double A[], double B[], double C[], int n , double h);
//Main function
int main()
{
int n=1200; //Number of bodies
//Variable Declarations
InitBody* bodies; //List of bodies used for initialization only
Joint* joints; //List of joints between bodies NOTE: This joint list is not used in this version
double *inits; //Initial conditions
double *Y; //Solution to each timestep
std::ofstream myfile;
std::ofstream myfile2;
myfile2.open("Vals.mtx");
myfile.open ("output.mtx");
//System Setup
bodies = new InitBody[n]; //List of initialization bodies is length n
joints = new Joint[n]; //List of joints is length n
inits = new double[2*n]; //Initial conditions are length 2*n
Y = new double[2*n]; //Timestep solution is length 2*n
pend_init(bodies,n,1.0,1.0); //Initialize mass, length, and inertia of all bodies
//Time Setup
double tstep= 0.001; //Length of a timestep [s]
double tfinal =1; //Final time [s]
int tlen = (int) floor(tfinal/tstep)+1; //Number of timesteps
//Matrix Output Setup
//int shape1[2] = { tlen , 2*n }, fortran_order = 0; //Shape of solution matrix
//int shape2[2] = { 2 , n+1 }; //Shape of matrix holding information to calculate the energy
//double Vals[2][n+1]; //Matrix holding information to calculate the energy
typedef std::numeric_limits< double > dbl;
std::cout.precision(dbl::digits10);
myfile2<<tstep<<" ";
//Vals[0][0]=tstep; //Save the length of a timestep for plotting purposes
//Vals[1][0]=tfinal; //Save the final time for plotting purposes
cudaEvent_t beginEvent;
cudaEvent_t endEvent;
cudaEventCreate( &beginEvent );
cudaEventCreate( &endEvent );
for(int i =1; i<n+1; i++) //Loop through all of the bodies
{
myfile2<<bodies[i-1].m<<" ";
//Vals[0][i]=bodies[i-1].m; //Save the mass of body[i]
//Vals[1][i]=bodies[i-1].l; //Save the length of body[i]
}
myfile2<<"\n"<<tfinal<<" ";
for(int i =1; i<n+1; i++) //Loop through all of the bodies
{
myfile2<<bodies[i-1].l<<" ";
//Vals[0][i]=bodies[i-1].m; //Save the mass of body[i]
//Vals[1][i]=bodies[i-1].l; //Save the length of body[i]
}
//System Initialization
horizontal_drop(inits,n); //Set the initial conditions
//Save the initial conditions in the solution matrix
for(int r=0; r<2*n; r++)
{
myfile << inits[r]<< " ";
}
myfile << "\n";
cudaEventRecord( beginEvent, 0 );
//Numerical Integration
for(int t=1; t<tlen; t++) //Loop through every timestep
{
RK_45(inits,tstep,n,bodies,joints,Y); //Find the solution at that timestep
for(int i = 0; i<2*n;i++) //Loop through the solution
{
inits[i]=Y[i]; //Use the solution as the initial conditions for the next timestep
myfile << inits[i]<<" ";
}
myfile << "\n";
}
cudaEventRecord( endEvent, 0 );
cudaEventSynchronize( endEvent );
float timeValue;
cudaEventElapsedTime( &timeValue, beginEvent, endEvent );
if ( cudaSuccess != cudaGetLastError() )
printf( "Error!\n" );
std::cout << "Time: " << timeValue << std::endl;
//Solution Output
//npy_save_double("Vals.npy",fortran_order,2,shape2,&Vals[0][0]); //Output values to find energy
//Free memory
delete[] inits;
delete[] Y;
delete[] bodies;
delete[] joints;
myfile.close();
myfile2.close();
return EXIT_SUCCESS; //Program completed successfully
}
|
1bb349365da9da676c369a524b8cc0adb322b6d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <iostream>
#include "data_manager.h"
#include "gpu_util.h"
#include "gpu_benchmark.h"
#define VECTOR_LENGTH 256
#define M_BLOCK 32
#define K_BLOCK 64
#define N_BLOCK 256
__global__
void opt2MulKernel(size_t pitch_A, size_t pitch_B, size_t pitch_C,
float* cuda_A, float* cuda_B, float* cuda_C,
int k);
void opt2GPU_gemm_execute(GemmRun<float>* run) {
size_t pitch_A, pitch_B, pitch_C, cuda_lda, cuda_ldb, cuda_ldc;
float* cuda_A;
float* cuda_B;
float* cuda_C;
init_cuda_matrices(run, &pitch_A, &pitch_B, &pitch_C,
&cuda_A, &cuda_B, &cuda_C);
cuda_lda = pitch_A / sizeof(float);
cuda_ldb = pitch_B / sizeof(float);
cuda_ldc = pitch_C / sizeof(float);
// Invoke kernel
dim3 dimBlock(K_BLOCK, VECTOR_LENGTH / K_BLOCK);
dim3 dimGrid(run->n / N_BLOCK, run->m / M_BLOCK);
hipLaunchKernelGGL(( opt2MulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, cuda_lda, cuda_ldb, cuda_ldc,
cuda_A, cuda_B, cuda_C, run->k);
hipDeviceSynchronize();
// printf("%s\n", hipGetErrorString(hipGetLastError()));
deinit_cuda_matrices(run, pitch_C, cuda_A, cuda_B, cuda_C);
}
__global__ void opt2MulKernel(size_t pitch_A, size_t pitch_B, size_t pitch_C,
float* cuda_A, float* cuda_B, float* cuda_C,
int k) {
unsigned int A_block_offset = (blockIdx.y * M_BLOCK) * pitch_A;
unsigned int B_block_offset = blockIdx.x * N_BLOCK;
unsigned int C_block_offset = (blockIdx.y * M_BLOCK) * pitch_C + blockIdx.x * N_BLOCK;
float Cvalue[M_BLOCK];
#pragma unroll 32
for (unsigned int i = 0; i < M_BLOCK; i++) {
Cvalue[i] = 0;
}
for (unsigned int block_idx = 0; block_idx < (k / K_BLOCK); block_idx++) {
__shared__ float A_shared[M_BLOCK][K_BLOCK];
// load the block from A
for (int j = 0; j < M_BLOCK; j += blockDim.y) {
A_shared[j + threadIdx.y][threadIdx.x] = cuda_A[A_block_offset + (block_idx * K_BLOCK) + ((threadIdx.y + j) * pitch_A) + threadIdx.x];
}
__syncthreads();
for (int i = 0; i < K_BLOCK; i++) {
float b = cuda_B[B_block_offset + ((block_idx * K_BLOCK + i) * pitch_B) + threadIdx.y * blockDim.x + threadIdx.x];
#pragma unroll 32
for (int k = 0; k < M_BLOCK; k++) {
Cvalue[k] += b * A_shared[k][i];
}
}
__syncthreads();
}
#pragma unroll 32
for (unsigned int i = 0; i < M_BLOCK; i++) {
cuda_C[C_block_offset + i * pitch_C + (threadIdx.y * blockDim.x + threadIdx.x)] = Cvalue[i];
}
}
| 1bb349365da9da676c369a524b8cc0adb322b6d0.cu | #include <cstdio>
#include <iostream>
#include "data_manager.h"
#include "gpu_util.h"
#include "gpu_benchmark.h"
#define VECTOR_LENGTH 256
#define M_BLOCK 32
#define K_BLOCK 64
#define N_BLOCK 256
__global__
void opt2MulKernel(size_t pitch_A, size_t pitch_B, size_t pitch_C,
float* cuda_A, float* cuda_B, float* cuda_C,
int k);
void opt2GPU_gemm_execute(GemmRun<float>* run) {
size_t pitch_A, pitch_B, pitch_C, cuda_lda, cuda_ldb, cuda_ldc;
float* cuda_A;
float* cuda_B;
float* cuda_C;
init_cuda_matrices(run, &pitch_A, &pitch_B, &pitch_C,
&cuda_A, &cuda_B, &cuda_C);
cuda_lda = pitch_A / sizeof(float);
cuda_ldb = pitch_B / sizeof(float);
cuda_ldc = pitch_C / sizeof(float);
// Invoke kernel
dim3 dimBlock(K_BLOCK, VECTOR_LENGTH / K_BLOCK);
dim3 dimGrid(run->n / N_BLOCK, run->m / M_BLOCK);
opt2MulKernel<<<dimGrid, dimBlock>>>(cuda_lda, cuda_ldb, cuda_ldc,
cuda_A, cuda_B, cuda_C, run->k);
cudaDeviceSynchronize();
// printf("%s\n", cudaGetErrorString(cudaGetLastError()));
deinit_cuda_matrices(run, pitch_C, cuda_A, cuda_B, cuda_C);
}
__global__ void opt2MulKernel(size_t pitch_A, size_t pitch_B, size_t pitch_C,
float* cuda_A, float* cuda_B, float* cuda_C,
int k) {
unsigned int A_block_offset = (blockIdx.y * M_BLOCK) * pitch_A;
unsigned int B_block_offset = blockIdx.x * N_BLOCK;
unsigned int C_block_offset = (blockIdx.y * M_BLOCK) * pitch_C + blockIdx.x * N_BLOCK;
float Cvalue[M_BLOCK];
#pragma unroll 32
for (unsigned int i = 0; i < M_BLOCK; i++) {
Cvalue[i] = 0;
}
for (unsigned int block_idx = 0; block_idx < (k / K_BLOCK); block_idx++) {
__shared__ float A_shared[M_BLOCK][K_BLOCK];
// load the block from A
for (int j = 0; j < M_BLOCK; j += blockDim.y) {
A_shared[j + threadIdx.y][threadIdx.x] = cuda_A[A_block_offset + (block_idx * K_BLOCK) + ((threadIdx.y + j) * pitch_A) + threadIdx.x];
}
__syncthreads();
for (int i = 0; i < K_BLOCK; i++) {
float b = cuda_B[B_block_offset + ((block_idx * K_BLOCK + i) * pitch_B) + threadIdx.y * blockDim.x + threadIdx.x];
#pragma unroll 32
for (int k = 0; k < M_BLOCK; k++) {
Cvalue[k] += b * A_shared[k][i];
}
}
__syncthreads();
}
#pragma unroll 32
for (unsigned int i = 0; i < M_BLOCK; i++) {
cuda_C[C_block_offset + i * pitch_C + (threadIdx.y * blockDim.x + threadIdx.x)] = Cvalue[i];
}
}
|
75e012d90e405c3a3990223e2886730fbb4b538a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2021, Carnegie Mellon University
* See LICENSE for details
*/
/***************************************************************************
* SPL Matrix *
* *
* Computes matrix that corresponds to SPL generated routine *
***************************************************************************/
#include <limits.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <helper_cuda.h>
#ifndef ROWS
#error ROWS must be defined
#endif
#ifndef COLUMNS
#error COLUMNS must be defined
#endif
hipfftDoubleReal *Input, *Output;
hipfftDoubleReal *dev_in, *dev_out;
void initialize(int argc, char **argv) {
hipHostMalloc ( &Input, sizeof(hipfftDoubleReal) * COLUMNS );
hipHostMalloc ( &Output, sizeof(hipfftDoubleReal) * ROWS );
hipMalloc ( &dev_in, sizeof(hipfftDoubleReal) * COLUMNS );
hipMalloc ( &dev_out, sizeof(hipfftDoubleReal) * ROWS );
INITFUNC();
}
void finalize() {
hipHostFree (Output);
hipHostFree (Input);
hipFree (dev_out);
hipFree (dev_in);
}
void set_value_in_vector(hipfftDoubleReal *arr, int elem)
{
// Zero array and put '1' in the location indicated by element
int idx;
for (idx = 0; idx < COLUMNS; idx++)
arr[idx] = (idx == elem) ? 1.0 : 0.0;
return;
}
void compute_matrix()
{
int x, y;
printf("[ ");
for (x = 0; x < COLUMNS; x++) {
set_value_in_vector(Input, x);
hipMemcpy ( dev_in, Input, sizeof(hipfftDoubleReal) * COLUMNS, hipMemcpyHostToDevice);
FUNC(dev_out, dev_in);
hipMemcpy ( Output, dev_out, sizeof(hipfftDoubleReal) * ROWS, hipMemcpyDeviceToHost);
if (x != 0) {
printf(",\n [ ");
}
else {
printf("[ ");
}
for (y = 0; y < ROWS; y++) {
if (y != 0) {
printf(", ");
}
printf("FloatString(\"%.18g\")", Output[y]);
}
printf(" ]");
}
printf("\n];\n");
}
int main(int argc, char** argv) {
initialize(argc, argv);
compute_matrix();
finalize();
return EXIT_SUCCESS;
}
| 75e012d90e405c3a3990223e2886730fbb4b538a.cu | /*
* Copyright (c) 2018-2021, Carnegie Mellon University
* See LICENSE for details
*/
/***************************************************************************
* SPL Matrix *
* *
* Computes matrix that corresponds to SPL generated routine *
***************************************************************************/
#include <limits.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cufft.h>
#include <cufftXt.h>
#include <helper_cuda.h>
#ifndef ROWS
#error ROWS must be defined
#endif
#ifndef COLUMNS
#error COLUMNS must be defined
#endif
cufftDoubleReal *Input, *Output;
cufftDoubleReal *dev_in, *dev_out;
void initialize(int argc, char **argv) {
cudaMallocHost ( &Input, sizeof(cufftDoubleReal) * COLUMNS );
cudaMallocHost ( &Output, sizeof(cufftDoubleReal) * ROWS );
cudaMalloc ( &dev_in, sizeof(cufftDoubleReal) * COLUMNS );
cudaMalloc ( &dev_out, sizeof(cufftDoubleReal) * ROWS );
INITFUNC();
}
void finalize() {
cudaFreeHost (Output);
cudaFreeHost (Input);
cudaFree (dev_out);
cudaFree (dev_in);
}
void set_value_in_vector(cufftDoubleReal *arr, int elem)
{
// Zero array and put '1' in the location indicated by element
int idx;
for (idx = 0; idx < COLUMNS; idx++)
arr[idx] = (idx == elem) ? 1.0 : 0.0;
return;
}
void compute_matrix()
{
int x, y;
printf("[ ");
for (x = 0; x < COLUMNS; x++) {
set_value_in_vector(Input, x);
cudaMemcpy ( dev_in, Input, sizeof(cufftDoubleReal) * COLUMNS, cudaMemcpyHostToDevice);
FUNC(dev_out, dev_in);
cudaMemcpy ( Output, dev_out, sizeof(cufftDoubleReal) * ROWS, cudaMemcpyDeviceToHost);
if (x != 0) {
printf(",\n [ ");
}
else {
printf("[ ");
}
for (y = 0; y < ROWS; y++) {
if (y != 0) {
printf(", ");
}
printf("FloatString(\"%.18g\")", Output[y]);
}
printf(" ]");
}
printf("\n];\n");
}
int main(int argc, char** argv) {
initialize(argc, argv);
compute_matrix();
finalize();
return EXIT_SUCCESS;
}
|
bbb734829204bffa7df89a219db90a3775cf8b17.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_wtf.cu
*
* @brief Simple test driver program for computing Pagerank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BFS includes
#include <gunrock/app/wtf/wtf_enactor.cuh>
#include <gunrock/app/wtf/wtf_problem.cuh>
#include <gunrock/app/wtf/wtf_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/page_rank.hpp>
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::wtf;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
//bool g_verbose;
//bool g_undirected;
//bool g_quick;
//bool g_stream_from_host;
template <typename VertexId, typename Value>
struct RankPair {
VertexId vertex_id;
Value page_rank;
RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {}
};
template<typename RankPair>
bool PRCompare(
RankPair elem1,
RankPair elem2)
{
return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf("\ntest_wtf <graph type> <graph type args> [--device=<device_index>] "
"[--undirected] [--instrumented] [--quick] "
"[--v]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --undirected If set then treat the graph as undirected.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code.\n"
);
}
/**
* @brief Displays the BFS result (i.e., distance from source)
*
* @param[in] node_id Pointer to node ID array
* @param[in] rank Pointer to node rank score array
* @param[in] nodes Number of nodes in the graph.
*/
template<typename VertexId, typename Value, typename SizeT>
void DisplaySolution(VertexId *node_id, Value *rank, SizeT nodes)
{
// Print out at most top 10 largest components
int top = (nodes < 10) ? nodes : 10;
printf("Top %d Page Ranks:\n", top);
for (int i = 0; i < top; ++i)
{
printf("Vertex ID: %d, Page Rank: %5f\n", node_id[i], rank[i]);
}
}
/******************************************************************************
* WTF Testing Routines
*****************************************************************************/
/**
 * @brief A simple CPU-based reference WTF (Who-To-Follow) implementation:
 *        Boost PageRank selects a "circle of trust" of top vertices, then a
 *        SALSA-style iteration over that circle produces the final scores.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in]  graph    Reference to the CSR graph we process on
 * @param[in]  src      Source node ID for WTF algorithm
 * @param[out] node_id  Pointer to store computed output node ID
 * @param[out] rank     Host-side vector to store CPU computed labels for each node
 * @param[in]  delta    Delta value for computing PageRank score
 *                      (NOTE(review): not referenced in this function body)
 * @param[in]  alpha    Teleport weight; also determines the SALSA iteration count
 * @param[in]  max_iter max iteration to go
 */
// TODO: Boost PageRank cannot handle personalized pagerank, so currently the CPU
// implementation gives incorrect answer. Need to find a CPU PPR implementation
template<
    typename VertexId,
    typename Value,
    typename SizeT>
void SimpleReferenceWTF(
    const Csr<VertexId, Value, SizeT> &graph,
    VertexId src,
    VertexId *node_id,
    Value *rank,
    Value delta,
    Value alpha,
    SizeT max_iter)
{
    using namespace boost;
    // Mirror the CSR input into a Boost bidirectional adjacency list.
    typedef adjacency_list<vecS, vecS, bidirectionalS, no_property,
            property<edge_index_t, int> > Graph;
    Graph g;
    for (int i = 0; i < graph.nodes; ++i)
    {
        for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
        {
            Graph::edge_descriptor e =
                add_edge(i, graph.column_indices[j], g).first;
            put(edge_index, g, e, i);  // tag each edge with its source vertex
        }
    }
    //
    // compute page rank (timed)
    //
    CpuTimer cpu_timer;
    cpu_timer.Start();
    //remove_dangling_links(g);
    std::vector<Value> ranks(num_vertices(g));
    page_rank(g, make_iterator_property_map(
                  ranks.begin(), get(boost::vertex_index, g)),
              boost::graph::n_iterations(max_iter));
    cpu_timer.Stop();
    float elapsed = cpu_timer.ElapsedMillis();
    for (std::size_t i = 0; i < num_vertices(g); ++i)
    {
        rank[i] = ranks[i];
    }
    // Sort vertices by descending PageRank score.
    RankPair<SizeT, Value> *pr_list =
        (RankPair<SizeT, Value>*)malloc(
            sizeof(RankPair<SizeT, Value>) * num_vertices(g));
    for (int i = 0; i < num_vertices(g); ++i)
    {
        pr_list[i].vertex_id = i;
        pr_list[i].page_rank = rank[i];
    }
    std::stable_sort(
        pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
    std::vector<int> in_degree(num_vertices(g));
    std::vector<Value> refscore(num_vertices(g));
    // node_id now holds vertices in PageRank order; reset per-vertex state.
    for (int i = 0; i < num_vertices(g); ++i)
    {
        node_id[i] = pr_list[i].vertex_id;
        // NOTE(review): compares position i (post-sort) against vertex id src;
        // looks like it should compare node_id[i] -- confirm intended behavior.
        rank[i] = (i == src) ? 1.0 : 0;
        in_degree[i] = 0;
        refscore[i] = 0;
    }
    free(pr_list);
    // "Circle of trust": the top (at most) 1000 PageRank vertices.
    int cot_size = (graph.nodes > 1000) ? 1000 : graph.nodes;
    // In-degree counts restricted to edges leaving the circle of trust.
    for (int i = 0; i < cot_size; ++i)
    {
        int node = node_id[i];
        for (int j = graph.row_offsets[node];
             j < graph.row_offsets[node+1]; ++j)
        {
            VertexId edge = graph.column_indices[j];
            ++in_degree[edge];
        }
    }
    // SALSA-style sweeps: push rank out along out-edges, then pull it back
    // weighted by in-degree; iteration count is derived from alpha.
    int salsa_iter = 1.0/alpha+1;
    for (int iter = 0; iter < salsa_iter; ++iter)
    {
        for (int i = 0; i < cot_size; ++i)
        {
            int node = node_id[i];
            int out_degree = graph.row_offsets[node+1]-graph.row_offsets[node];
            for (int j = graph.row_offsets[node];
                 j < graph.row_offsets[node+1]; ++j)
            {
                VertexId edge = graph.column_indices[j];
                Value val = rank[node]/ (out_degree > 0 ? out_degree : 1.0);
                refscore[edge] += val;
            }
        }
        for (int i = 0; i < cot_size; ++i)
        {
            rank[node_id[i]] = 0;
        }
        for (int i = 0; i < cot_size; ++i)
        {
            int node = node_id[i];
            rank[node] += (node == src) ? alpha : 0;  // teleport mass at src
            for (int j = graph.row_offsets[node];
                 j < graph.row_offsets[node+1]; ++j)
            {
                VertexId edge = graph.column_indices[j];
                Value val = (1-alpha)*refscore[edge]/in_degree[edge];
                rank[node] += val;
            }
        }
        // Clear relevance scores between sweeps (kept after the last one).
        for (int i = 0; i < cot_size; ++i)
        {
            if (iter+1<salsa_iter) refscore[node_id[i]] = 0;
        }
    }
    // Final sort of the relevance scores.
    // NOTE(review): vertex_id is taken from node_id[i] but page_rank from
    // refscore[i] (indexed by position, not by that vertex) -- this pairing
    // looks mismatched; verify against the GPU implementation.
    RankPair<SizeT, Value> *final_list =
        (RankPair<SizeT, Value>*)malloc(
            sizeof(RankPair<SizeT, Value>) * num_vertices(g));
    for (int i = 0; i < num_vertices(g); ++i)
    {
        final_list[i].vertex_id = node_id[i];
        final_list[i].page_rank = refscore[i];
    }
    std::stable_sort(
        final_list, final_list + num_vertices(g),
        PRCompare<RankPair<SizeT, Value> >);
    for (int i = 0; i < num_vertices(g); ++i)
    {
        node_id[i] = final_list[i].vertex_id;
        rank[i] = final_list[i].page_rank;
    }
    free(final_list);
    printf("CPU Who-To-Follow finished in %lf msec.\n", elapsed);
}
/**
 * @brief Run WTF tests: set up the problem on the GPU, enact WTF, extract the
 *        result, optionally validate against the CPU reference, and report.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 * @tparam INSTRUMENT
 * @tparam DEBUG
 * @tparam SIZE_CHECK
 *
 * @param[in] info Info structure carrying the CSR graph, parsed command-line
 *                 parameters, GPU device list and the moderngpu context.
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG,
    bool SIZE_CHECK>
void RunTests(Info<VertexId, Value, SizeT> *info)
{
    typedef WTFProblem<
        VertexId,
        SizeT,
        Value> Problem;
    // Unpack test parameters from the info structure.
    Csr<VertexId, Value, SizeT>
                 *csr = info->csr_ptr;
    VertexId src = info->info["source_vertex"].get_int64();
    int max_grid_size = info->info["max_grid_size"].get_int();
    int num_gpus = info->info["num_gpus"].get_int();
    bool quick_mode = info->info["quick_mode"].get_bool();
    bool quiet_mode = info->info["quiet_mode"].get_bool();
    bool stream_from_host = info->info["stream_from_host"].get_bool();
    Value alpha = info->info["alpha"].get_real();
    Value delta = info->info["delta"].get_real();
    Value error = info->info["error"].get_real();
    SizeT max_iter = info->info["max_iteration"].get_int();
    ContextPtr *context = (ContextPtr*)info->context;
    json_spirit::mArray device_list = info->info["device_list"].get_array();
    int* gpu_idx = new int[num_gpus];
    for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int();
    // Allocate host-side label arrays (for both reference and gpu-computed results).
    Value *reference_rank = (Value*)malloc(sizeof(Value) * csr->nodes);
    Value *h_rank = (Value*)malloc(sizeof(Value) * csr->nodes);
    VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * csr->nodes);
    VertexId *reference_node_id = (VertexId*)malloc(sizeof(VertexId) * csr->nodes);
    // reference_check aliases reference_rank; NULL disables CPU validation.
    Value *reference_check = (quick_mode) ? NULL : reference_rank;
    // Allocate WTF enactor map
    WTFEnactor<Problem, INSTRUMENT, DEBUG, SIZE_CHECK> wtf_enactor(gpu_idx);
    // Allocate problem on GPU
    Problem *problem = new Problem;
    util::GRError(problem->Init(
                      stream_from_host,
                      *csr,
                      num_gpus),
                  "Problem WTF Initialization Failed", __FILE__, __LINE__);
    // Perform WTF
    GpuTimer gpu_timer;
    util::GRError(
        problem->Reset(
            src, delta, alpha, error, wtf_enactor.GetFrontierType()),
        "WTF Problem Data Reset Failed", __FILE__, __LINE__);
    gpu_timer.Start();
    util::GRError(
        wtf_enactor.template Enact<Problem>(
            *context, src, alpha, problem, max_iter, max_grid_size),
        "WTF Problem Enact Failed", __FILE__, __LINE__);
    gpu_timer.Stop();
    float elapsed = gpu_timer.ElapsedMillis();
    // Copy out results
    util::GRError(
        problem->Extract(h_rank, h_node_id),
        "WTF Problem Data Extraction Failed", __FILE__, __LINE__);
    // A zero total rank indicates the GPU pass produced nothing to validate.
    float total_pr = 0;
    for (int i = 0; i < csr->nodes; ++i)
    {
        total_pr += h_rank[i];
    }
    //
    // Compute reference CPU WTF solution for source-distance
    //
    if (reference_check != NULL && total_pr > 0)
    {
        if (!quiet_mode) printf("compute ref value\n");
        SimpleReferenceWTF(
            *csr,
            src,
            reference_node_id,
            reference_check,
            delta,
            alpha,
            max_iter);
        if (!quiet_mode) printf("\n");
    }
    // Verify the result
    if (reference_check != NULL && total_pr > 0)
    {
        if (!quiet_mode) printf("Validity: ");
        CompareResults(h_rank, reference_check, csr->nodes, true);
    }
    if (!quiet_mode) {
        printf("\nGPU result.");
        DisplaySolution(h_node_id, h_rank, csr->nodes);
    }
    info->ComputeCommonStats(wtf_enactor.enactor_stats.GetPointer(), elapsed);
    if (!quiet_mode)
        info->DisplayStats();
    info->CollectInfo();
    // Cleanup.  reference_check aliases reference_rank, so free the latter
    // unconditionally (the original leaked it in quick mode, and also leaked
    // h_node_id and reference_node_id).
    if (problem) delete problem;
    if (reference_rank) free(reference_rank);
    if (h_rank) free(h_rank);
    if (h_node_id) free(h_node_id);
    if (reference_node_id) free(reference_node_id);
    // NOTE(review): gpu_idx is intentionally not freed here -- the enactor was
    // handed the pointer and may retain it until its destructor runs; confirm
    // ownership before reclaiming it.
    hipDeviceSynchronize();
}
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG>
void RunTests_size_check(Info<VertexId, Value, SizeT> *info)
{
    // Turn the runtime "size_check" flag into a compile-time template flag.
    const bool size_check = info->info["size_check"].get_bool();
    if (size_check)
        RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, true >(info);
    else
        RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, false>(info);
}
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT>
void RunTests_debug(Info<VertexId, Value, SizeT> *info)
{
    // Turn the runtime "debug_mode" flag into a compile-time template flag.
    const bool debug = info->info["debug_mode"].get_bool();
    if (debug)
        RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, true >(info);
    else
        RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, false>(info);
}
template <
    typename VertexId,
    typename Value,
    typename SizeT>
void RunTests_instrumented(Info<VertexId, Value, SizeT> *info)
{
    // Turn the runtime "instrument" flag into a compile-time template flag.
    const bool instrument = info->info["instrument"].get_bool();
    if (instrument)
        RunTests_debug<VertexId, Value, SizeT, true >(info);
    else
        RunTests_debug<VertexId, Value, SizeT, false>(info);
}
/******************************************************************************
 * Main
 ******************************************************************************/
int main( int argc, char** argv)
{
    CommandLineArgs args(argc, argv);
    if ((argc < 2) || (args.CheckCmdLineFlag("help")))
    {
        Usage();
        return 1;
    }
    //
    // Construct graph and perform search(es)
    //
    typedef int VertexId;                   // Use as the node identifier
    typedef float Value;                    // Use as the value type
    typedef int SizeT;                      // Use as the graph size type
    Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
    Info<VertexId, Value, SizeT> *info = new Info<VertexId, Value, SizeT>;
    info->info["undirected"] = args.CheckCmdLineFlag("undirected");
    info->Init("WTF", args, csr);
    RunTests_instrumented<VertexId, Value, SizeT>(info);
    // The original removed neither the unused graph_args local nor this
    // allocation; delete info so the run does not leak it.
    delete info;
    return 0;
}
| bbb734829204bffa7df89a219db90a3775cf8b17.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_wtf.cu
*
* @brief Simple test driver program for computing Pagerank.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
#include <cstdlib>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
// BFS includes
#include <gunrock/app/wtf/wtf_enactor.cuh>
#include <gunrock/app/wtf/wtf_problem.cuh>
#include <gunrock/app/wtf/wtf_functor.cuh>
// Operator includes
#include <gunrock/oprtr/advance/kernel.cuh>
#include <gunrock/oprtr/filter/kernel.cuh>
#include <moderngpu.cuh>
// boost includes
#include <boost/config.hpp>
#include <boost/utility.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/page_rank.hpp>
using namespace gunrock;
using namespace gunrock::app;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::wtf;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
//bool g_verbose;
//bool g_undirected;
//bool g_quick;
//bool g_stream_from_host;
// (vertex, rank) pair used when sorting vertices by rank score.
template <typename VertexId, typename Value>
struct RankPair {
    VertexId vertex_id;  // vertex identifier
    Value page_rank;     // rank score of the vertex
    // NOTE(review): instances are also created with malloc() plus member
    // assignment elsewhere in this file, bypassing this constructor.
    RankPair(VertexId vertex_id, Value page_rank) : vertex_id(vertex_id), page_rank(page_rank) {}
};
/**
 * @brief Descending comparator on page_rank, for std::stable_sort.
 *
 * Takes the elements by const reference instead of by value (the original
 * copied each RankPair on every comparison).
 */
template<typename RankPair>
bool PRCompare(
    const RankPair &elem1,
    const RankPair &elem2)
{
    return elem1.page_rank > elem2.page_rank;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
// Prints command-line usage for the WTF test driver to stdout.
void Usage()
{
    printf("\ntest_wtf <graph type> <graph type args> [--device=<device_index>] "
           "[--undirected] [--instrumented] [--quick] "
           "[--v]\n"
           "\n"
           "Graph types and args:\n"
           " market [<file>]\n"
           " Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
           " edges from stdin (or from the optionally-specified file).\n"
           " --device=<device_index> Set GPU device for running the graph primitive.\n"
           " --undirected If set then treat the graph as undirected.\n"
           " --instrumented If set then kernels keep track of queue-search_depth\n"
           " and barrier duty (a relative indicator of load imbalance.)\n"
           " --quick If set will skip the CPU validation code.\n"
          );
}
/**
 * @brief Prints the WTF result: the (at most) ten highest-ranked vertices.
 *
 * @param[in] node_id Vertex IDs, ordered by descending rank score.
 * @param[in] rank    Rank score per vertex, parallel to node_id.
 * @param[in] nodes   Number of vertices in the graph.
 */
template<typename VertexId, typename Value, typename SizeT>
void DisplaySolution(VertexId *node_id, Value *rank, SizeT nodes)
{
    // Cap the listing at the ten best entries.
    const int top = (nodes < 10) ? nodes : 10;
    printf("Top %d Page Ranks:\n", top);
    for (int i = 0; i < top; ++i)
        printf("Vertex ID: %d, Page Rank: %5f\n", node_id[i], rank[i]);
}
/******************************************************************************
* WTF Testing Routines
*****************************************************************************/
/**
 * @brief A simple CPU-based reference WTF (Who-To-Follow) implementation:
 *        Boost PageRank selects a "circle of trust" of top vertices, then a
 *        SALSA-style iteration over that circle produces the final scores.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 *
 * @param[in]  graph    Reference to the CSR graph we process on
 * @param[in]  src      Source node ID for WTF algorithm
 * @param[out] node_id  Pointer to store computed output node ID
 * @param[out] rank     Host-side vector to store CPU computed labels for each node
 * @param[in]  delta    Delta value for computing PageRank score
 *                      (NOTE(review): not referenced in this function body)
 * @param[in]  alpha    Teleport weight; also determines the SALSA iteration count
 * @param[in]  max_iter max iteration to go
 */
// TODO: Boost PageRank cannot handle personalized pagerank, so currently the CPU
// implementation gives incorrect answer. Need to find a CPU PPR implementation
template<
    typename VertexId,
    typename Value,
    typename SizeT>
void SimpleReferenceWTF(
    const Csr<VertexId, Value, SizeT> &graph,
    VertexId src,
    VertexId *node_id,
    Value *rank,
    Value delta,
    Value alpha,
    SizeT max_iter)
{
    using namespace boost;
    // Mirror the CSR input into a Boost bidirectional adjacency list.
    typedef adjacency_list<vecS, vecS, bidirectionalS, no_property,
            property<edge_index_t, int> > Graph;
    Graph g;
    for (int i = 0; i < graph.nodes; ++i)
    {
        for (int j = graph.row_offsets[i]; j < graph.row_offsets[i+1]; ++j)
        {
            Graph::edge_descriptor e =
                add_edge(i, graph.column_indices[j], g).first;
            put(edge_index, g, e, i);  // tag each edge with its source vertex
        }
    }
    //
    // compute page rank (timed)
    //
    CpuTimer cpu_timer;
    cpu_timer.Start();
    //remove_dangling_links(g);
    std::vector<Value> ranks(num_vertices(g));
    page_rank(g, make_iterator_property_map(
                  ranks.begin(), get(boost::vertex_index, g)),
              boost::graph::n_iterations(max_iter));
    cpu_timer.Stop();
    float elapsed = cpu_timer.ElapsedMillis();
    for (std::size_t i = 0; i < num_vertices(g); ++i)
    {
        rank[i] = ranks[i];
    }
    // Sort vertices by descending PageRank score.
    RankPair<SizeT, Value> *pr_list =
        (RankPair<SizeT, Value>*)malloc(
            sizeof(RankPair<SizeT, Value>) * num_vertices(g));
    for (int i = 0; i < num_vertices(g); ++i)
    {
        pr_list[i].vertex_id = i;
        pr_list[i].page_rank = rank[i];
    }
    std::stable_sort(
        pr_list, pr_list + num_vertices(g), PRCompare<RankPair<SizeT, Value> >);
    std::vector<int> in_degree(num_vertices(g));
    std::vector<Value> refscore(num_vertices(g));
    // node_id now holds vertices in PageRank order; reset per-vertex state.
    for (int i = 0; i < num_vertices(g); ++i)
    {
        node_id[i] = pr_list[i].vertex_id;
        // NOTE(review): compares position i (post-sort) against vertex id src;
        // looks like it should compare node_id[i] -- confirm intended behavior.
        rank[i] = (i == src) ? 1.0 : 0;
        in_degree[i] = 0;
        refscore[i] = 0;
    }
    free(pr_list);
    // "Circle of trust": the top (at most) 1000 PageRank vertices.
    int cot_size = (graph.nodes > 1000) ? 1000 : graph.nodes;
    // In-degree counts restricted to edges leaving the circle of trust.
    for (int i = 0; i < cot_size; ++i)
    {
        int node = node_id[i];
        for (int j = graph.row_offsets[node];
             j < graph.row_offsets[node+1]; ++j)
        {
            VertexId edge = graph.column_indices[j];
            ++in_degree[edge];
        }
    }
    // SALSA-style sweeps: push rank out along out-edges, then pull it back
    // weighted by in-degree; iteration count is derived from alpha.
    int salsa_iter = 1.0/alpha+1;
    for (int iter = 0; iter < salsa_iter; ++iter)
    {
        for (int i = 0; i < cot_size; ++i)
        {
            int node = node_id[i];
            int out_degree = graph.row_offsets[node+1]-graph.row_offsets[node];
            for (int j = graph.row_offsets[node];
                 j < graph.row_offsets[node+1]; ++j)
            {
                VertexId edge = graph.column_indices[j];
                Value val = rank[node]/ (out_degree > 0 ? out_degree : 1.0);
                refscore[edge] += val;
            }
        }
        for (int i = 0; i < cot_size; ++i)
        {
            rank[node_id[i]] = 0;
        }
        for (int i = 0; i < cot_size; ++i)
        {
            int node = node_id[i];
            rank[node] += (node == src) ? alpha : 0;  // teleport mass at src
            for (int j = graph.row_offsets[node];
                 j < graph.row_offsets[node+1]; ++j)
            {
                VertexId edge = graph.column_indices[j];
                Value val = (1-alpha)*refscore[edge]/in_degree[edge];
                rank[node] += val;
            }
        }
        // Clear relevance scores between sweeps (kept after the last one).
        for (int i = 0; i < cot_size; ++i)
        {
            if (iter+1<salsa_iter) refscore[node_id[i]] = 0;
        }
    }
    // Final sort of the relevance scores.
    // NOTE(review): vertex_id is taken from node_id[i] but page_rank from
    // refscore[i] (indexed by position, not by that vertex) -- this pairing
    // looks mismatched; verify against the GPU implementation.
    RankPair<SizeT, Value> *final_list =
        (RankPair<SizeT, Value>*)malloc(
            sizeof(RankPair<SizeT, Value>) * num_vertices(g));
    for (int i = 0; i < num_vertices(g); ++i)
    {
        final_list[i].vertex_id = node_id[i];
        final_list[i].page_rank = refscore[i];
    }
    std::stable_sort(
        final_list, final_list + num_vertices(g),
        PRCompare<RankPair<SizeT, Value> >);
    for (int i = 0; i < num_vertices(g); ++i)
    {
        node_id[i] = final_list[i].vertex_id;
        rank[i] = final_list[i].page_rank;
    }
    free(final_list);
    printf("CPU Who-To-Follow finished in %lf msec.\n", elapsed);
}
/**
 * @brief Run WTF tests: set up the problem on the GPU, enact WTF, extract the
 *        result, optionally validate against the CPU reference, and report.
 *
 * @tparam VertexId
 * @tparam Value
 * @tparam SizeT
 * @tparam INSTRUMENT
 * @tparam DEBUG
 * @tparam SIZE_CHECK
 *
 * @param[in] info Info structure carrying the CSR graph, parsed command-line
 *                 parameters, GPU device list and the moderngpu context.
 */
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG,
    bool SIZE_CHECK>
void RunTests(Info<VertexId, Value, SizeT> *info)
{
    typedef WTFProblem<
        VertexId,
        SizeT,
        Value> Problem;
    // Unpack test parameters from the info structure.
    Csr<VertexId, Value, SizeT>
                 *csr = info->csr_ptr;
    VertexId src = info->info["source_vertex"].get_int64();
    int max_grid_size = info->info["max_grid_size"].get_int();
    int num_gpus = info->info["num_gpus"].get_int();
    bool quick_mode = info->info["quick_mode"].get_bool();
    bool quiet_mode = info->info["quiet_mode"].get_bool();
    bool stream_from_host = info->info["stream_from_host"].get_bool();
    Value alpha = info->info["alpha"].get_real();
    Value delta = info->info["delta"].get_real();
    Value error = info->info["error"].get_real();
    SizeT max_iter = info->info["max_iteration"].get_int();
    ContextPtr *context = (ContextPtr*)info->context;
    json_spirit::mArray device_list = info->info["device_list"].get_array();
    int* gpu_idx = new int[num_gpus];
    for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int();
    // Allocate host-side label arrays (for both reference and gpu-computed results).
    Value *reference_rank = (Value*)malloc(sizeof(Value) * csr->nodes);
    Value *h_rank = (Value*)malloc(sizeof(Value) * csr->nodes);
    VertexId *h_node_id = (VertexId*)malloc(sizeof(VertexId) * csr->nodes);
    VertexId *reference_node_id = (VertexId*)malloc(sizeof(VertexId) * csr->nodes);
    // reference_check aliases reference_rank; NULL disables CPU validation.
    Value *reference_check = (quick_mode) ? NULL : reference_rank;
    // Allocate WTF enactor map
    WTFEnactor<Problem, INSTRUMENT, DEBUG, SIZE_CHECK> wtf_enactor(gpu_idx);
    // Allocate problem on GPU
    Problem *problem = new Problem;
    util::GRError(problem->Init(
                      stream_from_host,
                      *csr,
                      num_gpus),
                  "Problem WTF Initialization Failed", __FILE__, __LINE__);
    // Perform WTF
    GpuTimer gpu_timer;
    util::GRError(
        problem->Reset(
            src, delta, alpha, error, wtf_enactor.GetFrontierType()),
        "WTF Problem Data Reset Failed", __FILE__, __LINE__);
    gpu_timer.Start();
    util::GRError(
        wtf_enactor.template Enact<Problem>(
            *context, src, alpha, problem, max_iter, max_grid_size),
        "WTF Problem Enact Failed", __FILE__, __LINE__);
    gpu_timer.Stop();
    float elapsed = gpu_timer.ElapsedMillis();
    // Copy out results
    util::GRError(
        problem->Extract(h_rank, h_node_id),
        "WTF Problem Data Extraction Failed", __FILE__, __LINE__);
    // A zero total rank indicates the GPU pass produced nothing to validate.
    float total_pr = 0;
    for (int i = 0; i < csr->nodes; ++i)
    {
        total_pr += h_rank[i];
    }
    //
    // Compute reference CPU WTF solution for source-distance
    //
    if (reference_check != NULL && total_pr > 0)
    {
        if (!quiet_mode) printf("compute ref value\n");
        SimpleReferenceWTF(
            *csr,
            src,
            reference_node_id,
            reference_check,
            delta,
            alpha,
            max_iter);
        if (!quiet_mode) printf("\n");
    }
    // Verify the result
    if (reference_check != NULL && total_pr > 0)
    {
        if (!quiet_mode) printf("Validity: ");
        CompareResults(h_rank, reference_check, csr->nodes, true);
    }
    if (!quiet_mode) {
        printf("\nGPU result.");
        DisplaySolution(h_node_id, h_rank, csr->nodes);
    }
    info->ComputeCommonStats(wtf_enactor.enactor_stats.GetPointer(), elapsed);
    if (!quiet_mode)
        info->DisplayStats();
    info->CollectInfo();
    // Cleanup.  reference_check aliases reference_rank, so free the latter
    // unconditionally (the original leaked it in quick mode, and also leaked
    // h_node_id and reference_node_id).
    if (problem) delete problem;
    if (reference_rank) free(reference_rank);
    if (h_rank) free(h_rank);
    if (h_node_id) free(h_node_id);
    if (reference_node_id) free(reference_node_id);
    // NOTE(review): gpu_idx is intentionally not freed here -- the enactor was
    // handed the pointer and may retain it until its destructor runs; confirm
    // ownership before reclaiming it.
    cudaDeviceSynchronize();
}
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT,
    bool DEBUG>
void RunTests_size_check(Info<VertexId, Value, SizeT> *info)
{
    // Turn the runtime "size_check" flag into a compile-time template flag.
    const bool size_check = info->info["size_check"].get_bool();
    if (size_check)
        RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, true >(info);
    else
        RunTests<VertexId, Value, SizeT, INSTRUMENT, DEBUG, false>(info);
}
template <
    typename VertexId,
    typename Value,
    typename SizeT,
    bool INSTRUMENT>
void RunTests_debug(Info<VertexId, Value, SizeT> *info)
{
    // Turn the runtime "debug_mode" flag into a compile-time template flag.
    const bool debug = info->info["debug_mode"].get_bool();
    if (debug)
        RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, true >(info);
    else
        RunTests_size_check<VertexId, Value, SizeT, INSTRUMENT, false>(info);
}
template <
    typename VertexId,
    typename Value,
    typename SizeT>
void RunTests_instrumented(Info<VertexId, Value, SizeT> *info)
{
    // Turn the runtime "instrument" flag into a compile-time template flag.
    const bool instrument = info->info["instrument"].get_bool();
    if (instrument)
        RunTests_debug<VertexId, Value, SizeT, true >(info);
    else
        RunTests_debug<VertexId, Value, SizeT, false>(info);
}
/******************************************************************************
 * Main
 ******************************************************************************/
int main( int argc, char** argv)
{
    CommandLineArgs args(argc, argv);
    if ((argc < 2) || (args.CheckCmdLineFlag("help")))
    {
        Usage();
        return 1;
    }
    //
    // Construct graph and perform search(es)
    //
    typedef int VertexId;                   // Use as the node identifier
    typedef float Value;                    // Use as the value type
    typedef int SizeT;                      // Use as the graph size type
    Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
    Info<VertexId, Value, SizeT> *info = new Info<VertexId, Value, SizeT>;
    info->info["undirected"] = args.CheckCmdLineFlag("undirected");
    info->Init("WTF", args, csr);
    RunTests_instrumented<VertexId, Value, SizeT>(info);
    // The original removed neither the unused graph_args local nor this
    // allocation; delete info so the run does not leak it.
    delete info;
    return 0;
}
|
29c17d9c11c0b56c5e7ae958291963fecc791ad9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rbbox_overlaps.hpp"
#include <vector>
#include <iostream>
#include <cmath>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
// Signed area of triangle (a, b, c), each point a {x, y} pair: positive when
// the vertices wind counter-clockwise, negative when clockwise.
__device__ inline float trangle_area(float * a, float * b, float * c) {
  return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0]))/2.0;
}
// Area of a polygon given as a flat {x0,y0,x1,y1,...} list of num_of_inter
// vertices, by fanning triangles from the first vertex.  The per-triangle
// fabs() makes this correct only when the vertices are in angular order --
// see reorder_pts().
__device__ inline float area(float * int_pts, int num_of_inter) {
  float area = 0.0;
  for(int i = 0;i < num_of_inter - 2;i++) {
    area += fabs(trangle_area(int_pts, int_pts + 2 * i + 2, int_pts + 2 * i + 4));
  }
  return area;
}
// Sorts the num_of_inter intersection points (flat {x,y} pairs in int_pts)
// into angular order around their centroid so area() can fan-triangulate the
// polygon.  Uses a monotonic pseudo-angle (folded normalized x component)
// instead of atan2, then an in-place insertion sort.  Supports up to 16
// points (vs[16]), matching the maximum inter_pts() can produce.
__device__ inline void reorder_pts(float * int_pts, int num_of_inter) {
  if(num_of_inter > 0) {
    // Centroid of the point set.
    float center[2];
    center[0] = 0.0;
    center[1] = 0.0;
    for(int i = 0;i < num_of_inter;i++) {
      center[0] += int_pts[2 * i];
      center[1] += int_pts[2 * i + 1];
    }
    center[0] /= num_of_inter;
    center[1] /= num_of_inter;
    // Pseudo-angle key per point: normalized x of the centroid-to-point
    // direction, reflected for points below the centroid so the key is
    // monotonic over a full turn.
    float vs[16];
    float v[2];
    float d;
    for(int i = 0;i < num_of_inter;i++) {
      v[0] = int_pts[2 * i]-center[0];
      v[1] = int_pts[2 * i + 1]-center[1];
      d = sqrt(v[0] * v[0] + v[1] * v[1]);
      v[0] = v[0] / d;
      v[1] = v[1] / d;
      if(v[1] < 0) {
        v[0]= - 2 - v[0];
      }
      vs[i] = v[0];
    }
    // Insertion sort of keys, moving the coordinate pairs along with them.
    float temp,tx,ty;
    int j;
    for(int i=1;i<num_of_inter;++i){
      if(vs[i-1]>vs[i]){
        temp = vs[i];
        tx = int_pts[2*i];
        ty = int_pts[2*i+1];
        j=i;
        while(j>0&&vs[j-1]>temp){
          vs[j] = vs[j-1];
          int_pts[j*2] = int_pts[j*2-2];
          int_pts[j*2+1] = int_pts[j*2-1];
          j--;
        }
        vs[j] = temp;
        int_pts[j*2] = tx;
        int_pts[j*2+1] = ty;
      }
    }
  }
}
// Intersects edge i of quad pts1 (segment a-b) with edge j of quad pts2
// (segment c-d); edges wrap around via (i+1) % 4.  Returns true and writes
// the crossing point into temp_pts only for a proper crossing; the -1e-5
// slack rejects (near-)collinear and endpoint-touching configurations.
__device__ inline bool inter2line(float * pts1, float *pts2, int i, int j, float * temp_pts) {
  float a[2];
  float b[2];
  float c[2];
  float d[2];
  float area_abc, area_abd, area_cda, area_cdb;
  a[0] = pts1[2 * i];
  a[1] = pts1[2 * i + 1];
  b[0] = pts1[2 * ((i + 1) % 4)];
  b[1] = pts1[2 * ((i + 1) % 4) + 1];
  c[0] = pts2[2 * j];
  c[1] = pts2[2 * j + 1];
  d[0] = pts2[2 * ((j + 1) % 4)];
  d[1] = pts2[2 * ((j + 1) % 4) + 1];
  // c and d must lie on strictly opposite sides of line a-b ...
  area_abc = trangle_area(a, b, c);
  area_abd = trangle_area(a, b, d);
  if(area_abc * area_abd >= -1e-5) {
    return false;
  }
  // ... and a and b on strictly opposite sides of line c-d.
  area_cda = trangle_area(c, d, a);
  area_cdb = area_cda + area_abc - area_abd;
  if (area_cda * area_cdb >= -1e-5) {
    return false;
  }
  // Parametric position of the crossing along segment a-b.
  float t = area_cda / (area_abd - area_abc);
  float dx = t * (b[0] - a[0]);
  float dy = t * (b[1] - a[1]);
  temp_pts[0] = a[0] + dx;
  temp_pts[1] = a[1] + dy;
  return true;
}
// Tests whether point (pt_x, pt_y) lies inside the quadrilateral pts
// ({x,y} x 4; treated as a parallelogram -- only edges AB and AD are used).
// The point is projected onto AB and AD; each projection must fall within
// the edge, with a -1 slack so slightly-outside points still count.
__device__ inline bool inrect(float pt_x, float pt_y, float * pts) {
  double ab[2];
  double ad[2];
  double ap[2];
  double abab;
  double abap;
  double adad;
  double adap;
  ab[0] = pts[2] - pts[0];
  ab[1] = pts[3] - pts[1];
  ad[0] = pts[6] - pts[0];
  ad[1] = pts[7] - pts[1];
  ap[0] = pt_x - pts[0];
  ap[1] = pt_y - pts[1];
  abab = ab[0] * ab[0] + ab[1] * ab[1];
  abap = ab[0] * ap[0] + ab[1] * ap[1];
  adad = ad[0] * ad[0] + ad[1] * ad[1];
  adap = ad[0] * ap[0] + ad[1] * ap[1];
  bool result = (abab - abap >= -1) && (abap >= -1) && (adad - adap >= -1) && (adap >= -1);
  return result;
}
// Collects the vertices of the intersection polygon of two quadrilaterals
// pts1 and pts2 ({x,y} x 4 each) into int_pts and returns their count, at
// most 16 (8 contained corners + 8 edge crossings).  Output order is
// arbitrary; call reorder_pts() before computing the area.
__device__ inline int inter_pts(float * pts1, float * pts2, float * int_pts) {
  int num_of_inter = 0;
  // Corners of either rectangle lying inside the other.
  for(int i = 0;i < 4;i++) {
    if(inrect(pts1[2 * i], pts1[2 * i + 1], pts2)) {
      int_pts[num_of_inter * 2] = pts1[2 * i];
      int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1];
      num_of_inter++;
    }
    if(inrect(pts2[2 * i], pts2[2 * i + 1], pts1)) {
      int_pts[num_of_inter * 2] = pts2[2 * i];
      int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1];
      num_of_inter++;
    }
  }
  // Pairwise edge-edge crossing points.
  float temp_pts[2];
  for(int i = 0;i < 4;i++) {
    for(int j = 0;j < 4;j++) {
      bool has_pts = inter2line(pts1, pts2, i, j, temp_pts);
      if(has_pts) {
        int_pts[num_of_inter * 2] = temp_pts[0];
        int_pts[num_of_inter * 2 + 1] = temp_pts[1];
        num_of_inter++;
      }
    }
  }
  return num_of_inter;
}
// Expands a rotated box region = {ctr_x, ctr_y, w, h, angle_in_degrees} into
// its four corner coordinates pts = {x0,y0, ..., x3,y3}.  Corners are
// rotated about the center and emitted in reverse order (the 7 - 2*i
// indexing), which flips the winding direction of the output quad.
__device__ inline void convert_region(float * pts , float const * const region) {
  float angle = region[4];
  float a_cos = cos(angle/180.0*3.1415926535);
  float a_sin = sin(angle/180.0*3.1415926535);
  float ctr_x = region[0];
  float ctr_y = region[1];
  float w = region[2];
  float h = region[3];
  // Axis-aligned corners relative to the center, before rotation.
  float pts_x[4];
  float pts_y[4];
  pts_x[0] = - w / 2;
  pts_x[1] = w / 2;
  pts_x[2] = w / 2;
  pts_x[3] = - w / 2;
  pts_y[0] = - h / 2;
  pts_y[1] = - h / 2;
  pts_y[2] = h / 2;
  pts_y[3] = h / 2;
  for(int i = 0;i < 4;i++) {
    pts[7 - 2 * i - 1] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x;
    pts[7 - 2 * i] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y;
  }
}
// Intersection area of two rotated boxes, each given as
// {cx, cy, w, h, angle_in_degrees}.
__device__ inline float inter(float const * const region1, float const * const region2) {
  // Corner representations of both boxes and the crossing-point buffer
  // (at most 16 intersection points).
  float corners1[8];
  float corners2[8];
  float crossings[16];
  convert_region(corners1, region1);
  convert_region(corners2, region2);
  int n = inter_pts(corners1, corners2, crossings);
  reorder_pts(crossings, n);
  return area(crossings, n);
}
// IoU of two rotated boxes {cx, cy, w, h, angle_in_degrees}; clamped to
// [0, 1].  (Negative intermediate results can arise from the epsilon slack
// in the geometric predicates.)
__device__ inline float devRotateIoU(float const * const region1, float const * const region2) {
  // Identical boxes (component-wise within 1e-5) short-circuit to IoU 1,
  // avoiding degenerate self-intersection geometry.
  bool identical = true;
  for (int k = 0; k < 5; k++) {
    if (fabs(region1[k] - region2[k]) >= 1e-5) {
      identical = false;
      break;
    }
  }
  if (identical) {
    return 1.0;
  }
  float area1 = region1[2] * region1[3];
  float area2 = region2[2] * region2[3];
  float area_inter = inter(region1, region2);
  float iou = area_inter / (area1 + area2 - area_inter);
  return (iou < 0) ? 0.0 : iou;
}
// Fills dev_overlaps (row-major N x K) with rotated IoUs between dev_boxes
// (N boxes) and dev_query_boxes (K boxes), 5 floats per box.  Launched on a
// 2D grid of (DIVUP(N, threadsPerBlock), DIVUP(K, threadsPerBlock)) blocks of
// threadsPerBlock threads: one thread per row box, looping over the tile's
// column boxes.
__global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes,
                                const float * dev_query_boxes, float* dev_overlaps) {
  const int col_start = blockIdx.y;
  const int row_start = blockIdx.x;
  // Clip the last tile in each dimension.
  const int row_size = min(N - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size = min(K - col_start * threadsPerBlock, threadsPerBlock);
  __shared__ float block_boxes[threadsPerBlock * 5];
  __shared__ float block_query_boxes[threadsPerBlock * 5];
  // Stage this tile's boxes in shared memory, 5 floats per box.
  if (threadIdx.x < col_size) {
    for (int f = 0; f < 5; f++) {
      block_query_boxes[threadIdx.x * 5 + f] =
          dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + f];
    }
  }
  if (threadIdx.x < row_size) {
    for (int f = 0; f < 5; f++) {
      block_boxes[threadIdx.x * 5 + f] =
          dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + f];
    }
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    for (int i = 0; i < col_size; i++) {
      int offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock
                 + threadIdx.x * K + i;
      dev_overlaps[offset] =
          devRotateIoU(block_boxes + threadIdx.x * 5, block_query_boxes + i * 5);
    }
  }
}
// Makes device_id the current GPU for this host thread (no-op when it
// already is).
void _set_device(int device_id) {
  int current;
  CUDA_CHECK(hipGetDevice(&current));
  if (current != device_id) {
    // The call to hipSetDevice must come before any calls to Get, which
    // may perform initialization using the GPU.
    CUDA_CHECK(hipSetDevice(device_id));
  }
}
// Computes the n x k rotated-IoU matrix between `boxes` and `query_boxes`
// (host arrays, 5 floats per box: cx, cy, w, h, angle_in_degrees) into the
// host array `overlaps` (row-major, n rows by k columns), on GPU device_id.
void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) {

  _set_device(device_id);

  float* overlaps_dev = NULL;
  float* boxes_dev = NULL;
  float* query_boxes_dev = NULL;

  // Upload both box sets (5 floats per box).
  CUDA_CHECK(hipMalloc(&boxes_dev,
                       n * 5 * sizeof(float)));
  CUDA_CHECK(hipMemcpy(boxes_dev,
                       boxes,
                       n * 5 * sizeof(float),
                       hipMemcpyHostToDevice));
  CUDA_CHECK(hipMalloc(&query_boxes_dev,
                       k * 5 * sizeof(float)));
  CUDA_CHECK(hipMemcpy(query_boxes_dev,
                       query_boxes,
                       k * 5 * sizeof(float),
                       hipMemcpyHostToDevice));
  CUDA_CHECK(hipMalloc(&overlaps_dev,
                       n * k * sizeof(float)));

  // One thread per row box within a block; blocks tile the (n, k) grid.
  // (Removed the dead `if (true){}` the original carried here.)
  dim3 blocks(DIVUP(n, threadsPerBlock),
              DIVUP(k, threadsPerBlock));
  dim3 threads(threadsPerBlock);

  hipLaunchKernelGGL(( overlaps_kernel), dim3(blocks), dim3(threads), 0, 0, n, k,
                     boxes_dev,
                     query_boxes_dev,
                     overlaps_dev);
  // Surface launch-configuration errors immediately; execution errors surface
  // at the synchronizing memcpy below.
  CUDA_CHECK(hipGetLastError());

  CUDA_CHECK(hipMemcpy(overlaps,
                       overlaps_dev,
                       n * k * sizeof(float),
                       hipMemcpyDeviceToHost));

  CUDA_CHECK(hipFree(overlaps_dev));
  CUDA_CHECK(hipFree(boxes_dev));
  CUDA_CHECK(hipFree(query_boxes_dev));
}
| 29c17d9c11c0b56c5e7ae958291963fecc791ad9.cu |
#include "rbbox_overlaps.hpp"
#include <vector>
#include <iostream>
#include <cmath>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
// Signed area of triangle (a, b, c), each point a {x, y} pair: positive when
// the vertices wind counter-clockwise, negative when clockwise.
__device__ inline float trangle_area(float * a, float * b, float * c) {
  return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * (b[0] - c[0]))/2.0;
}
// Area of a polygon given as a flat {x0,y0,x1,y1,...} list of num_of_inter
// vertices, by fanning triangles from the first vertex.  The per-triangle
// fabs() makes this correct only when the vertices are in angular order --
// see reorder_pts().
__device__ inline float area(float * int_pts, int num_of_inter) {
  float area = 0.0;
  for(int i = 0;i < num_of_inter - 2;i++) {
    area += fabs(trangle_area(int_pts, int_pts + 2 * i + 2, int_pts + 2 * i + 4));
  }
  return area;
}
// Sorts the num_of_inter intersection points (flat {x,y} pairs in int_pts)
// into angular order around their centroid so area() can fan-triangulate the
// polygon.  Uses a monotonic pseudo-angle (folded normalized x component)
// instead of atan2, then an in-place insertion sort.  Supports up to 16
// points (vs[16]), matching the maximum inter_pts() can produce.
__device__ inline void reorder_pts(float * int_pts, int num_of_inter) {
  if(num_of_inter > 0) {
    // Centroid of the point set.
    float center[2];
    center[0] = 0.0;
    center[1] = 0.0;
    for(int i = 0;i < num_of_inter;i++) {
      center[0] += int_pts[2 * i];
      center[1] += int_pts[2 * i + 1];
    }
    center[0] /= num_of_inter;
    center[1] /= num_of_inter;
    // Pseudo-angle key per point: normalized x of the centroid-to-point
    // direction, reflected for points below the centroid so the key is
    // monotonic over a full turn.
    float vs[16];
    float v[2];
    float d;
    for(int i = 0;i < num_of_inter;i++) {
      v[0] = int_pts[2 * i]-center[0];
      v[1] = int_pts[2 * i + 1]-center[1];
      d = sqrt(v[0] * v[0] + v[1] * v[1]);
      v[0] = v[0] / d;
      v[1] = v[1] / d;
      if(v[1] < 0) {
        v[0]= - 2 - v[0];
      }
      vs[i] = v[0];
    }
    // Insertion sort of keys, moving the coordinate pairs along with them.
    float temp,tx,ty;
    int j;
    for(int i=1;i<num_of_inter;++i){
      if(vs[i-1]>vs[i]){
        temp = vs[i];
        tx = int_pts[2*i];
        ty = int_pts[2*i+1];
        j=i;
        while(j>0&&vs[j-1]>temp){
          vs[j] = vs[j-1];
          int_pts[j*2] = int_pts[j*2-2];
          int_pts[j*2+1] = int_pts[j*2-1];
          j--;
        }
        vs[j] = temp;
        int_pts[j*2] = tx;
        int_pts[j*2+1] = ty;
      }
    }
  }
}
// Intersects edge i of quad pts1 (segment a-b) with edge j of quad pts2
// (segment c-d); edges wrap around via (i+1) % 4.  Returns true and writes
// the crossing point into temp_pts only for a proper crossing; the -1e-5
// slack rejects (near-)collinear and endpoint-touching configurations.
__device__ inline bool inter2line(float * pts1, float *pts2, int i, int j, float * temp_pts) {
  float a[2];
  float b[2];
  float c[2];
  float d[2];
  float area_abc, area_abd, area_cda, area_cdb;
  a[0] = pts1[2 * i];
  a[1] = pts1[2 * i + 1];
  b[0] = pts1[2 * ((i + 1) % 4)];
  b[1] = pts1[2 * ((i + 1) % 4) + 1];
  c[0] = pts2[2 * j];
  c[1] = pts2[2 * j + 1];
  d[0] = pts2[2 * ((j + 1) % 4)];
  d[1] = pts2[2 * ((j + 1) % 4) + 1];
  // c and d must lie on strictly opposite sides of line a-b ...
  area_abc = trangle_area(a, b, c);
  area_abd = trangle_area(a, b, d);
  if(area_abc * area_abd >= -1e-5) {
    return false;
  }
  // ... and a and b on strictly opposite sides of line c-d.
  area_cda = trangle_area(c, d, a);
  area_cdb = area_cda + area_abc - area_abd;
  if (area_cda * area_cdb >= -1e-5) {
    return false;
  }
  // Parametric position of the crossing along segment a-b.
  float t = area_cda / (area_abd - area_abc);
  float dx = t * (b[0] - a[0]);
  float dy = t * (b[1] - a[1]);
  temp_pts[0] = a[0] + dx;
  temp_pts[1] = a[1] + dy;
  return true;
}
__device__ inline bool inrect(float pt_x, float pt_y, float * pts) {
  // Test whether (pt_x, pt_y) lies inside the quad pts = (x0,y0,...,x3,y3)
  // by projecting the point onto the two edge vectors AB and AD and checking
  // 0 <= AP.AB <= AB.AB and 0 <= AP.AD <= AD.AD. Each comparison carries a
  // slack of 1 (in squared-coordinate units) to absorb floating-point error.
  double ab_x = pts[2] - pts[0];
  double ab_y = pts[3] - pts[1];
  double ad_x = pts[6] - pts[0];
  double ad_y = pts[7] - pts[1];
  double ap_x = pt_x - pts[0];
  double ap_y = pt_y - pts[1];
  double abab = ab_x * ab_x + ab_y * ab_y;
  double abap = ab_x * ap_x + ab_y * ap_y;
  double adad = ad_x * ad_x + ad_y * ad_y;
  double adap = ad_x * ap_x + ad_y * ap_y;
  return (abab - abap >= -1) && (abap >= -1) && (adad - adap >= -1) && (adap >= -1);
}
__device__ inline int inter_pts(float * pts1, float * pts2, float * int_pts) {
  // Collect the vertices of the clip polygon of two convex quads: corners
  // of each quad lying inside the other, plus all pairwise edge crossings.
  // Vertices are appended unordered as (x, y) pairs; the count is returned.
  // NOTE(review): int_pts must hold 16 floats (8 vertices). A rect/rect
  // intersection has at most 8 vertices geometrically, but nothing checks
  // the bound here -- confirm near-degenerate inputs cannot overflow it.
  int count = 0;
  for (int corner = 0; corner < 4; corner++) {
    if (inrect(pts1[2 * corner], pts1[2 * corner + 1], pts2)) {
      int_pts[count * 2] = pts1[2 * corner];
      int_pts[count * 2 + 1] = pts1[2 * corner + 1];
      count++;
    }
    if (inrect(pts2[2 * corner], pts2[2 * corner + 1], pts1)) {
      int_pts[count * 2] = pts2[2 * corner];
      int_pts[count * 2 + 1] = pts2[2 * corner + 1];
      count++;
    }
  }
  float crossing[2];
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      if (inter2line(pts1, pts2, i, j, crossing)) {
        int_pts[count * 2] = crossing[0];
        int_pts[count * 2 + 1] = crossing[1];
        count++;
      }
    }
  }
  return count;
}
__device__ inline void convert_region(float * pts , float const * const region) {
  // Expand region = (cx, cy, w, h, angle-in-degrees) into the 4 corner
  // coordinates of the rotated box. Corners are written back-to-front:
  // local corner i lands at pts[6 - 2*i], pts[7 - 2*i].
  float angle = region[4];
  float a_cos = cos(angle/180.0*3.1415926535);
  float a_sin = sin(angle/180.0*3.1415926535);
  float ctr_x = region[0];
  float ctr_y = region[1];
  float w = region[2];
  float h = region[3];
  // Axis-aligned corner offsets around the centre, in the box frame:
  // (-w/2,-h/2), (w/2,-h/2), (w/2,h/2), (-w/2,h/2).
  float pts_x[4] = { - w / 2, w / 2, w / 2, - w / 2 };
  float pts_y[4] = { - h / 2, - h / 2, h / 2, h / 2 };
  for (int i = 0; i < 4; i++) {
    // Rotate by `angle`, then translate to the centre.
    pts[7 - 2 * i - 1] = a_cos * pts_x[i] - a_sin * pts_y[i] + ctr_x;
    pts[7 - 2 * i] = a_sin * pts_x[i] + a_cos * pts_y[i] + ctr_y;
  }
}
__device__ inline float inter(float const * const region1, float const * const region2) {
  // Intersection area of two rotated boxes given as (cx, cy, w, h, angle):
  // expand both to corner form, clip, order the clip polygon, measure it.
  float corners1[8];
  float corners2[8];
  float clip_poly[16];
  convert_region(corners1, region1);
  convert_region(corners2, region2);
  int n_vertices = inter_pts(corners1, corners2, clip_poly);
  reorder_pts(clip_poly, n_vertices);
  return area(clip_poly, n_vertices);
}
__device__ inline float devRotateIoU(float const * const region1, float const * const region2) {
  // IoU of two rotated boxes (cx, cy, w, h, angle). Boxes equal in every
  // field (within 1e-5) short-circuit to 1.0, which also avoids the
  // degenerate all-corners-coincident geometry in the clipper.
  bool same_box = fabs(region1[0] - region2[0]) < 1e-5
               && fabs(region1[1] - region2[1]) < 1e-5
               && fabs(region1[2] - region2[2]) < 1e-5
               && fabs(region1[3] - region2[3]) < 1e-5
               && fabs(region1[4] - region2[4]) < 1e-5;
  if (same_box) {
    return 1.0;
  }
  float area1 = region1[2] * region1[3];
  float area2 = region2[2] * region2[3];
  float area_inter = inter(region1, region2);
  float iou = area_inter / (area1 + area2 - area_inter);
  // Clipper noise can push the ratio slightly negative; clamp at zero.
  if (iou < 0) {
    iou = 0.0;
  }
  return iou;
}
__global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes,
                                const float * dev_query_boxes, float* dev_overlaps) {
  // One block per 64x64 tile of the N x K IoU matrix (threadsPerBlock == 64):
  // blockIdx.x selects the row tile (boxes), blockIdx.y the column tile
  // (query boxes). Each box is 5 floats: (cx, cy, w, h, angle).
  const int col_start = blockIdx.y;
  const int row_start = blockIdx.x;
  // Clip the tile at the matrix edges.
  const int row_size = min(N - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size = min(K - col_start * threadsPerBlock, threadsPerBlock);
  // Stage both tiles in shared memory; each thread copies one 5-float box.
  __shared__ float block_boxes[threadsPerBlock * 5];
  __shared__ float block_query_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    for (int f = 0; f < 5; f++) {
      block_query_boxes[threadIdx.x * 5 + f] =
          dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + f];
    }
  }
  if (threadIdx.x < row_size) {
    for (int f = 0; f < 5; f++) {
      block_boxes[threadIdx.x * 5 + f] =
          dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + f];
    }
  }
  // All staged data must be visible before any thread starts computing.
  __syncthreads();
  // Each thread owns one row of the tile and sweeps every column.
  if (threadIdx.x < row_size) {
    for (int i = 0; i < col_size; i++) {
      int offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock
                 + threadIdx.x * K + i;
      dev_overlaps[offset] =
          devRotateIoU(block_boxes + threadIdx.x * 5, block_query_boxes + i * 5);
    }
  }
}
void _set_device(int device_id) {
  // Make `device_id` the active CUDA device for subsequent runtime calls.
  int current_device;
  // Fixed: the address-of expression was garbled by a broken character
  // encoding ("¤t_device" for "&current_device"), which does not compile.
  CUDA_CHECK(cudaGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // The call to cudaSetDevice must come before any calls to Get, which
  // may perform initialization using the GPU.
  CUDA_CHECK(cudaSetDevice(device_id));
}
void _overlaps(float* overlaps,const float* boxes,const float* query_boxes, int n, int k, int device_id) {
  // Compute the n x k rotated-IoU matrix between `boxes` (n x 5, host) and
  // `query_boxes` (k x 5, host) on GPU `device_id`, writing the result to
  // the host buffer `overlaps`. Each box is (cx, cy, w, h, angle-degrees).
  _set_device(device_id);
  float* overlaps_dev = NULL;
  float* boxes_dev = NULL;
  float* query_boxes_dev = NULL;
  CUDA_CHECK(cudaMalloc(&boxes_dev,
                        n * 5 * sizeof(float)));
  CUDA_CHECK(cudaMemcpy(boxes_dev,
                        boxes,
                        n * 5 * sizeof(float),
                        cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMalloc(&query_boxes_dev,
                        k * 5 * sizeof(float)));
  CUDA_CHECK(cudaMemcpy(query_boxes_dev,
                        query_boxes,
                        k * 5 * sizeof(float),
                        cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMalloc(&overlaps_dev,
                        n * k * sizeof(float)));
  // One block per 64x64 tile of the result matrix (threadsPerBlock == 64).
  dim3 blocks(DIVUP(n, threadsPerBlock),
              DIVUP(k, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  overlaps_kernel<<<blocks, threads>>>(n, k,
                                       boxes_dev,
                                       query_boxes_dev,
                                       overlaps_dev);
  // Fixed: a kernel launch returns no status by itself, so bad launch
  // configurations were silently ignored; also removed the dead
  // "if (true){}" statement that preceded the launch setup.
  CUDA_CHECK(cudaGetLastError());
  // The blocking copy below synchronizes, surfacing async kernel errors.
  CUDA_CHECK(cudaMemcpy(overlaps,
                        overlaps_dev,
                        n * k * sizeof(float),
                        cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaFree(overlaps_dev));
  CUDA_CHECK(cudaFree(boxes_dev));
  CUDA_CHECK(cudaFree(query_boxes_dev));
}
|
2978cacd7be0830bcecfb1383d92b92fa03d30c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/manual_awkward_IndexedArray_getitem_nextcarry_outindex.cu", line)
#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"
template <typename C, typename T>
__global__
void awkward_IndexedArray_getitem_nextcarry_outindex_kernel(
    T* tocarry,
    C* toindex,
    const C* fromindex,
    int64_t* prefixedsum_mask,
    int64_t lenindex,
    int64_t lencontent,
    unsigned long long* error_i) {
  /**
   * Here the thread_id has a unsigned long long data type rather than a int64_t
   * type because atomicMin doesn't provide a function signature for int64_t type
   */
  unsigned long long thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  // Fixed: guard the grid tail. The launch rounds the grid up to whole
  // blocks, so surplus threads used to read/write past lenindex (the
  // companion filter_mask kernel already had this guard).
  if (thread_id >= (unsigned long long)lenindex) {
    return;
  }
  C j = fromindex[thread_id];
  if (j >= lencontent) {
    // Record the first (lowest) out-of-range position.
    atomicMin(error_i, thread_id);
  }
  else if (j < 0) {
    // Negative index means "missing": emit -1 and no carry entry.
    toindex[thread_id] = -1;
  }
  else {
    // prefixedsum_mask appears to hold, per valid entry, its 1-based
    // compacted position (scan of the filter mask) -- hence the -1.
    // TODO(review): confirm against exclusive_scan's semantics.
    tocarry[prefixedsum_mask[thread_id] - 1] = j;
    toindex[thread_id] = (C)(prefixedsum_mask[thread_id] - 1);
  }
}
template <typename C>
__global__ void
awkward_IndexedArray_getitem_nextcarry_outindex_filter_mask(
    const C* fromindex,
    int8_t* filtered_mask,
    int64_t lenindex,
    int64_t lencontent) {
  // Mark each in-range, non-negative index with 1 in filtered_mask (the
  // caller pre-zeroes it); the mask is later prefix-summed to compute
  // compacted output positions. Grid-tail threads do nothing.
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id >= lenindex) {
    return;
  }
  C value = fromindex[thread_id];
  if (value >= 0 && value < lencontent) {
    filtered_mask[thread_id] = 1;
  }
}
__global__ void
awkward_IndexedArray_getitem_nextcarry_outindex_initialize_error_i(
    unsigned long long* error_i,
    unsigned long long value) {
  // One-thread kernel: seed the device-side error slot with its
  // "no error" sentinel before the main kernel runs.
  error_i[0] = value;
}
template <typename C, typename T>
ERROR awkward_IndexedArray_getitem_nextcarry_outindex(
  T* tocarry,
  C* toindex,
  const C* fromindex,
  int64_t lenindex,
  int64_t lencontent) {
  // Host driver: build a validity mask over fromindex, prefix-sum it into
  // compacted positions, then run the main kernel. Returns a failure with
  // the first out-of-range index when one exists.
  dim3 blocks_per_grid = blocks(lenindex);
  dim3 threads_per_block = threads(lenindex);
  // Scratch: 0/1 validity mask and its prefix sum (compacted positions).
  int8_t* filtered_mask;
  int64_t* res_temp;
  HANDLE_ERROR(hipMalloc((void**)&filtered_mask, sizeof(int8_t) * lenindex));
  HANDLE_ERROR(hipMalloc((void**)&res_temp, sizeof(int64_t) * lenindex));
  HANDLE_ERROR(hipMemset(filtered_mask, 0, sizeof(int8_t) * lenindex));
  hipLaunchKernelGGL(( awkward_IndexedArray_getitem_nextcarry_outindex_filter_mask<C>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
    fromindex,
    filtered_mask,
    lenindex,
    lencontent);
  exclusive_scan(res_temp, filtered_mask, lenindex);
  // Sentinel lenindex + 1 means "no error"; the main kernel atomicMin's the
  // first offending position into this slot.
  unsigned long long * dev_error_i;
  unsigned long long error_i;
  HANDLE_ERROR(hipMalloc((void**)&dev_error_i, sizeof(unsigned long long)));
  hipLaunchKernelGGL(( awkward_IndexedArray_getitem_nextcarry_outindex_initialize_error_i), dim3(1),dim3(1), 0, 0,
    dev_error_i,
    lenindex + 1);
  hipLaunchKernelGGL(( awkward_IndexedArray_getitem_nextcarry_outindex_kernel<C, T>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
    tocarry,
    toindex,
    fromindex,
    res_temp,
    lenindex,
    lencontent,
    dev_error_i);
  HANDLE_ERROR(hipMemcpy(&error_i, dev_error_i, sizeof(unsigned long long), hipMemcpyDeviceToHost));
  ERROR result = success();
  if (error_i != (unsigned long long)(lenindex + 1)) {
    C error_j;
    HANDLE_ERROR(hipMemcpy(&error_j, fromindex + error_i, sizeof(C), hipMemcpyDeviceToHost));
    result = failure("index out of range", error_i, error_j, FILENAME(__LINE__));
  }
  // Fixed: these device buffers were leaked on every call (and on the error
  // path, which returned before any cleanup could run).
  HANDLE_ERROR(hipFree(filtered_mask));
  HANDLE_ERROR(hipFree(res_temp));
  HANDLE_ERROR(hipFree(dev_error_i));
  return result;
}
ERROR awkward_IndexedArray32_getitem_nextcarry_outindex_64(
  int64_t* tocarry,
  int32_t* toindex,
  const int32_t* fromindex,
  int64_t lenindex,
  int64_t lencontent) {
  // Concrete entry point for int32 indices / int64 carry output; forwards
  // to the shared template implementation.
  return awkward_IndexedArray_getitem_nextcarry_outindex<int32_t, int64_t>(
      tocarry, toindex, fromindex, lenindex, lencontent);
}
ERROR awkward_IndexedArrayU32_getitem_nextcarry_outindex_64(
  int64_t* tocarry,
  uint32_t* toindex,
  const uint32_t* fromindex,
  int64_t lenindex,
  int64_t lencontent) {
  // Concrete entry point for uint32 indices / int64 carry output; forwards
  // to the shared template implementation.
  return awkward_IndexedArray_getitem_nextcarry_outindex<uint32_t, int64_t>(
      tocarry, toindex, fromindex, lenindex, lencontent);
}
ERROR awkward_IndexedArray64_getitem_nextcarry_outindex_64(
  int64_t* tocarry,
  int64_t* toindex,
  const int64_t* fromindex,
  int64_t lenindex,
  int64_t lencontent) {
  // Concrete entry point for int64 indices / int64 carry output; forwards
  // to the shared template implementation.
  return awkward_IndexedArray_getitem_nextcarry_outindex<int64_t, int64_t>(
      tocarry, toindex, fromindex, lenindex, lencontent);
}
| 2978cacd7be0830bcecfb1383d92b92fa03d30c8.cu | #define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/manual_awkward_IndexedArray_getitem_nextcarry_outindex.cu", line)
#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"
template <typename C, typename T>
__global__
void awkward_IndexedArray_getitem_nextcarry_outindex_kernel(
    T* tocarry,
    C* toindex,
    const C* fromindex,
    int64_t* prefixedsum_mask,
    int64_t lenindex,
    int64_t lencontent,
    unsigned long long* error_i) {
  /**
   * Here the thread_id has a unsigned long long data type rather than a int64_t
   * type because atomicMin doesn't provide a function signature for int64_t type
   */
  unsigned long long thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  // Fixed: guard the grid tail. The launch rounds the grid up to whole
  // blocks, so surplus threads used to read/write past lenindex (the
  // companion filter_mask kernel already had this guard).
  if (thread_id >= (unsigned long long)lenindex) {
    return;
  }
  C j = fromindex[thread_id];
  if (j >= lencontent) {
    // Record the first (lowest) out-of-range position.
    atomicMin(error_i, thread_id);
  }
  else if (j < 0) {
    // Negative index means "missing": emit -1 and no carry entry.
    toindex[thread_id] = -1;
  }
  else {
    // prefixedsum_mask appears to hold, per valid entry, its 1-based
    // compacted position (scan of the filter mask) -- hence the -1.
    // TODO(review): confirm against exclusive_scan's semantics.
    tocarry[prefixedsum_mask[thread_id] - 1] = j;
    toindex[thread_id] = (C)(prefixedsum_mask[thread_id] - 1);
  }
}
template <typename C>
__global__ void
awkward_IndexedArray_getitem_nextcarry_outindex_filter_mask(
    const C* fromindex,
    int8_t* filtered_mask,
    int64_t lenindex,
    int64_t lencontent) {
  // Mark each in-range, non-negative index with 1 in filtered_mask (the
  // caller pre-zeroes it); the mask is later prefix-summed to compute
  // compacted output positions. Grid-tail threads do nothing.
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id >= lenindex) {
    return;
  }
  C value = fromindex[thread_id];
  if (value >= 0 && value < lencontent) {
    filtered_mask[thread_id] = 1;
  }
}
__global__ void
awkward_IndexedArray_getitem_nextcarry_outindex_initialize_error_i(
    unsigned long long* error_i,
    unsigned long long value) {
  // One-thread kernel: seed the device-side error slot with its
  // "no error" sentinel before the main kernel runs.
  error_i[0] = value;
}
template <typename C, typename T>
ERROR awkward_IndexedArray_getitem_nextcarry_outindex(
  T* tocarry,
  C* toindex,
  const C* fromindex,
  int64_t lenindex,
  int64_t lencontent) {
  // Host driver: build a validity mask over fromindex, prefix-sum it into
  // compacted positions, then run the main kernel. Returns a failure with
  // the first out-of-range index when one exists.
  dim3 blocks_per_grid = blocks(lenindex);
  dim3 threads_per_block = threads(lenindex);
  // Scratch: 0/1 validity mask and its prefix sum (compacted positions).
  int8_t* filtered_mask;
  int64_t* res_temp;
  HANDLE_ERROR(cudaMalloc((void**)&filtered_mask, sizeof(int8_t) * lenindex));
  HANDLE_ERROR(cudaMalloc((void**)&res_temp, sizeof(int64_t) * lenindex));
  HANDLE_ERROR(cudaMemset(filtered_mask, 0, sizeof(int8_t) * lenindex));
  awkward_IndexedArray_getitem_nextcarry_outindex_filter_mask<C><<<blocks_per_grid, threads_per_block>>>(
    fromindex,
    filtered_mask,
    lenindex,
    lencontent);
  exclusive_scan(res_temp, filtered_mask, lenindex);
  // Sentinel lenindex + 1 means "no error"; the main kernel atomicMin's the
  // first offending position into this slot.
  unsigned long long * dev_error_i;
  unsigned long long error_i;
  HANDLE_ERROR(cudaMalloc((void**)&dev_error_i, sizeof(unsigned long long)));
  awkward_IndexedArray_getitem_nextcarry_outindex_initialize_error_i<<<1,1>>>(
    dev_error_i,
    lenindex + 1);
  awkward_IndexedArray_getitem_nextcarry_outindex_kernel<C, T><<<blocks_per_grid, threads_per_block>>>(
    tocarry,
    toindex,
    fromindex,
    res_temp,
    lenindex,
    lencontent,
    dev_error_i);
  HANDLE_ERROR(cudaMemcpy(&error_i, dev_error_i, sizeof(unsigned long long), cudaMemcpyDeviceToHost));
  ERROR result = success();
  if (error_i != (unsigned long long)(lenindex + 1)) {
    C error_j;
    HANDLE_ERROR(cudaMemcpy(&error_j, fromindex + error_i, sizeof(C), cudaMemcpyDeviceToHost));
    result = failure("index out of range", error_i, error_j, FILENAME(__LINE__));
  }
  // Fixed: these device buffers were leaked on every call (and on the error
  // path, which returned before any cleanup could run).
  HANDLE_ERROR(cudaFree(filtered_mask));
  HANDLE_ERROR(cudaFree(res_temp));
  HANDLE_ERROR(cudaFree(dev_error_i));
  return result;
}
ERROR awkward_IndexedArray32_getitem_nextcarry_outindex_64(
  int64_t* tocarry,
  int32_t* toindex,
  const int32_t* fromindex,
  int64_t lenindex,
  int64_t lencontent) {
  // Concrete entry point for int32 indices / int64 carry output; forwards
  // to the shared template implementation.
  return awkward_IndexedArray_getitem_nextcarry_outindex<int32_t, int64_t>(
      tocarry, toindex, fromindex, lenindex, lencontent);
}
ERROR awkward_IndexedArrayU32_getitem_nextcarry_outindex_64(
  int64_t* tocarry,
  uint32_t* toindex,
  const uint32_t* fromindex,
  int64_t lenindex,
  int64_t lencontent) {
  // Concrete entry point for uint32 indices / int64 carry output; forwards
  // to the shared template implementation.
  return awkward_IndexedArray_getitem_nextcarry_outindex<uint32_t, int64_t>(
      tocarry, toindex, fromindex, lenindex, lencontent);
}
ERROR awkward_IndexedArray64_getitem_nextcarry_outindex_64(
  int64_t* tocarry,
  int64_t* toindex,
  const int64_t* fromindex,
  int64_t lenindex,
  int64_t lencontent) {
  // Concrete entry point for int64 indices / int64 carry output; forwards
  // to the shared template implementation.
  return awkward_IndexedArray_getitem_nextcarry_outindex<int64_t, int64_t>(
      tocarry, toindex, fromindex, lenindex, lencontent);
}
|
2b205d57185a24ad5dd7154823d320f3ea8f2d62.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include "./host_device_vector.h"
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "device_helpers_hip.cuh"
namespace xgboost {
// the handler to call instead of hipSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
  // Install a test-only hook that DeviceShard::SetDevice invokes in place
  // of hipSetDevice; pass nullptr to restore the real runtime call.
  cudaSetDeviceHandler = handler;
}
// wrapper over access with useful methods
class Permissions {
  GPUAccess access_;  // current access level for one side (host or device)
  explicit Permissions(GPUAccess access) : access_(access) {}

 public:
  // Default: no access granted.
  Permissions() : access_(GPUAccess::kNone) {}
  // perm == true grants full read/write access, false grants none.
  explicit Permissions(bool perm)
    : access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {}
  bool CanRead() const { return access_ >= kRead; }
  bool CanWrite() const { return access_ == kWrite; }
  bool CanAccess(GPUAccess access) const { return access_ >= access; }
  // Widen to `access` if it exceeds the current level.
  void Grant(GPUAccess access) {
    if (access_ < access) { access_ = access; }
  }
  // Narrow the level so it no longer conflicts with `compl_access` held
  // by the other side.
  void DenyComplementary(GPUAccess compl_access) {
    GPUAccess limit = GPUAccess::kWrite - compl_access;
    if (limit < access_) { access_ = limit; }
  }
  // The access the other side may hold concurrently with this one.
  Permissions Complementary() const {
    return Permissions(GPUAccess::kWrite - access_);
  }
};
template <typename T>
struct HostDeviceVectorImpl {
struct DeviceShard {
DeviceShard()
: proper_size_(0), device_(-1), start_(0), perm_d_(false),
cached_size_(~0), vec_(nullptr) {}
void Init(HostDeviceVectorImpl<T>* vec, int device) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = device;
LazyResize(vec_->Size());
perm_d_ = vec_->perm_h_.Complementary();
}
void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = other.device_;
cached_size_ = other.cached_size_;
start_ = other.start_;
proper_size_ = other.proper_size_;
SetDevice();
data_.resize(other.data_.size());
perm_d_ = other.perm_d_;
}
void ScatterFrom(const T* begin) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
dh::safe_cuda(hipMemcpy(data_.data().get(), begin + start_,
data_.size() * sizeof(T), hipMemcpyDefault));
}
void GatherTo(thrust::device_ptr<T> begin) {
LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(hipMemcpy(begin.get() + start_, data_.data().get(),
proper_size_ * sizeof(T), hipMemcpyDefault));
}
void Fill(T v) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
thrust::fill(data_.begin(), data_.end(), v);
}
void Copy(DeviceShard* other) {
// TODO(canonizer): avoid full copy of host data for this (but not for other)
LazySyncDevice(GPUAccess::kWrite);
other->LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(hipMemcpy(data_.data().get(), other->data_.data().get(),
data_.size() * sizeof(T), hipMemcpyDefault));
}
void LazySyncHost(GPUAccess access) {
SetDevice();
dh::safe_cuda(hipMemcpy(vec_->data_h_.data() + start_,
data_.data().get(), proper_size_ * sizeof(T),
hipMemcpyDeviceToHost));
perm_d_.DenyComplementary(access);
}
void LazyResize(size_t new_size) {
if (new_size == cached_size_) { return; }
// resize is required
int ndevices = vec_->distribution_.devices_.Size();
int device_index = vec_->distribution_.devices_.Index(device_);
start_ = vec_->distribution_.ShardStart(new_size, device_index);
proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index);
// The size on this device.
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index);
SetDevice();
data_.resize(size_d);
cached_size_ = new_size;
}
void LazySyncDevice(GPUAccess access) {
if (perm_d_.CanAccess(access)) { return; }
if (perm_d_.CanRead()) {
// deny read to the host
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
return;
}
// data is on the host
size_t size_h = vec_->data_h_.size();
LazyResize(size_h);
SetDevice();
dh::safe_cuda(
hipMemcpy(data_.data().get(), vec_->data_h_.data() + start_,
data_.size() * sizeof(T), hipMemcpyHostToDevice));
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
vec_->size_d_ = size_h;
}
void SetDevice() {
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(hipSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
int device_;
thrust::device_vector<T> data_;
// cached vector size
size_t cached_size_;
size_t start_;
// size of the portion to copy back to the host
size_t proper_size_;
Permissions perm_d_;
HostDeviceVectorImpl<T>* vec_;
};
HostDeviceVectorImpl(size_t size, T v, GPUDistribution distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = size;
InitShards();
Fill(v);
} else {
data_h_.resize(size, v);
}
}
// required, as a new std::mutex has to be created
HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other)
: data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_),
distribution_(other.distribution_), mutex_() {
shards_.resize(other.shards_.size());
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, other.shards_.at(i));
});
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, GPUDistribution distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = init.size();
InitShards();
Copy(init);
} else {
data_h_ = init;
}
}
void InitShards() {
int ndevices = distribution_.devices_.Size();
shards_.resize(ndevices);
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, distribution_.devices_.DeviceId(i));
});
}
size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; }
GPUSet Devices() const { return distribution_.devices_; }
const GPUDistribution& Distribution() const { return distribution_; }
T* DevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
}
const T* ConstDevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
}
common::Span<T> DeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return {shards_.at(devices.Index(device)).data_.data().get(),
static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
}
common::Span<const T> ConstDeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return {shards_.at(devices.Index(device)).data_.data().get(),
static_cast<typename common::Span<const T>::index_type>(DeviceSize(device))};
}
size_t DeviceSize(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).data_.size();
}
size_t DeviceStart(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).start_;
}
thrust::device_ptr<T> tbegin(int device) { // NOLINT
return thrust::device_ptr<T>(DevicePointer(device));
}
thrust::device_ptr<const T> tcbegin(int device) { // NOLINT
return thrust::device_ptr<const T>(ConstDevicePointer(device));
}
thrust::device_ptr<T> tend(int device) { // NOLINT
return tbegin(device) + DeviceSize(device);
}
thrust::device_ptr<const T> tcend(int device) { // NOLINT
return tcbegin(device) + DeviceSize(device);
}
void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(hipMemcpy(data_h_.data(), begin.get(),
(end - begin) * sizeof(T),
hipMemcpyDeviceToHost));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(begin.get());
});
}
}
void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(hipMemcpy(begin.get(), data_h_.data(),
data_h_.size() * sizeof(T),
hipMemcpyHostToDevice));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); });
}
}
void Fill(T v) {
if (perm_h_.CanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); });
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
// Data is on host.
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
// Data is on device;
if (distribution_ != other->distribution_) {
distribution_ = GPUDistribution();
Reshard(other->Distribution());
size_d_ = other->size_d_;
}
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Copy(&other->shards_.at(i));
});
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.data());
});
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.begin());
});
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kWrite);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void Reshard(const GPUDistribution& distribution) {
if (distribution_ == distribution) { return; }
CHECK(distribution_.IsEmpty() || distribution.IsEmpty());
if (distribution.IsEmpty()) {
LazySyncHost(GPUAccess::kWrite);
}
distribution_ = distribution;
InitShards();
}
void Reshard(GPUSet new_devices) {
if (distribution_.Devices() == new_devices) { return; }
Reshard(GPUDistribution::Block(new_devices));
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if (distribution_.IsFixedSize()) {
CHECK_EQ(new_size, distribution_.offsets_.back());
}
if (Size() == 0 && !distribution_.IsEmpty()) {
// fast on-device resize
perm_h_ = Permissions(false);
size_d_ = new_size;
InitShards();
Fill(v);
} else {
// resize on host
LazySyncHost(GPUAccess::kWrite);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (perm_h_.CanAccess(access)) { return; }
if (perm_h_.CanRead()) {
// data is present, just need to deny access to the device
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.perm_d_.DenyComplementary(access);
});
perm_h_.Grant(access);
return;
}
if (data_h_.size() != size_d_) { data_h_.resize(size_d_); }
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.LazySyncHost(access);
});
perm_h_.Grant(access);
}
void LazySyncDevice(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
CHECK(devices.Contains(device));
shards_.at(devices.Index(device)).LazySyncDevice(access);
}
bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); }
bool DeviceCanAccess(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
if (!devices.Contains(device)) { return false; }
return shards_.at(devices.Index(device)).perm_d_.CanAccess(access);
}
std::vector<T> data_h_;
Permissions perm_h_;
// the total size of the data stored on the devices
size_t size_d_;
GPUDistribution distribution_;
// protects size_d_ and perm_h_ when updated from multiple threads
std::mutex mutex_;
std::vector<DeviceShard> shards_;
};
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(size_t size, T v, GPUDistribution distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(size, v, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(std::initializer_list<T> init, GPUDistribution distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(const std::vector<T>& init, GPUDistribution distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
: impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=
(const HostDeviceVector<T>& other) {
if (this == &other) { return *this; }
delete impl_;
impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
HostDeviceVectorImpl<T>* tmp = impl_;
impl_ = nullptr;
delete tmp;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); }
template <typename T>
const GPUDistribution& HostDeviceVector<T>::Distribution() const {
return impl_->Distribution();
}
template <typename T>
T* HostDeviceVector<T>::DevicePointer(int device) {
return impl_->DevicePointer(device);
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer(int device) const {
return impl_->ConstDevicePointer(device);
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) {
return impl_->DeviceSpan(device);
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const {
return impl_->ConstDeviceSpan(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceStart(int device) const {
return impl_->DeviceStart(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceSize(int device) const {
return impl_->DeviceSize(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT
return impl_->tbegin(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT
return impl_->tcbegin(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT
return impl_->tend(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // NOLINT
return impl_->tcend(device);
}
template <typename T>
void HostDeviceVector<T>::ScatterFrom
(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
impl_->ScatterFrom(begin, end);
}
template <typename T>
void HostDeviceVector<T>::GatherTo
(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const {
impl_->GatherTo(begin, end);
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const {
return impl_->HostCanAccess(access);
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const {
return impl_->DeviceCanAccess(device, access);
}
template <typename T>
void HostDeviceVector<T>::Reshard(GPUSet new_devices) const {
impl_->Reshard(new_devices);
}
template <typename T>
void HostDeviceVector<T>::Reshard(const GPUDistribution& distribution) const {
impl_->Reshard(distribution);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
} // namespace xgboost
| 2b205d57185a24ad5dd7154823d320f3ea8f2d62.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include "./host_device_vector.h"
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "./device_helpers.cuh"
namespace xgboost {
// the handler to call instead of cudaSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
  // Install a test-only hook used instead of cudaSetDevice when selecting
  // the active device; pass nullptr to restore the real runtime call.
  cudaSetDeviceHandler = handler;
}
// wrapper over access with useful methods
class Permissions {
  GPUAccess access_;  // current access level for one side (host or device)
  explicit Permissions(GPUAccess access) : access_(access) {}

 public:
  // Default: no access granted.
  Permissions() : access_(GPUAccess::kNone) {}
  // perm == true grants full read/write access, false grants none.
  explicit Permissions(bool perm)
    : access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {}
  bool CanRead() const { return access_ >= kRead; }
  bool CanWrite() const { return access_ == kWrite; }
  bool CanAccess(GPUAccess access) const { return access_ >= access; }
  // Widen to `access` if it exceeds the current level.
  void Grant(GPUAccess access) {
    if (access_ < access) { access_ = access; }
  }
  // Narrow the level so it no longer conflicts with `compl_access` held
  // by the other side.
  void DenyComplementary(GPUAccess compl_access) {
    GPUAccess limit = GPUAccess::kWrite - compl_access;
    if (limit < access_) { access_ = limit; }
  }
  // The access the other side may hold concurrently with this one.
  Permissions Complementary() const {
    return Permissions(GPUAccess::kWrite - access_);
  }
};
// Implementation of HostDeviceVector: a host std::vector mirrored by
// per-device shards, kept coherent lazily.  perm_h_ and each shard's perm_d_
// record which side currently holds valid data; data moves only when a
// caller requests access the holder does not have.
// Fixes vs. previous revision: removed an unused local in LazyResize and
// put constructor init-lists in member-declaration order (-Wreorder).
template <typename T>
struct HostDeviceVectorImpl {
  // One device's slice of the vector, as dictated by the GPUDistribution.
  struct DeviceShard {
    DeviceShard()
        : device_(-1), cached_size_(~0), start_(0), proper_size_(0),
          perm_d_(false), vec_(nullptr) {}

    // First-time initialisation against a concrete device id.
    void Init(HostDeviceVectorImpl<T>* vec, int device) {
      if (vec_ == nullptr) { vec_ = vec; }
      CHECK_EQ(vec, vec_);
      device_ = device;
      LazyResize(vec_->Size());
      // the device may hold whatever rights the host does not
      perm_d_ = vec_->perm_h_.Complementary();
    }

    // Initialisation from another vector's shard (impl copy constructor).
    // NOTE(review): this sizes the device buffer but does not copy its
    // contents — if the source's data lived only on device, the copy's
    // shard starts uninitialized while perm_d_ claims validity.  TODO confirm
    // whether callers only copy host-resident vectors.
    void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) {
      if (vec_ == nullptr) { vec_ = vec; }
      CHECK_EQ(vec, vec_);
      device_ = other.device_;
      cached_size_ = other.cached_size_;
      start_ = other.start_;
      proper_size_ = other.proper_size_;
      SetDevice();
      data_.resize(other.data_.size());
      perm_d_ = other.perm_d_;
    }

    // Copy this shard's slice out of a full-size device/host buffer.
    void ScatterFrom(const T* begin) {
      // TODO(canonizer): avoid full copy of host data
      LazySyncDevice(GPUAccess::kWrite);
      SetDevice();
      dh::safe_cuda(cudaMemcpy(data_.data().get(), begin + start_,
                               data_.size() * sizeof(T), cudaMemcpyDefault));
    }

    // Copy this shard's proper (non-overlapping) part into a full-size
    // device buffer at the shard's offset.
    void GatherTo(thrust::device_ptr<T> begin) {
      LazySyncDevice(GPUAccess::kRead);
      SetDevice();
      dh::safe_cuda(cudaMemcpy(begin.get() + start_, data_.data().get(),
                               proper_size_ * sizeof(T), cudaMemcpyDefault));
    }

    void Fill(T v) {
      // TODO(canonizer): avoid full copy of host data
      LazySyncDevice(GPUAccess::kWrite);
      SetDevice();
      thrust::fill(data_.begin(), data_.end(), v);
    }

    void Copy(DeviceShard* other) {
      // TODO(canonizer): avoid full copy of host data for this (but not for other)
      LazySyncDevice(GPUAccess::kWrite);
      other->LazySyncDevice(GPUAccess::kRead);
      SetDevice();
      dh::safe_cuda(cudaMemcpy(data_.data().get(), other->data_.data().get(),
                               data_.size() * sizeof(T), cudaMemcpyDefault));
    }

    // Flush this shard's slice back to the host vector and surrender the
    // rights that conflict with the host's requested access.
    void LazySyncHost(GPUAccess access) {
      SetDevice();
      dh::safe_cuda(cudaMemcpy(vec_->data_h_.data() + start_,
                               data_.data().get(), proper_size_ * sizeof(T),
                               cudaMemcpyDeviceToHost));
      perm_d_.DenyComplementary(access);
    }

    // Recompute slice boundaries and (re)allocate device storage whenever
    // the logical vector size changes.
    void LazyResize(size_t new_size) {
      if (new_size == cached_size_) { return; }
      // resize is required
      int device_index = vec_->distribution_.devices_.Index(device_);
      start_ = vec_->distribution_.ShardStart(new_size, device_index);
      proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index);
      // The size on this device.
      size_t size_d = vec_->distribution_.ShardSize(new_size, device_index);
      SetDevice();
      data_.resize(size_d);
      cached_size_ = new_size;
    }

    // Make the shard valid for `access`, pulling data from the host if
    // needed, and revoke the host's conflicting rights.
    void LazySyncDevice(GPUAccess access) {
      if (perm_d_.CanAccess(access)) { return; }
      if (perm_d_.CanRead()) {
        // data already valid on device; just deny conflicting host access
        perm_d_.Grant(access);
        std::lock_guard<std::mutex> lock(vec_->mutex_);
        vec_->perm_h_.DenyComplementary(access);
        return;
      }
      // data is on the host
      size_t size_h = vec_->data_h_.size();
      LazyResize(size_h);
      SetDevice();
      dh::safe_cuda(
          cudaMemcpy(data_.data().get(), vec_->data_h_.data() + start_,
                     data_.size() * sizeof(T), cudaMemcpyHostToDevice));
      perm_d_.Grant(access);
      // mutex_ protects perm_h_ and size_d_ against concurrent shard syncs
      std::lock_guard<std::mutex> lock(vec_->mutex_);
      vec_->perm_h_.DenyComplementary(access);
      vec_->size_d_ = size_h;
    }

    // Selects this shard's device, or calls the test hook if installed.
    void SetDevice() {
      if (cudaSetDeviceHandler == nullptr) {
        dh::safe_cuda(cudaSetDevice(device_));
      } else {
        (*cudaSetDeviceHandler)(device_);
      }
    }

    int device_;
    thrust::device_vector<T> data_;
    // cached vector size; ~0 is the "never sized" sentinel
    size_t cached_size_;
    // offset of this shard's slice within the whole vector
    size_t start_;
    // size of the portion to copy back to the host (excludes any overlap)
    size_t proper_size_;
    Permissions perm_d_;
    HostDeviceVectorImpl<T>* vec_;
  };

  HostDeviceVectorImpl(size_t size, T v, GPUDistribution distribution)
      : perm_h_(distribution.IsEmpty()), size_d_(0),
        distribution_(distribution) {
    if (!distribution_.IsEmpty()) {
      size_d_ = size;
      InitShards();
      Fill(v);
    } else {
      data_h_.resize(size, v);
    }
  }

  // required, as a new std::mutex has to be created
  HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other)
      : data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_),
        distribution_(other.distribution_), mutex_() {
    shards_.resize(other.shards_.size());
    dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
      shard.Init(this, other.shards_.at(i));
    });
  }

  // Initializer can be std::vector<T> or std::initializer_list<T>
  template <class Initializer>
  HostDeviceVectorImpl(const Initializer& init, GPUDistribution distribution)
      : perm_h_(distribution.IsEmpty()), size_d_(0),
        distribution_(distribution) {
    if (!distribution_.IsEmpty()) {
      size_d_ = init.size();
      InitShards();
      Copy(init);
    } else {
      data_h_ = init;
    }
  }

  // Creates one shard per device in the current distribution.
  void InitShards() {
    int ndevices = distribution_.devices_.Size();
    shards_.resize(ndevices);
    dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
      shard.Init(this, distribution_.devices_.DeviceId(i));
    });
  }

  // Logical size: the host vector's size when host data is valid,
  // otherwise the size recorded when data moved to device.
  size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; }

  GPUSet Devices() const { return distribution_.devices_; }

  const GPUDistribution& Distribution() const { return distribution_; }

  // Writable pointer to the shard on `device` (syncs + takes write access).
  T* DevicePointer(int device) {
    CHECK(distribution_.devices_.Contains(device));
    LazySyncDevice(device, GPUAccess::kWrite);
    return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
  }

  // Read-only pointer; leaves the host with read rights.
  const T* ConstDevicePointer(int device) {
    CHECK(distribution_.devices_.Contains(device));
    LazySyncDevice(device, GPUAccess::kRead);
    return shards_.at(distribution_.devices_.Index(device)).data_.data().get();
  }

  common::Span<T> DeviceSpan(int device) {
    GPUSet devices = distribution_.devices_;
    CHECK(devices.Contains(device));
    LazySyncDevice(device, GPUAccess::kWrite);
    return {shards_.at(devices.Index(device)).data_.data().get(),
            static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
  }

  common::Span<const T> ConstDeviceSpan(int device) {
    GPUSet devices = distribution_.devices_;
    CHECK(devices.Contains(device));
    LazySyncDevice(device, GPUAccess::kRead);
    return {shards_.at(devices.Index(device)).data_.data().get(),
            static_cast<typename common::Span<const T>::index_type>(DeviceSize(device))};
  }

  size_t DeviceSize(int device) {
    CHECK(distribution_.devices_.Contains(device));
    LazySyncDevice(device, GPUAccess::kRead);
    return shards_.at(distribution_.devices_.Index(device)).data_.size();
  }

  size_t DeviceStart(int device) {
    CHECK(distribution_.devices_.Contains(device));
    LazySyncDevice(device, GPUAccess::kRead);
    return shards_.at(distribution_.devices_.Index(device)).start_;
  }

  thrust::device_ptr<T> tbegin(int device) {  // NOLINT
    return thrust::device_ptr<T>(DevicePointer(device));
  }

  thrust::device_ptr<const T> tcbegin(int device) {  // NOLINT
    return thrust::device_ptr<const T>(ConstDevicePointer(device));
  }

  thrust::device_ptr<T> tend(int device) {  // NOLINT
    return tbegin(device) + DeviceSize(device);
  }

  thrust::device_ptr<const T> tcend(int device) {  // NOLINT
    return tcbegin(device) + DeviceSize(device);
  }

  // Overwrites the whole vector from a device range of equal length.
  void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
    CHECK_EQ(end - begin, Size());
    if (perm_h_.CanWrite()) {
      dh::safe_cuda(cudaMemcpy(data_h_.data(), begin.get(),
                               (end - begin) * sizeof(T),
                               cudaMemcpyDeviceToHost));
    } else {
      dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
        shard.ScatterFrom(begin.get());
      });
    }
  }

  // Copies the whole vector into a device range of equal length.
  void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) {
    CHECK_EQ(end - begin, Size());
    if (perm_h_.CanWrite()) {
      dh::safe_cuda(cudaMemcpy(begin.get(), data_h_.data(),
                               data_h_.size() * sizeof(T),
                               cudaMemcpyHostToDevice));
    } else {
      dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); });
    }
  }

  void Fill(T v) {
    if (perm_h_.CanWrite()) {
      std::fill(data_h_.begin(), data_h_.end(), v);
    } else {
      dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); });
    }
  }

  void Copy(HostDeviceVectorImpl<T>* other) {
    CHECK_EQ(Size(), other->Size());
    // Data is on host.
    if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) {
      std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
      return;
    }
    // Data is on device; adopt the source's distribution if they differ
    // (order matters: clear distribution_ first so Reshard's host/device
    // precondition holds).
    if (distribution_ != other->distribution_) {
      distribution_ = GPUDistribution();
      Reshard(other->Distribution());
      size_d_ = other->size_d_;
    }
    dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
      shard.Copy(&other->shards_.at(i));
    });
  }

  void Copy(const std::vector<T>& other) {
    CHECK_EQ(Size(), other.size());
    if (perm_h_.CanWrite()) {
      std::copy(other.begin(), other.end(), data_h_.begin());
    } else {
      dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
        shard.ScatterFrom(other.data());
      });
    }
  }

  void Copy(std::initializer_list<T> other) {
    CHECK_EQ(Size(), other.size());
    if (perm_h_.CanWrite()) {
      std::copy(other.begin(), other.end(), data_h_.begin());
    } else {
      dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
        shard.ScatterFrom(other.begin());
      });
    }
  }

  std::vector<T>& HostVector() {
    LazySyncHost(GPUAccess::kWrite);
    return data_h_;
  }

  const std::vector<T>& ConstHostVector() {
    LazySyncHost(GPUAccess::kRead);
    return data_h_;
  }

  // Only host<->device redistributions are supported: one of the two
  // distributions must be empty.
  void Reshard(const GPUDistribution& distribution) {
    if (distribution_ == distribution) { return; }
    CHECK(distribution_.IsEmpty() || distribution.IsEmpty());
    if (distribution.IsEmpty()) {
      // moving to host: pull all shards back first
      LazySyncHost(GPUAccess::kWrite);
    }
    distribution_ = distribution;
    InitShards();
  }

  void Reshard(GPUSet new_devices) {
    if (distribution_.Devices() == new_devices) { return; }
    Reshard(GPUDistribution::Block(new_devices));
  }

  void Resize(size_t new_size, T v) {
    if (new_size == Size()) { return; }
    if (distribution_.IsFixedSize()) {
      CHECK_EQ(new_size, distribution_.offsets_.back());
    }
    if (Size() == 0 && !distribution_.IsEmpty()) {
      // fast on-device resize
      perm_h_ = Permissions(false);
      size_d_ = new_size;
      InitShards();
      Fill(v);
    } else {
      // resize on host
      LazySyncHost(GPUAccess::kWrite);
      data_h_.resize(new_size, v);
    }
  }

  // Make host data valid for `access`, pulling each shard back if needed.
  void LazySyncHost(GPUAccess access) {
    if (perm_h_.CanAccess(access)) { return; }
    if (perm_h_.CanRead()) {
      // data is present, just need to deny access to the device
      dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
        shard.perm_d_.DenyComplementary(access);
      });
      perm_h_.Grant(access);
      return;
    }
    if (data_h_.size() != size_d_) { data_h_.resize(size_d_); }
    dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
      shard.LazySyncHost(access);
    });
    perm_h_.Grant(access);
  }

  void LazySyncDevice(int device, GPUAccess access) {
    GPUSet devices = distribution_.Devices();
    CHECK(devices.Contains(device));
    shards_.at(devices.Index(device)).LazySyncDevice(access);
  }

  bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); }

  bool DeviceCanAccess(int device, GPUAccess access) {
    GPUSet devices = distribution_.Devices();
    if (!devices.Contains(device)) { return false; }
    return shards_.at(devices.Index(device)).perm_d_.CanAccess(access);
  }

  std::vector<T> data_h_;
  Permissions perm_h_;
  // the total size of the data stored on the devices
  size_t size_d_;
  GPUDistribution distribution_;
  // protects size_d_ and perm_h_ when updated from multiple threads
  std::mutex mutex_;
  std::vector<DeviceShard> shards_;
};
// ---- construction / destruction: each public object owns one impl ----
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(size_t size, T v, GPUDistribution distribution) : impl_(nullptr) {
  impl_ = new HostDeviceVectorImpl<T>(size, v, distribution);
}

template <typename T>
HostDeviceVector<T>::HostDeviceVector
(std::initializer_list<T> init, GPUDistribution distribution) : impl_(nullptr) {
  impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}

template <typename T>
HostDeviceVector<T>::HostDeviceVector
(const std::vector<T>& init, GPUDistribution distribution) : impl_(nullptr) {
  impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}

// Deep copy via the impl copy constructor (creates a fresh mutex).
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
  : impl_(nullptr) {
  impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
}

// Copy assignment: destroy-then-rebuild; guarded against self-assignment.
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=
(const HostDeviceVector<T>& other) {
  if (this == &other) { return *this; }
  delete impl_;
  impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
  return *this;
}

// Null impl_ before deleting it — presumably so any access racing with
// destruction sees a null pointer rather than a dangling one (TODO confirm).
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
  HostDeviceVectorImpl<T>* tmp = impl_;
  impl_ = nullptr;
  delete tmp;
}
// ---- accessors: thin forwarders into the impl ----
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }

template <typename T>
GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); }

template <typename T>
const GPUDistribution& HostDeviceVector<T>::Distribution() const {
  return impl_->Distribution();
}

// Writable device pointer: syncs data to `device` and takes write access.
template <typename T>
T* HostDeviceVector<T>::DevicePointer(int device) {
  return impl_->DevicePointer(device);
}

// Read-only device pointer: host keeps read rights (no forced copy-back later).
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer(int device) const {
  return impl_->ConstDevicePointer(device);
}

template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) {
  return impl_->DeviceSpan(device);
}

template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const {
  return impl_->ConstDeviceSpan(device);
}

template <typename T>
size_t HostDeviceVector<T>::DeviceStart(int device) const {
  return impl_->DeviceStart(device);
}

template <typename T>
size_t HostDeviceVector<T>::DeviceSize(int device) const {
  return impl_->DeviceSize(device);
}

// thrust-style iterators over the shard on `device`.
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) {  // NOLINT
  return impl_->tbegin(device);
}

template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const {  // NOLINT
  return impl_->tcbegin(device);
}

template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) {  // NOLINT
  return impl_->tend(device);
}

template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const {  // NOLINT
  return impl_->tcend(device);
}
// ---- bulk operations and state management: forwarders into the impl ----
template <typename T>
void HostDeviceVector<T>::ScatterFrom
(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
  impl_->ScatterFrom(begin, end);
}

template <typename T>
void HostDeviceVector<T>::GatherTo
(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const {
  impl_->GatherTo(begin, end);
}

template <typename T>
void HostDeviceVector<T>::Fill(T v) {
  impl_->Fill(v);
}

template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
  impl_->Copy(other.impl_);
}

template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
  impl_->Copy(other);
}

template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
  impl_->Copy(other);
}

// Host view with write access (forces sync back from devices).
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }

template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
  return impl_->ConstHostVector();
}

template <typename T>
bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const {
  return impl_->HostCanAccess(access);
}

template <typename T>
bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const {
  return impl_->DeviceCanAccess(device, access);
}

// Reshard is const-qualified but redistributes storage (logical constness:
// element values are unchanged).
template <typename T>
void HostDeviceVector<T>::Reshard(GPUSet new_devices) const {
  impl_->Reshard(new_devices);
}

template <typename T>
void HostDeviceVector<T>::Reshard(const GPUDistribution& distribution) const {
  impl_->Reshard(distribution);
}

template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
  impl_->Resize(new_size, v);
}

// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
} // namespace xgboost
|
1f18f21b7a1356a44c5337f1117567f014b29d2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Per-column k-smallest selection by insertion sort.
// Each thread owns one column of a height x width, row-major distance matrix
// (element (r, c) lives at dist[r * width + c]).  On exit, rows 0..k-1 of the
// thread's column hold its k smallest distances in ascending order, and the
// matching entries of `ind` hold their original 1-based row indices.
// Launch: 1-D grid covering at least `width` threads; extra threads exit early.
__global__ void cuInsertionSort(float *dist, long *ind, int width, int height, int k){
    // Variables
    int l, i, j;
    float *p_dist;              // base of this thread's column in dist
    long *p_ind;                // base of this thread's column in ind
    float curr_dist, max_dist;  // candidate and current window maximum
    long curr_row, max_row;
    unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (xIndex<width){
        // Pointer shift, initialization, and max value
        p_dist = dist + xIndex;
        p_ind = ind + xIndex;
        max_dist = p_dist[0];
        p_ind[0] = 1;           // stored indices are 1-based
        // Part 1 : insertion-sort the first k elements in place
        for (l=1; l<k; l++){
            curr_row = l * width;
            curr_dist = p_dist[curr_row];
            if (curr_dist<max_dist){
                // find insertion slot i: first sorted row whose distance
                // exceeds curr_dist (defaults to l-1, which is always valid
                // because curr_dist < max_dist = last sorted element)
                i=l-1;
                for (int a=0; a<l-1; a++){
                    if (p_dist[a*width]>curr_dist){
                        i=a;
                        break;
                    }
                }
                // shift rows i..l-1 down one slot, then insert the candidate
                for (j=l; j>i; j--){
                    p_dist[j*width] = p_dist[(j-1)*width];
                    p_ind[j*width] = p_ind[(j-1)*width];
                }
                p_dist[i*width] = curr_dist;
                p_ind[i*width] = l+1;
            } else {
                // already in place: just record its index
                p_ind[l*width] = l+1;
            }
            // maximum of the sorted prefix is now its last row
            max_dist = p_dist[curr_row];
        }
        // Part 2 : insert each remaining element into the sorted top-k window
        max_row = (k-1)*width;  // offset of the window's last (largest) row
        for (l=k; l<height; l++){
            curr_dist = p_dist[l*width];
            if (curr_dist<max_dist){
                i=k-1;
                for (int a=0; a<k-1; a++){
                    if (p_dist[a*width]>curr_dist){
                        i=a;
                        break;
                    }
                }
                // shift the window's tail down, dropping the old maximum
                for (j=k-1; j>i; j--){
                    p_dist[j*width] = p_dist[(j-1)*width];
                    p_ind[j*width] = p_ind[(j-1)*width];
                }
                p_dist[i*width] = curr_dist;
                p_ind[i*width] = l+1;
                max_dist = p_dist[max_row];
            }
        }
    }
} | 1f18f21b7a1356a44c5337f1117567f014b29d2b.cu | #include "includes.h"
// k-nearest selection kernel: one thread per column of a height x width,
// row-major distance matrix (element (r, c) at dist[r * width + c]).
// After the kernel, the first k rows of each column contain that column's
// k smallest distances in ascending order; `ind` receives the corresponding
// original row positions, stored 1-based.
// Launch: 1-D grid with >= width threads; threads past width do nothing.
__global__ void cuInsertionSort(float *dist, long *ind, int width, int height, int k){
    // Variables
    int l, i, j;
    float *p_dist;              // this thread's column in dist
    long *p_ind;                // this thread's column in ind
    float curr_dist, max_dist;  // candidate value / largest value kept so far
    long curr_row, max_row;
    unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (xIndex<width){
        // Pointer shift, initialization, and max value
        p_dist = dist + xIndex;
        p_ind = ind + xIndex;
        max_dist = p_dist[0];
        p_ind[0] = 1;           // 1-based indexing convention
        // Part 1 : sort the first k elements with straight insertion
        for (l=1; l<k; l++){
            curr_row = l * width;
            curr_dist = p_dist[curr_row];
            if (curr_dist<max_dist){
                // locate slot i = first sorted entry larger than curr_dist;
                // l-1 is a safe default since curr_dist < max_dist
                i=l-1;
                for (int a=0; a<l-1; a++){
                    if (p_dist[a*width]>curr_dist){
                        i=a;
                        break;
                    }
                }
                // open a gap at slot i and place the candidate there
                for (j=l; j>i; j--){
                    p_dist[j*width] = p_dist[(j-1)*width];
                    p_ind[j*width] = p_ind[(j-1)*width];
                }
                p_dist[i*width] = curr_dist;
                p_ind[i*width] = l+1;
            } else {
                // element already in sorted position; record its index only
                p_ind[l*width] = l+1;
            }
            // the sorted prefix's maximum now sits at its last row
            max_dist = p_dist[curr_row];
        }
        // Part 2 : stream the remaining rows through the sorted top-k window
        max_row = (k-1)*width;  // offset of the window's largest entry
        for (l=k; l<height; l++){
            curr_dist = p_dist[l*width];
            if (curr_dist<max_dist){
                i=k-1;
                for (int a=0; a<k-1; a++){
                    if (p_dist[a*width]>curr_dist){
                        i=a;
                        break;
                    }
                }
                // shift down, evicting the previous maximum off the end
                for (j=k-1; j>i; j--){
                    p_dist[j*width] = p_dist[(j-1)*width];
                    p_ind[j*width] = p_ind[(j-1)*width];
                }
                p_dist[i*width] = curr_dist;
                p_ind[i*width] = l+1;
                max_dist = p_dist[max_row];
            }
        }
    }
} |
38ef836ea00e77c6d7f0ef3041cbae77224afa25.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <memory>
#include <fstream>
#include <cstdint>
#include <typeinfo>
#include <vector>
#include <thread>
#include <algorithm>
#include <cmath>
#include <limits>
#include <array>
#include <chrono>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include <cudnn.h>
#include "utils.h"
#include "Matrix.hpp"
//#include "DatasetIonosphereHelper.hpp"
//#include "DatasetTictactoeHelper.hpp"
//#include "DatasetCreditApprovalHelper.hpp"
#include "NbitHelper.hpp"
#include "MFEAChromosome.hpp"
#include "MFEA.hpp"
#include "MFEATask.hpp"
// variables for training data
DATATYPE* training_input_data;
DATATYPE* training_output_data;
// variables for testing data
DATATYPE* testing_input_data;
DATATYPE* testing_output_data;
// Applies the same random row permutation to a paired (input, output) data
// set held in device/managed memory, so that sample i keeps its label.
//   input_data_ptr  : row-major buffer of numberof_samples rows,
//                     numberof_elems_per_inputrow elements each
//   output_data_ptr : matching label rows, numberof_elems_per_outputrow each
// Fixes vs. previous revision:
//   * off-by-one: std::uniform_int_distribution's bounds are INCLUSIVE, so
//     (0, numberof_samples) could pick j == numberof_samples and swap with a
//     row one past the end of both buffers;
//   * the staging buffer is now allocated and copied in units of TYPE, not
//     DATATYPE, so the template is correct for any element type;
//   * early-out for fewer than two samples (also keeps the distribution's
//     a <= b precondition valid).
template<typename TYPE> void doRandomShuffle(TYPE* input_data_ptr, TYPE* output_data_ptr, size_t numberof_samples, size_t numberof_elems_per_inputrow, size_t numberof_elems_per_outputrow) {
	if (numberof_samples < 2) {
		return;	// nothing to shuffle
	}
	// staging row, sized for the wider of the two row types
	TYPE* temp_ptr;
	cudaCALL(CUDA_M_MALLOC_MANAGED(temp_ptr, TYPE, numberof_elems_per_inputrow > numberof_elems_per_outputrow ? numberof_elems_per_inputrow : numberof_elems_per_outputrow));
	std::random_device datahelper_random_device;
	std::mt19937 datahelper_mt_engine(datahelper_random_device());
	std::uniform_int_distribution<size_t> ui_dist(0, numberof_samples - 1);	// inclusive bounds
	for (size_t i = 0; i < numberof_samples; ++i) {
		size_t j = ui_dist(datahelper_mt_engine);
		std::cout << "swap " << i << " and " << j << std::endl;
		// swap rows i and j of the input matrix through the staging buffer
		cudaCALL(hipMemcpy(temp_ptr, input_data_ptr + i * numberof_elems_per_inputrow, numberof_elems_per_inputrow * sizeof(TYPE), hipMemcpyDeviceToDevice));
		cudaCALL(hipMemcpy(input_data_ptr + i * numberof_elems_per_inputrow, input_data_ptr + j * numberof_elems_per_inputrow, numberof_elems_per_inputrow * sizeof(TYPE), hipMemcpyDeviceToDevice));
		cudaCALL(hipMemcpy(input_data_ptr + j * numberof_elems_per_inputrow, temp_ptr, numberof_elems_per_inputrow * sizeof(TYPE), hipMemcpyDeviceToDevice));
		// swap the same rows of the output matrix so pairs stay aligned
		cudaCALL(hipMemcpy(temp_ptr, output_data_ptr + i * numberof_elems_per_outputrow, numberof_elems_per_outputrow * sizeof(TYPE), hipMemcpyDeviceToDevice));
		cudaCALL(hipMemcpy(output_data_ptr + i * numberof_elems_per_outputrow, output_data_ptr + j * numberof_elems_per_outputrow, numberof_elems_per_outputrow * sizeof(TYPE), hipMemcpyDeviceToDevice));
		cudaCALL(hipMemcpy(output_data_ptr + j * numberof_elems_per_outputrow, temp_ptr, numberof_elems_per_outputrow * sizeof(TYPE), hipMemcpyDeviceToDevice));
	}
	cudaCALL(hipFree(temp_ptr));
}
// Prints the multitask network topology: for every task, the layer sizes and
// the offsets/sizes of each layer's weight and bias slices inside the flat
// chromosome vector.  Diagnostic output only; no state is modified.
void showMUltitasksSetting() {
	std::cout << "Multitasks setting:" << std::endl;;
	for (uint32_t task = 0; task < TASK_SIZE; ++task) {
		std::cout << "--- Task " << task << " has " << getNumberofLayersbyTask(task) << " layers (including " << getNumberofLayersbyTask(task) - 1 << " hidden layers)" << std::endl;
		// layers are 1-based here; the last one is the output layer
		for (uint32_t layer = 1; layer < getNumberofLayersbyTask(task) + 1; ++layer) {
			if (layer == getNumberofLayersbyTask(task)) {
				std::cout << "----- Layer " << layer << " (output layer): " << getNumberofUnitsbyTaskLayer(task, layer) << " units" << std::endl;
			} else {
				std::cout << "----- Layer " << layer << ": " << getNumberofUnitsbyTaskLayer(task, layer) << " units" << std::endl;
			}
			// combined weights+biases slice, then the two sub-slices
			std::cout << "------- Data offset = " << std::get<OFFSET_IDX>(getLayerWeightsandBiasesbyTaskLayer(task, layer))
						<< "\t Data size = " << std::get<SIZE_IDX>(getLayerWeightsandBiasesbyTaskLayer(task, layer)) << std::endl;
			std::cout << "------- Weights offset = " << std::get<OFFSET_IDX>(getLayerWeightsbyTaskLayer(task, layer))
						<< "\t Weights size = " << std::get<SIZE_IDX>(getLayerWeightsbyTaskLayer(task, layer)) << std::endl;
			std::cout << "------- Biases offset = " << std::get<OFFSET_IDX>(getLayerBiasesbyTaskLayer(task, layer))
						<< "\t Biases size = " << std::get<SIZE_IDX>(getLayerBiasesbyTaskLayer(task, layer)) << std::endl;
		}
	}
}
void testDecode();
void testSBX();
void testPMU();
void testUCL();
void testReproduce();
void testEval();
// Entry point: selects the GPU (argv[1], default 0), loads the dataset,
// shuffles the training samples, runs the MFEA evolution, and reports
// timing and results.
// BUG FIX: the shuffle was called with training_input_data for BOTH
// arguments, so labels were never permuted alongside their samples and the
// training pairs became misaligned; it now passes training_output_data.
int main(int argc, char** argv) {
	// manually set device for running
	int device_id = 0;
	if (argc > 1) {
		device_id = atoi(argv[1]);
	}
	hipSetDevice(device_id);

	// load input data into the global training/testing buffers
	loadDataFile<DATATYPE>(training_input_data, training_output_data, testing_input_data, testing_output_data);

	{	// limit scope so the MFEA object destructs before hipDeviceReset()
		MFEA<120, 1000, 2> mfea(training_input_data, training_output_data,
								testing_input_data, testing_output_data,
								device_id);
		if (mfea.init_libraries() != 0) {
			return EXIT_FAILURE;
		}

		// permute samples and labels in tandem (see BUG FIX above)
		doRandomShuffle<DATATYPE>(training_input_data, training_output_data, TRAINING_SIZE, INPUT_SIZE, OUTPUT_SIZE);

		// time the whole evolutionary run
		std::chrono::time_point<std::chrono::system_clock> start, end;
		start = std::chrono::system_clock::now();

		mfea.initialize();
		mfea.evolution();

		end = std::chrono::system_clock::now();
		std::chrono::duration<double> elapsed_seconds = end - start;
		std::time_t end_time = std::chrono::system_clock::to_time_t(end);
		std::cout << "finished computation at " << std::ctime(&end_time)
				  << "elapsed time: " << elapsed_seconds.count() << "s\n";

		mfea.sumariseResults();
		mfea.writeSumaryResults();
		mfea.reEvaluateTheFinalPopulation();

		mfea.finalize_libraries();
	}

	showMUltitasksSetting();

	// Reset CUDA evironment
	hipDeviceReset();
	return 0;
}
/*
int main(int argc, char** argv) {
// manually set device for running
int device_id;
if (argc > 1) {
device_id = atoi(argv[1]);
} else {
device_id = 0;
}
hipSetDevice(device_id);
hipblasHandle_t cublas_handle;
cublasCALL(hipblasCreate(&cublas_handle));
cudnnHandle_t cudnn_handle;
cudnnCALL(cudnnCreate(&cudnn_handle));
hiprandGenerator_t curand_prng;
// Create a pseudo-random number generator
hiprandCreateGenerator(&curand_prng, HIPRAND_RNG_PSEUDO_MTGP32);
// Set the seed for the random number generator using the system clock
hiprandSetPseudoRandomGeneratorSeed(curand_prng, 0);
// load input data
loadDataFile<DATATYPE>(training_input_data, training_output_data, testing_input_data, testing_output_data);
testSBX();
showMUltitasksSetting();
// Reset CUDA evironment
hipDeviceReset();
return 0;
}*/
// Smoke test for MFEA_Chromosome::decode: fills a chromosome with the ramp
// 0,1,2,... and prints every task/layer's decoded weight matrix and bias
// slice so the offsets can be eyeballed against the task configuration.
// NOTE(review): the cuBLAS/cuDNN handles, the cuRAND generator and W are
// never released — acceptable for a one-shot test, leaks if called in a loop.
void testDecode() {
	hipblasHandle_t cublas_handle;
	cublasCALL(hipblasCreate(&cublas_handle));
	cudnnHandle_t cudnn_handle;
	cudnnCALL(cudnnCreate(&cudnn_handle));

	hiprandGenerator_t curand_prng;
	// Create a pseudo-random number generator
	hiprandCreateGenerator(&curand_prng, HIPRAND_RNG_PSEUDO_MTGP32);
	// Set the seed for the random number generator using the system clock
	hiprandSetPseudoRandomGeneratorSeed(curand_prng, 0);

	std::array<MFEA_Chromosome, 4> population;
	thrust::for_each(population.begin(), population.end(), MFEA_Chromosome_Randomize(curand_prng));
	hipDeviceSynchronize();	// managed memory must be settled before host prints
	std::cout << population[0];

	// overwrite the genome with a recognizable ramp pattern
	for (uint32_t i = 0; i < getTotalLayerWeightsandBiases(); ++i) {
		population[0].rnvec[i] = i;
	}
	printMatrix<DATATYPE>(1, getTotalLayerWeightsandBiases(), population[0].rnvec);

	//cublas_transposeMatrix<DATATYPE>(6, 8, population[0].rnvec, population[1].rnvec, cublas_handle);

	// scratch buffer receiving each decoded weight matrix
	// NOTE(review): allocation status is not wrapped in cudaCALL here
	DATATYPE* W;
	CUDA_M_MALLOC_MANAGED(W, DATATYPE, getTotalLayerWeightsandBiases());

	for (uint32_t task = 0; task < TASK_SIZE; ++task) {
		for (uint32_t layer = 1; layer <= getNumberofLayersbyTask(task); ++layer) {
			// decode returns the (rows, cols) shape of the produced matrix
			std::tuple<uint32_t, uint32_t> shape = population[0].decode(population[0].rnvec, W, task, layer, cublas_handle);
			hipDeviceSynchronize();
			std::cout << "W for task " << task << " layer " << layer << " : " << std::endl;
			printMatrix<DATATYPE>(std::get<MATRIX_NROW>(shape), std::get<MATRIX_NCOL>(shape), W);

			// biases live directly inside the genome at (offset, size)
			std::tuple<uint32_t, uint32_t> bias = getLayerBiasesbyTaskLayer(task, layer);
			std::cout << "b for task " << task << " layer " << layer << " : " << std::endl;
			printMatrix<DATATYPE>(1, std::get<SIZE_IDX>(bias), population[0].rnvec + std::get<OFFSET_IDX>(bias));
		}
	}
}
// Exercises simulated binary crossover (SBX) on two parents producing two
// children, then dumps the results via examineCrossover.
// Fix: the cuRAND generator and the ct_beta scratch buffer were leaked;
// they are now released after the device work is drained.
void testSBX() {
	hiprandGenerator_t curand_prng;
	// Create a pseudo-random number generator
	hiprandCreateGenerator(&curand_prng, HIPRAND_RNG_PSEUDO_MTGP32);
	// Set the seed for the random number generator using the system clock
	hiprandSetPseudoRandomGeneratorSeed(curand_prng, 0);

	std::array<MFEA_Chromosome, 4> population;

	// per-gene spread factors for the SBX operator
	DATATYPE* ct_beta;
	cudaCALL(CUDA_M_MALLOC_MANAGED(ct_beta, DATATYPE, getTotalLayerWeightsandBiases()));

	// parents: population[0], population[1]; children: population[2], population[3];
	// distribution index 5
	test_crossover(population[0], population[1],
					population[2], population[3],
					5, ct_beta,
					curand_prng);
	examineCrossover(population[0], population[1], population[2], population[3]);

	// release resources (previously leaked)
	cudaCALL(hipDeviceSynchronize());
	cudaCALL(hipFree(ct_beta));
	hiprandDestroyGenerator(curand_prng);
}
// Exercises polynomial mutation (PM) on one chromosome.
// Fix: the cuRAND generator and the ct_beta / rp scratch buffers were
// leaked; they are now released after the device work is drained.
void testPMU() {
	hiprandGenerator_t curand_prng;
	// Create a pseudo-random number generator
	hiprandCreateGenerator(&curand_prng, HIPRAND_RNG_PSEUDO_MTGP32);
	// Set the seed for the random number generator using the system clock
	hiprandSetPseudoRandomGeneratorSeed(curand_prng, 0);

	std::array<MFEA_Chromosome, 4> population;

	DATATYPE* ct_beta;	// per-gene mutation magnitudes
	DATATYPE* rp;		// per-gene random probabilities
	cudaCALL(CUDA_M_MALLOC_MANAGED(ct_beta, DATATYPE, getTotalLayerWeightsandBiases()));
	cudaCALL(CUDA_M_MALLOC_MANAGED(rp, DATATYPE, getTotalLayerWeightsandBiases()));

	// source: population[0], target: population[1];
	// polynomial mutation index 5, mutation ratio 1
	test_mutate(population[0], population[1],
				5, 1,
				ct_beta, rp, curand_prng);

	// release resources (previously leaked)
	cudaCALL(hipDeviceSynchronize());
	cudaCALL(hipFree(ct_beta));
	cudaCALL(hipFree(rp));
	hiprandDestroyGenerator(curand_prng);
}
// Exercises the uniform-crossover-like operator on two chromosomes.
// Fix: the cuRAND generator and the ct_beta scratch buffer were leaked;
// they are now released after the device work is drained.
void testUCL() {
	hiprandGenerator_t curand_prng;
	// Create a pseudo-random number generator
	hiprandCreateGenerator(&curand_prng, HIPRAND_RNG_PSEUDO_MTGP32);
	// Set the seed for the random number generator using the system clock
	hiprandSetPseudoRandomGeneratorSeed(curand_prng, 0);

	std::array<MFEA_Chromosome, 4> population;

	// per-gene random draws deciding which parent each gene comes from
	DATATYPE* ct_beta;
	cudaCALL(CUDA_M_MALLOC_MANAGED(ct_beta, DATATYPE, getTotalLayerWeightsandBiases()));

	test_uniformcrossoverlike(population[0], population[1], ct_beta, curand_prng);

	// release resources (previously leaked)
	cudaCALL(hipDeviceSynchronize());
	cudaCALL(hipFree(ct_beta));
	hiprandDestroyGenerator(curand_prng);
}
// Exercises the full reproduction pipeline (crossover + mutation) producing
// two children from two parents.
// Fix: the cuRAND generator and the ct_beta / rp scratch buffers were
// leaked; they are now released after the device work is drained.
void testReproduce() {
	hiprandGenerator_t curand_prng;
	// Create a pseudo-random number generator
	hiprandCreateGenerator(&curand_prng, HIPRAND_RNG_PSEUDO_MTGP32);
	// Set the seed for the random number generator using the system clock
	hiprandSetPseudoRandomGeneratorSeed(curand_prng, 0);

	std::array<MFEA_Chromosome, 4> population;

	DATATYPE* ct_beta;	// crossover spread factors
	DATATYPE* rp;		// mutation random probabilities
	cudaCALL(CUDA_M_MALLOC_MANAGED(ct_beta, DATATYPE, getTotalLayerWeightsandBiases()));
	cudaCALL(CUDA_M_MALLOC_MANAGED(rp, DATATYPE, getTotalLayerWeightsandBiases()));

	// parents: population[0..1]; children: population[2..3];
	// crossover index 2, mutation index 5, mutation ratio 1
	test_reproduce(population[0], population[1],
					population[2], population[3],
					2, 5, 1,
					ct_beta, rp,
					curand_prng);

	// release resources (previously leaked)
	cudaCALL(hipDeviceSynchronize());
	cudaCALL(hipFree(ct_beta));
	cudaCALL(hipFree(rp));
	hiprandDestroyGenerator(curand_prng);
}
// Smoke test for MFEA_Chromosome::evalObj: builds one chromosome with a
// deterministic ramp genome, evaluates its objective on the training data,
// and prints intermediate device buffers for inspection.
// NOTE(review): every handle and buffer allocated here is leaked —
// acceptable for a one-shot manual test, not for repeated invocation.
void testEval() {
	hipblasHandle_t cublas_handle;
	cublasCALL(hipblasCreate(&cublas_handle));
	cudnnHandle_t cudnn_handle;
	cudnnCALL(cudnnCreate(&cudnn_handle));

	hiprandGenerator_t curand_prng;
	// Create a pseudo-random number generator
	hiprandCreateGenerator(&curand_prng, HIPRAND_RNG_PSEUDO_MTGP32);
	// Set the seed for the random number generator using the system clock
	hiprandSetPseudoRandomGeneratorSeed(curand_prng, 0);

	std::array<MFEA_Chromosome, 4> population;
	thrust::for_each(population.begin(), population.end(), MFEA_Chromosome_Randomize(curand_prng));

	// scratch buffers evalObj writes into:
	DATATYPE* dev_mat_temp_rnvec;		// decoded genome workspace
	DATATYPE* dev_mat_temp_w;			// per-layer weight matrix workspace
	DATATYPE* dev_mat_ones;				// column of ones (bias broadcasting)
	std::array<DATATYPE*, LAYER_SIZE + 1> dev_mat_temp_layers;	// activations per layer
	cudaCALL(CUDA_M_MALLOC_MANAGED(dev_mat_temp_rnvec, DATATYPE, getTotalLayerWeightsandBiases()));
	cudaCALL(CUDA_M_MALLOC_MANAGED(dev_mat_temp_w, DATATYPE, getTotalLayerWeightsandBiases()));
	cudaCALL(CUDA_M_MALLOC_MANAGED(dev_mat_ones, DATATYPE, TRAINING_SIZE));
	cuda_fillMatrix<DATATYPE>(TRAINING_SIZE, 1, dev_mat_ones, 1.0f);
	// one activation buffer per layer, sized for the largest task's topology
	for (uint32_t i = 0; i < LAYER_SIZE + 1; ++i) {
		cudaCALL(CUDA_M_MALLOC_MANAGED(dev_mat_temp_layers[i], DATATYPE, TRAINING_SIZE * getNumberofUnitsbyTaskLayer(TASKINDEX_LARGEST, i)));
	}
	hipDeviceSynchronize();

	BUG(getTotalLayerWeightsandBiases());

	// deterministic ramp genome in [0, 1) for reproducible evaluation
	for (uint32_t i = 0; i < getTotalLayerWeightsandBiases(); ++i) {
		population[0].rnvec[i] = double(i) / getTotalLayerWeightsandBiases();
	}
	printGPUArray(dev_mat_ones, TRAINING_SIZE);
	printGPUArray(dev_mat_temp_rnvec, getTotalLayerWeightsandBiases());

	// evaluate only chromosome 0 (loop kept commented out for reference)
	int i = 0;
	//for (uint32_t i = 0; i < 1; ++i) {
		population[i].skill_factor = i % TASK_SIZE;
		population[i].evalObj(TRAINING_SIZE, OUTPUT_SIZE,
								training_input_data,
								training_output_data,
								dev_mat_temp_rnvec,
								dev_mat_temp_w,
								dev_mat_ones,
								dev_mat_temp_layers,
								cublas_handle, cudnn_handle,
								true);
	//}
	// final-layer activations (the network output)
	printGPUArray(dev_mat_temp_layers[LAYER_SIZE - 1], TRAINING_SIZE * getNumberofUnitsbyTaskLayer(TASKINDEX_LARGEST, LAYER_SIZE));
	hipDeviceSynchronize();
	std::cout << population[0];
}
#include <memory>
#include <fstream>
#include <cstdint>
#include <typeinfo>
#include <vector>
#include <thread>
#include <algorithm>
#include <cmath>
#include <limits>
#include <array>
#include <chrono>
#include <cuda.h>
#include <curand.h>
#include <cublas_v2.h>
#include <cudnn.h>
#include "utils.h"
#include "Matrix.hpp"
//#include "DatasetIonosphereHelper.hpp"
//#include "DatasetTictactoeHelper.hpp"
//#include "DatasetCreditApprovalHelper.hpp"
#include "NbitHelper.hpp"
#include "MFEAChromosome.hpp"
#include "MFEA.hpp"
#include "MFEATask.hpp"
// variables for training data
DATATYPE* training_input_data;
DATATYPE* training_output_data;
// variables for testing data
DATATYPE* testing_input_data;
DATATYPE* testing_output_data;
template<typename TYPE> void doRandomShuffle(TYPE* input_data_ptr, TYPE* output_data_ptr, size_t numberof_samples, size_t numberof_elems_per_inputrow, size_t numberof_elems_per_outputrow) {
TYPE* temp_ptr;
cudaCALL(CUDA_M_MALLOC_MANAGED(temp_ptr, DATATYPE, numberof_elems_per_inputrow > numberof_elems_per_outputrow ? numberof_elems_per_inputrow : numberof_elems_per_outputrow));
std::random_device datahelper_random_device;
std::mt19937 datahelper_mt_engine(datahelper_random_device());
std::uniform_int_distribution<> ui_dist(0, numberof_samples);
for (uint32_t i = 0; i < numberof_samples; ++i) {
uint32_t j = ui_dist(datahelper_mt_engine);
std::cout << "swap " << i << " and " << j << std::endl;
// swap input
// copy i => temp
cudaCALL(cudaMemcpy(temp_ptr, input_data_ptr + i * numberof_elems_per_inputrow, numberof_elems_per_inputrow * sizeof(DATATYPE), cudaMemcpyDeviceToDevice));
// copy j => i
cudaCALL(cudaMemcpy(input_data_ptr + i * numberof_elems_per_inputrow, input_data_ptr + j * numberof_elems_per_inputrow, numberof_elems_per_inputrow * sizeof(DATATYPE), cudaMemcpyDeviceToDevice));
// copy temp => j
cudaCALL(cudaMemcpy(input_data_ptr + j * numberof_elems_per_inputrow, temp_ptr, numberof_elems_per_inputrow * sizeof(DATATYPE), cudaMemcpyDeviceToDevice));
// swap output
// copy i => temp
cudaCALL(cudaMemcpy(temp_ptr, output_data_ptr + i * numberof_elems_per_outputrow, numberof_elems_per_outputrow * sizeof(DATATYPE), cudaMemcpyDeviceToDevice));
// copy j => i
cudaCALL(cudaMemcpy(output_data_ptr + i * numberof_elems_per_outputrow, output_data_ptr + j * numberof_elems_per_outputrow, numberof_elems_per_outputrow * sizeof(DATATYPE), cudaMemcpyDeviceToDevice));
// copy temp => j
cudaCALL(cudaMemcpy(output_data_ptr + j * numberof_elems_per_outputrow, temp_ptr, numberof_elems_per_outputrow * sizeof(DATATYPE), cudaMemcpyDeviceToDevice));
}
cudaCALL(cudaFree(temp_ptr));
}
void showMUltitasksSetting() {
std::cout << "Multitasks setting:" << std::endl;;
for (uint32_t task = 0; task < TASK_SIZE; ++task) {
std::cout << "--- Task " << task << " has " << getNumberofLayersbyTask(task) << " layers (including " << getNumberofLayersbyTask(task) - 1 << " hidden layers)" << std::endl;
for (uint32_t layer = 1; layer < getNumberofLayersbyTask(task) + 1; ++layer) {
if (layer == getNumberofLayersbyTask(task)) {
std::cout << "----- Layer " << layer << " (output layer): " << getNumberofUnitsbyTaskLayer(task, layer) << " units" << std::endl;
} else {
std::cout << "----- Layer " << layer << ": " << getNumberofUnitsbyTaskLayer(task, layer) << " units" << std::endl;
}
std::cout << "------- Data offset = " << std::get<OFFSET_IDX>(getLayerWeightsandBiasesbyTaskLayer(task, layer))
<< "\t Data size = " << std::get<SIZE_IDX>(getLayerWeightsandBiasesbyTaskLayer(task, layer)) << std::endl;
std::cout << "------- Weights offset = " << std::get<OFFSET_IDX>(getLayerWeightsbyTaskLayer(task, layer))
<< "\t Weights size = " << std::get<SIZE_IDX>(getLayerWeightsbyTaskLayer(task, layer)) << std::endl;
std::cout << "------- Biases offset = " << std::get<OFFSET_IDX>(getLayerBiasesbyTaskLayer(task, layer))
<< "\t Biases size = " << std::get<SIZE_IDX>(getLayerBiasesbyTaskLayer(task, layer)) << std::endl;
}
}
}
void testDecode();
void testSBX();
void testPMU();
void testUCL();
void testReproduce();
void testEval();
int main(int argc, char** argv) {
// manually set device for running
int device_id;
if (argc > 1) {
device_id = atoi(argv[1]);
} else {
device_id = 0;
}
cudaSetDevice(device_id);
// load input data
loadDataFile<DATATYPE>(training_input_data, training_output_data, testing_input_data, testing_output_data);
// Total CPU Page faults: 1384 for float
// Total CPU Page faults: 2477 for double
{// limit scope for object destruct before destroy CUDA environment
MFEA<120, 1000, 2> mfea(training_input_data, training_output_data,
testing_input_data, testing_output_data,
device_id);
if (mfea.init_libraries() != 0) {
return EXIT_FAILURE;
}
doRandomShuffle<DATATYPE>(training_input_data, training_input_data, TRAINING_SIZE, INPUT_SIZE, OUTPUT_SIZE);
// measure time code
std::chrono::time_point<std::chrono::system_clock> start, end;
std::chrono::duration<double> elapsed_seconds;
std::time_t end_time;
start = std::chrono::system_clock::now();
mfea.initialize(); // does not cause page fault
mfea.evolution(); // does not cause page fault
// measure time code
end = std::chrono::system_clock::now();
elapsed_seconds = end - start;
end_time = std::chrono::system_clock::to_time_t(end);
std::cout << "finished computation at " << std::ctime(&end_time)
<< "elapsed time: " << elapsed_seconds.count() << "s\n";
mfea.sumariseResults();
mfea.writeSumaryResults();
mfea.reEvaluateTheFinalPopulation();
// for (uint32_t i = 0; i < 200; ++i) {
// float __cf_distributionindex = 1.0 * (std::rand() % 11); // randomize between 0 - 10
// float __mf_randommatingprobability = 1.0;
// float __mf_polynomialmutationindex = 1.0 * (std::rand() % 11); // randomize between 0 - 10
// float __mf_mutationratio = 0.05 * (1 + std::rand() % 10); // randomize between 5% - 50%
// mfea.setTunableFactors(__cf_distributionindex,
// __mf_randommatingprobability,
// __mf_polynomialmutationindex,
// __mf_mutationratio );
// mfea.initialize();
// mfea.evolution();
// mfea.sumariseResults();
// mfea.writeSumaryResults();
// }
mfea.finalize_libraries();
}
showMUltitasksSetting();
// Reset CUDA evironment
cudaDeviceReset();
return 0;
}
/*
int main(int argc, char** argv) {
// manually set device for running
int device_id;
if (argc > 1) {
device_id = atoi(argv[1]);
} else {
device_id = 0;
}
cudaSetDevice(device_id);
cublasHandle_t cublas_handle;
cublasCALL(cublasCreate(&cublas_handle));
cudnnHandle_t cudnn_handle;
cudnnCALL(cudnnCreate(&cudnn_handle));
curandGenerator_t curand_prng;
// Create a pseudo-random number generator
curandCreateGenerator(&curand_prng, CURAND_RNG_PSEUDO_MTGP32);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(curand_prng, 0);
// load input data
loadDataFile<DATATYPE>(training_input_data, training_output_data, testing_input_data, testing_output_data);
testSBX();
showMUltitasksSetting();
// Reset CUDA evironment
cudaDeviceReset();
return 0;
}*/
void testDecode() {
cublasHandle_t cublas_handle;
cublasCALL(cublasCreate(&cublas_handle));
cudnnHandle_t cudnn_handle;
cudnnCALL(cudnnCreate(&cudnn_handle));
curandGenerator_t curand_prng;
// Create a pseudo-random number generator
curandCreateGenerator(&curand_prng, CURAND_RNG_PSEUDO_MTGP32);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(curand_prng, 0);
std::array<MFEA_Chromosome, 4> population;
thrust::for_each(population.begin(), population.end(), MFEA_Chromosome_Randomize(curand_prng));
cudaDeviceSynchronize();
std::cout << population[0];
for (uint32_t i = 0; i < getTotalLayerWeightsandBiases(); ++i) {
population[0].rnvec[i] = i;
}
printMatrix<DATATYPE>(1, getTotalLayerWeightsandBiases(), population[0].rnvec);
//cublas_transposeMatrix<DATATYPE>(6, 8, population[0].rnvec, population[1].rnvec, cublas_handle);
DATATYPE* W;
CUDA_M_MALLOC_MANAGED(W, DATATYPE, getTotalLayerWeightsandBiases());
for (uint32_t task = 0; task < TASK_SIZE; ++task) {
for (uint32_t layer = 1; layer <= getNumberofLayersbyTask(task); ++layer) {
std::tuple<uint32_t, uint32_t> shape = population[0].decode(population[0].rnvec, W, task, layer, cublas_handle);
cudaDeviceSynchronize();
std::cout << "W for task " << task << " layer " << layer << " : " << std::endl;
printMatrix<DATATYPE>(std::get<MATRIX_NROW>(shape), std::get<MATRIX_NCOL>(shape), W);
std::tuple<uint32_t, uint32_t> bias = getLayerBiasesbyTaskLayer(task, layer);
std::cout << "b for task " << task << " layer " << layer << " : " << std::endl;
printMatrix<DATATYPE>(1, std::get<SIZE_IDX>(bias), population[0].rnvec + std::get<OFFSET_IDX>(bias));
}
}
}
void testSBX() {
curandGenerator_t curand_prng;
// Create a pseudo-random number generator
curandCreateGenerator(&curand_prng, CURAND_RNG_PSEUDO_MTGP32);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(curand_prng, 0);
std::array<MFEA_Chromosome, 4> population;
DATATYPE* ct_beta;
cudaCALL(CUDA_M_MALLOC_MANAGED(ct_beta, DATATYPE, getTotalLayerWeightsandBiases()));
test_crossover(population[0], population[1],
population[2], population[3],
5, ct_beta,
curand_prng);
examineCrossover(population[0], population[1], population[2], population[3]);
}
void testPMU() {
curandGenerator_t curand_prng;
// Create a pseudo-random number generator
curandCreateGenerator(&curand_prng, CURAND_RNG_PSEUDO_MTGP32);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(curand_prng, 0);
std::array<MFEA_Chromosome, 4> population;
DATATYPE* ct_beta;
DATATYPE* rp;
cudaCALL(CUDA_M_MALLOC_MANAGED(ct_beta, DATATYPE, getTotalLayerWeightsandBiases()));
cudaCALL(CUDA_M_MALLOC_MANAGED(rp, DATATYPE, getTotalLayerWeightsandBiases()));
test_mutate(population[0], population[1],
5, 1,
ct_beta, rp, curand_prng);
}
void testUCL() {
curandGenerator_t curand_prng;
// Create a pseudo-random number generator
curandCreateGenerator(&curand_prng, CURAND_RNG_PSEUDO_MTGP32);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(curand_prng, 0);
std::array<MFEA_Chromosome, 4> population;
DATATYPE* ct_beta;
cudaCALL(CUDA_M_MALLOC_MANAGED(ct_beta, DATATYPE, getTotalLayerWeightsandBiases()));
test_uniformcrossoverlike(population[0], population[1], ct_beta, curand_prng);
}
void testReproduce() {
curandGenerator_t curand_prng;
// Create a pseudo-random number generator
curandCreateGenerator(&curand_prng, CURAND_RNG_PSEUDO_MTGP32);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(curand_prng, 0);
std::array<MFEA_Chromosome, 4> population;
DATATYPE* ct_beta;
DATATYPE* rp;
cudaCALL(CUDA_M_MALLOC_MANAGED(ct_beta, DATATYPE, getTotalLayerWeightsandBiases()));
cudaCALL(CUDA_M_MALLOC_MANAGED(rp, DATATYPE, getTotalLayerWeightsandBiases()));
test_reproduce(population[0], population[1],
population[2], population[3],
2, 5, 1,
ct_beta, rp,
curand_prng);
}
void testEval() {
cublasHandle_t cublas_handle;
cublasCALL(cublasCreate(&cublas_handle));
cudnnHandle_t cudnn_handle;
cudnnCALL(cudnnCreate(&cudnn_handle));
curandGenerator_t curand_prng;
// Create a pseudo-random number generator
curandCreateGenerator(&curand_prng, CURAND_RNG_PSEUDO_MTGP32);
// Set the seed for the random number generator using the system clock
curandSetPseudoRandomGeneratorSeed(curand_prng, 0);
std::array<MFEA_Chromosome, 4> population;
thrust::for_each(population.begin(), population.end(), MFEA_Chromosome_Randomize(curand_prng));
DATATYPE* dev_mat_temp_rnvec;
DATATYPE* dev_mat_temp_w;
DATATYPE* dev_mat_ones;
std::array<DATATYPE*, LAYER_SIZE + 1> dev_mat_temp_layers;
cudaCALL(CUDA_M_MALLOC_MANAGED(dev_mat_temp_rnvec, DATATYPE, getTotalLayerWeightsandBiases()));
cudaCALL(CUDA_M_MALLOC_MANAGED(dev_mat_temp_w, DATATYPE, getTotalLayerWeightsandBiases()));
cudaCALL(CUDA_M_MALLOC_MANAGED(dev_mat_ones, DATATYPE, TRAINING_SIZE));
cuda_fillMatrix<DATATYPE>(TRAINING_SIZE, 1, dev_mat_ones, 1.0f);
for (uint32_t i = 0; i < LAYER_SIZE + 1; ++i) {
cudaCALL(CUDA_M_MALLOC_MANAGED(dev_mat_temp_layers[i], DATATYPE, TRAINING_SIZE * getNumberofUnitsbyTaskLayer(TASKINDEX_LARGEST, i)));
}
cudaDeviceSynchronize();
BUG(getTotalLayerWeightsandBiases());
for (uint32_t i = 0; i < getTotalLayerWeightsandBiases(); ++i) {
population[0].rnvec[i] = double(i) / getTotalLayerWeightsandBiases();
}
printGPUArray(dev_mat_ones, TRAINING_SIZE);
printGPUArray(dev_mat_temp_rnvec, getTotalLayerWeightsandBiases());
int i = 0;
//for (uint32_t i = 0; i < 1; ++i) {
population[i].skill_factor = i % TASK_SIZE;
population[i].evalObj(TRAINING_SIZE, OUTPUT_SIZE,
training_input_data,
training_output_data,
dev_mat_temp_rnvec,
dev_mat_temp_w,
dev_mat_ones,
dev_mat_temp_layers,
cublas_handle, cudnn_handle,
true);
//}
printGPUArray(dev_mat_temp_layers[LAYER_SIZE - 1], TRAINING_SIZE * getNumberofUnitsbyTaskLayer(TASKINDEX_LARGEST, LAYER_SIZE));
cudaDeviceSynchronize();
std::cout << population[0];
} |
0a97e3d49ac1ed8c072da0ca3531d27ee4d4b020.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernal.cuh"
#define STB_IMAGE_IMPLEMENTATION
#include <stb_image.h>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#define STBI_MSC_SECURE_CRT
#include <stb_image_write.h>
#include <iostream>
uchar3 *d_rgbImage, *d_blurImage;
unsigned char *d_red, *d_green, *d_blue;
unsigned char *d_blurRed, *d_blurGreen, *d_blurBlue;
float *d_filter;
void your_gaussian_blur(imageInfo* ii, unsigned char* h_blurImage, const float *const h_filter, size_t filterWidth)
{
// Allocate GPU memories
allocateMemoryAndCopyToGPU(ii, h_filter, filterWidth);
// Step 1: RGB
hipLaunchKernelGGL(( separateChannels), dim3(ii->height),dim3(ii->width), 0, 0, d_red, d_green, d_blue, d_rgbImage, ii->height, ii->width);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Step 2: Blur
gaussian_blur << <ii->height, ii->width >> > (d_red, d_blurRed,ii->height,ii->width,d_filter,filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
gaussian_blur << <ii->height, ii->width >> > (d_green, d_blurGreen, ii->height, ii->width, d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
gaussian_blur << <ii->height, ii->width >> > (d_blue, d_blurBlue, ii->height, ii->width, d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Step 3:
recombineChannels << <ii->height, ii->width >> > (d_blurRed, d_blurGreen, d_blurBlue ,d_blurImage,ii->height, ii->width);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Step 4: GPUblurImage CPU
checkCudaErrors(hipMemcpy(h_blurImage, d_blurImage, ii->resolution * sizeof(uchar3), hipMemcpyDeviceToHost));
//
cleanGPUMemory();
}
void allocateMemoryAndCopyToGPU(imageInfo *ii, const float* const h_filter, const size_t filterWidth)
{
int numPixels = ii->resolution;
// allocate memory on GPU for picture
checkCudaErrors(hipMalloc((void**)&d_rgbImage, numPixels * sizeof(uchar3)));
checkCudaErrors(hipMalloc((void**)&d_blurImage, numPixels * sizeof(uchar3)));
// Copy Image from CPU to GPU
checkCudaErrors(hipMemcpy(d_rgbImage, ii->image, numPixels * sizeof(uchar3), hipMemcpyHostToDevice));
checkCudaErrors(hipMemset(d_blurImage, 0, numPixels * sizeof(uchar3)));
// allocate channels for image
checkCudaErrors(hipMalloc((void**)&d_red, numPixels * sizeof(unsigned char)));
checkCudaErrors(hipMalloc((void**)&d_green, numPixels * sizeof(unsigned char)));
checkCudaErrors(hipMalloc((void**)&d_blue, numPixels * sizeof(unsigned char)));
checkCudaErrors(hipMemset(d_red, 0, numPixels * sizeof(unsigned char)));
checkCudaErrors(hipMemset(d_green, 0, numPixels * sizeof(unsigned char)));
checkCudaErrors(hipMemset(d_blue, 0, numPixels * sizeof(unsigned char)));
// allocate channels for blured image
checkCudaErrors(hipMalloc((void**)&d_blurRed, numPixels * sizeof(unsigned char)));
checkCudaErrors(hipMalloc((void**)&d_blurGreen, numPixels * sizeof(unsigned char)));
checkCudaErrors(hipMalloc((void**)&d_blurBlue, numPixels * sizeof(unsigned char)));
checkCudaErrors(hipMemset(d_blurRed, 0, numPixels * sizeof(unsigned char)));
checkCudaErrors(hipMemset(d_blurGreen, 0, numPixels * sizeof(unsigned char)));
checkCudaErrors(hipMemset(d_blurBlue, 0, numPixels * sizeof(unsigned char)));
// Allocate memory for filter
checkCudaErrors(hipMalloc((void**)&d_filter, filterWidth * filterWidth * sizeof(float)));
checkCudaErrors(hipMemcpy(d_filter, h_filter, filterWidth * filterWidth * sizeof(float), hipMemcpyHostToDevice));
}
void cleanGPUMemory()
{
// Free GPU memory
checkCudaErrors(hipFree(d_rgbImage));
checkCudaErrors(hipFree(d_blurImage));
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
checkCudaErrors(hipFree(d_blurRed));
checkCudaErrors(hipFree(d_blurGreen));
checkCudaErrors(hipFree(d_blurBlue));
checkCudaErrors(hipFree(d_filter));
}
__global__ void separateChannels(unsigned char * const redChannel, unsigned char * const greenChannel,
unsigned char * const blueChannel, const uchar3 * const inputImageRGB, int numRows, int numCols)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
redChannel[idx] = inputImageRGB[idx].x;
greenChannel[idx] = inputImageRGB[idx].y;
blueChannel[idx] = inputImageRGB[idx].z;
}
__global__ void recombineChannels(const unsigned char * const redChannel, const unsigned char * const greenChannel,
const unsigned char * const blueChannel, uchar3 * const outputImageRGB, int numRows, int numCols)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned char red = redChannel[idx];
unsigned char green = greenChannel[idx];
unsigned char blue = blueChannel[idx];
outputImageRGB[idx] = make_uchar3(red, green, blue);
}
__global__ void gaussian_blur(const unsigned char * const inputChannel, unsigned char * const outputChannel,
int numRows, int numCols, const float * const filter, const int filterWidth)
{
// compute current row and cloumn index
int row = blockIdx.x;
int col = threadIdx.x;
int currentTidx = row * blockDim.x + col;
// find its neighbors' index
int left = (col - filterWidth / 2) ; left = left < 0 ? 0 : left;
int right = (col + filterWidth / 2) ; right = right < numCols? right : numCols;
int up = (row - filterWidth / 2) ; up = up < 0 ? 0 : up;
int below = (row + filterWidth / 2) ; below = below < numRows ? below : numRows;
for (size_t i = left; i < left + filterWidth && i <= right; i++)
for (size_t j = up; j < up + filterWidth && j <= below; j++)
{
int tIdx = j * blockDim.x + i;
int x = i - col;
int y = j - row;
int filterIdx = (y - 1) * filterWidth + x;
filterIdx = 0 - filterIdx;
outputChannel[currentTidx] += filter[filterIdx] * inputChannel[tIdx];
}
}
bool readImage(const char * filename, imageInfo* ii)
{
int width, height, channels_in_file;
ii->image = stbi_load(filename, &width, &height, &channels_in_file, 0);
if (ii->image == NULL)
{
std::cerr << "Failed to load Image at: " << filename << std::endl;
return false;
}
ii->height = height;
ii->width = width;
ii->resolution = height * width;
return true;
}
void writeImage(const char* filename, imageInfo* ii, const unsigned char *h_blurImage)
{
int res = stbi_write_jpg(filename, ii->width, ii->height, 3, h_blurImage, 0);
if (res == 0)
{
std::cout << "Failed to write image file" << std::endl;
return;
}
std::cout << "Write Image Successfully to: " << filename << std::endl;
}
void exec(const char * inputFile, const char * outputFile)
{
//
imageInfo* ii = new imageInfo();
bool res = readImage(inputFile, ii);
if (!res) return;
unsigned char *h_out = (unsigned char*)malloc(sizeof(uchar3) * ii->resolution);
if (h_out == NULL)
{
std::cout << "Failed to malloc h_out space" << std::endl;
return;
}
float h_filter[] = { 0.0,0.2,0.0,
0.2,0.2,0.2,
0.0,0.2,0.0 };
// Blur
your_gaussian_blur(ii,h_out,h_filter,3);
// Blur
writeImage(outputFile, ii, h_out);
//
free(ii);
free(h_out);
h_out = NULL;
} | 0a97e3d49ac1ed8c072da0ca3531d27ee4d4b020.cu | #include "kernal.cuh"
#define STB_IMAGE_IMPLEMENTATION
#include <stb_image.h>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#define STBI_MSC_SECURE_CRT
#include <stb_image_write.h>
#include <iostream>
uchar3 *d_rgbImage, *d_blurImage;
unsigned char *d_red, *d_green, *d_blue;
unsigned char *d_blurRed, *d_blurGreen, *d_blurBlue;
float *d_filter;
void your_gaussian_blur(imageInfo* ii, unsigned char* h_blurImage, const float *const h_filter, size_t filterWidth)
{
// Allocate GPU memories
allocateMemoryAndCopyToGPU(ii, h_filter, filterWidth);
// Step 1: 将RGB三通道分开
separateChannels<<<ii->height,ii->width>>>(d_red, d_green, d_blue, d_rgbImage, ii->height, ii->width);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Step 2: 对每个通过分别进行Blur操作
gaussian_blur << <ii->height, ii->width >> > (d_red, d_blurRed,ii->height,ii->width,d_filter,filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur << <ii->height, ii->width >> > (d_green, d_blurGreen, ii->height, ii->width, d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur << <ii->height, ii->width >> > (d_blue, d_blurBlue, ii->height, ii->width, d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Step 3: 将三通道合并
recombineChannels << <ii->height, ii->width >> > (d_blurRed, d_blurGreen, d_blurBlue ,d_blurImage,ii->height, ii->width);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Step 4: 将GPU上blur的Image 传到 CPU内存上
checkCudaErrors(cudaMemcpy(h_blurImage, d_blurImage, ii->resolution * sizeof(uchar3), cudaMemcpyDeviceToHost));
// 释放显存空间
cleanGPUMemory();
}
void allocateMemoryAndCopyToGPU(imageInfo *ii, const float* const h_filter, const size_t filterWidth)
{
int numPixels = ii->resolution;
// allocate memory on GPU for picture
checkCudaErrors(cudaMalloc((void**)&d_rgbImage, numPixels * sizeof(uchar3)));
checkCudaErrors(cudaMalloc((void**)&d_blurImage, numPixels * sizeof(uchar3)));
// Copy Image from CPU to GPU
checkCudaErrors(cudaMemcpy(d_rgbImage, ii->image, numPixels * sizeof(uchar3), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemset(d_blurImage, 0, numPixels * sizeof(uchar3)));
// allocate channels for image
checkCudaErrors(cudaMalloc((void**)&d_red, numPixels * sizeof(unsigned char)));
checkCudaErrors(cudaMalloc((void**)&d_green, numPixels * sizeof(unsigned char)));
checkCudaErrors(cudaMalloc((void**)&d_blue, numPixels * sizeof(unsigned char)));
checkCudaErrors(cudaMemset(d_red, 0, numPixels * sizeof(unsigned char)));
checkCudaErrors(cudaMemset(d_green, 0, numPixels * sizeof(unsigned char)));
checkCudaErrors(cudaMemset(d_blue, 0, numPixels * sizeof(unsigned char)));
// allocate channels for blured image
checkCudaErrors(cudaMalloc((void**)&d_blurRed, numPixels * sizeof(unsigned char)));
checkCudaErrors(cudaMalloc((void**)&d_blurGreen, numPixels * sizeof(unsigned char)));
checkCudaErrors(cudaMalloc((void**)&d_blurBlue, numPixels * sizeof(unsigned char)));
checkCudaErrors(cudaMemset(d_blurRed, 0, numPixels * sizeof(unsigned char)));
checkCudaErrors(cudaMemset(d_blurGreen, 0, numPixels * sizeof(unsigned char)));
checkCudaErrors(cudaMemset(d_blurBlue, 0, numPixels * sizeof(unsigned char)));
// Allocate memory for filter
checkCudaErrors(cudaMalloc((void**)&d_filter, filterWidth * filterWidth * sizeof(float)));
checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterWidth * filterWidth * sizeof(float), cudaMemcpyHostToDevice));
}
void cleanGPUMemory()
{
// Free GPU memory
checkCudaErrors(cudaFree(d_rgbImage));
checkCudaErrors(cudaFree(d_blurImage));
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_blurRed));
checkCudaErrors(cudaFree(d_blurGreen));
checkCudaErrors(cudaFree(d_blurBlue));
checkCudaErrors(cudaFree(d_filter));
}
__global__ void separateChannels(unsigned char * const redChannel, unsigned char * const greenChannel,
unsigned char * const blueChannel, const uchar3 * const inputImageRGB, int numRows, int numCols)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
redChannel[idx] = inputImageRGB[idx].x;
greenChannel[idx] = inputImageRGB[idx].y;
blueChannel[idx] = inputImageRGB[idx].z;
}
__global__ void recombineChannels(const unsigned char * const redChannel, const unsigned char * const greenChannel,
const unsigned char * const blueChannel, uchar3 * const outputImageRGB, int numRows, int numCols)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned char red = redChannel[idx];
unsigned char green = greenChannel[idx];
unsigned char blue = blueChannel[idx];
outputImageRGB[idx] = make_uchar3(red, green, blue);
}
__global__ void gaussian_blur(const unsigned char * const inputChannel, unsigned char * const outputChannel,
int numRows, int numCols, const float * const filter, const int filterWidth)
{
// compute current row and cloumn index
int row = blockIdx.x;
int col = threadIdx.x;
int currentTidx = row * blockDim.x + col;
// find its neighbors' index
int left = (col - filterWidth / 2) ; left = left < 0 ? 0 : left;
int right = (col + filterWidth / 2) ; right = right < numCols? right : numCols;
int up = (row - filterWidth / 2) ; up = up < 0 ? 0 : up;
int below = (row + filterWidth / 2) ; below = below < numRows ? below : numRows;
for (size_t i = left; i < left + filterWidth && i <= right; i++)
for (size_t j = up; j < up + filterWidth && j <= below; j++)
{
int tIdx = j * blockDim.x + i;
int x = i - col;
int y = j - row;
int filterIdx = (y - 1) * filterWidth + x;
filterIdx = 0 - filterIdx;
outputChannel[currentTidx] += filter[filterIdx] * inputChannel[tIdx];
}
}
bool readImage(const char * filename, imageInfo* ii)
{
int width, height, channels_in_file;
ii->image = stbi_load(filename, &width, &height, &channels_in_file, 0);
if (ii->image == NULL)
{
std::cerr << "Failed to load Image at: " << filename << std::endl;
return false;
}
ii->height = height;
ii->width = width;
ii->resolution = height * width;
return true;
}
void writeImage(const char* filename, imageInfo* ii, const unsigned char *h_blurImage)
{
int res = stbi_write_jpg(filename, ii->width, ii->height, 3, h_blurImage, 0);
if (res == 0)
{
std::cout << "Failed to write image file" << std::endl;
return;
}
std::cout << "Write Image Successfully to: " << filename << std::endl;
}
void exec(const char * inputFile, const char * outputFile)
{
// 读取图片
imageInfo* ii = new imageInfo();
bool res = readImage(inputFile, ii);
if (!res) return;
unsigned char *h_out = (unsigned char*)malloc(sizeof(uchar3) * ii->resolution);
if (h_out == NULL)
{
std::cout << "Failed to malloc h_out space" << std::endl;
return;
}
float h_filter[] = { 0.0,0.2,0.0,
0.2,0.2,0.2,
0.0,0.2,0.0 };
// Blur 图片
your_gaussian_blur(ii,h_out,h_filter,3);
// 保存 Blur 图片
writeImage(outputFile, ii, h_out);
// 释放空间
free(ii);
free(h_out);
h_out = NULL;
} |
abff219801d2f17439216d98baf2e5ee82ca4dc1.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
template<typename scalar_t>
struct CompareGEFunctor {
__device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
return a >= b;
}
};
void ge_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "ge_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareGEFunctor<scalar_t>());
});
}
REGISTER_DISPATCH(ge_stub, &ge_kernel_cuda);
}} // namespace at::native
| abff219801d2f17439216d98baf2e5ee82ca4dc1.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
template<typename scalar_t>
struct CompareGEFunctor {
__device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
return a >= b;
}
};
void ge_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "ge_cuda", [&]() {
gpu_kernel_with_scalars(iter, CompareGEFunctor<scalar_t>());
});
}
REGISTER_DISPATCH(ge_stub, &ge_kernel_cuda);
}} // namespace at::native
|
fa55c98800259f51ebdebcf41b1ba198dcd801ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THH/THHAtomics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/device_vector.h>
#include <ATen/native/hip/EmbeddingBackwardKernel.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
constexpr int MODE_SUM = 0;
constexpr int MODE_MEAN = 1;
constexpr int MODE_MAX = 2;
// This kernel assumes that all input tensors except `weight` and
// per_sample_weights are contiguous.
template <typename scalar_t>
__global__ void EmbeddingBag_updateOutputKernel(
int64_t *input, int64_t *offsets, scalar_t *weight, scalar_t *output,
int64_t *offset2bag, int64_t numIndices, int64_t numBags,
int64_t featureSize, int64_t weight_stide0, int64_t weight_stride1,
int mode, int64_t *bag_size, int64_t *max_indices,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride) {
// the strategy here is that each bag x feature is handled by a single thread
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = THCCeilDiv(featureSize, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < featureSize) {
int64_t bag = chunk / chunksPerBag;
scalar_t *weightFeat = weight + featureDim * weight_stride1;
int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it
int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices;
assert(end >= begin);
accscalar_t weightFeatSum = 0;
scalar_t weightFeatMax;
int64_t bag_size_ = 0;
int64_t maxWord = -1;
for (int64_t emb = begin; emb < end; emb++) {
const int64_t weightRow = input[emb] * weight_stide0;
scalar_t weightValue = weightFeat[weightRow];
if (mode == MODE_MAX) {
if (emb == begin || weightValue > weightFeatMax) {
weightFeatMax = weightValue;
maxWord = input[emb];
}
} else {
if (per_sample_weights) {
accscalar_t scaleWeightBy = static_cast<accscalar_t>(
per_sample_weights[emb * per_sample_weights_stride]);
weightFeatSum += scaleWeightBy * static_cast<accscalar_t>(weightValue);
} else {
weightFeatSum += static_cast<accscalar_t>(weightValue);
}
}
bag_size_++;
if (featureDim == 0) {
offset2bag[emb] = bag;
}
}
if (mode == MODE_MEAN) {
if (end == begin) {
bag_size[bag] = 0;
} else {
weightFeatSum = weightFeatSum / static_cast<accscalar_t>(bag_size_);
bag_size[bag] = bag_size_;
}
}
if (mode == MODE_MEAN || mode == MODE_SUM) {
output[bag * featureSize + featureDim] = static_cast<scalar_t>(weightFeatSum);
}
else if (mode == MODE_MAX) {
if (end == begin) {
// If bag is empty, set output to 0.
weightFeatMax = 0;
}
max_indices[bag * featureSize + featureDim] = maxWord;
output[bag * featureSize + featureDim] = weightFeatMax;
}
}
}
}
// Backward pass of embedding_bag for MODE_SUM / MODE_MEAN.
// Sorts the lookup indices (keeping a permutation back to the original
// positions) so that duplicate embedding rows become adjacent, optionally
// builds per-row occurrence counts for scale_grad_by_freq, then delegates the
// actual gradient scatter to the shared embedding_backward_cuda_kernel.
Tensor embedding_bag_backward_cuda_sum_avg(
const Tensor &grad,
const Tensor &indices,
const Tensor &offset2bag,
const Tensor &bag_size,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode,
const Tensor& per_sample_weights) {
// One gradient row per embedding-table entry, zero-initialized.
auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
ptrdiff_t numel = indices.numel();
if (numel == 0) {
// all empty bags
return at::zeros({num_weights, grad.size(1)}, grad.options());
}
int64_t stride = grad_weight.stride(0);
auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>());
thrust::copy(policy, count_iter, count_iter + numel, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + numel, orig_data,
ThrustLTOp<int64_t>());
}
// `count` stays undefined unless scale_grad_by_freq is set; the downstream
// kernel receives it as-is.
Tensor count;
if (scale_grad_by_freq) {
count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>())
auto count_data = device_ptr(count.data_ptr<int64_t>());
thrust::inclusive_scan_by_key(policy, sorted_data, sorted_data + numel,
thrust::make_constant_iterator(1),
count_data);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy, thrust::make_reverse_iterator(sorted_data + numel),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + numel),
thrust::make_reverse_iterator(count_data + numel),
thrust::equal_to<int64_t>(), thrust::maximum<int64_t>());
}
// mode == MODE_MEAN tells the kernel to divide each contribution by bag size.
return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices,
count, num_weights, /* padding_idx= */ -1, scale_grad_by_freq,
mode == MODE_MEAN, offset2bag, bag_size, per_sample_weights);
}
// Backward kernel for MODE_MAX: each output-gradient element is routed to the
// weight row that produced the maximum in the forward pass (max_indices).
// Launch layout mirrors the forward kernel: blockDim.x threads cooperate over
// the feature dimension, and blockDim.y * gridDim.x "chunks" stride over bags.
template <typename scalar_t>
__global__ void EmbeddingBag_accGradParametersKernel_max(
int64_t *max_indices, scalar_t *gradOutput,
scalar_t *gradWeight, int64_t stride, int64_t numBags) {
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = THCCeilDiv(stride, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < stride) {
int64_t bag = chunk / chunksPerBag;
int64_t word_idx = max_indices[bag * stride + featureDim];
if (word_idx >= 0) {
// If bag is empty, we have max_indices[idx] set to -1 in forward.
// Atomic because several bags may share the same argmax row.
gpuAtomicAdd(&(gradWeight[word_idx * stride + featureDim]),
gradOutput[bag * stride + featureDim]);
}
}
}
}
// Host wrapper for the MODE_MAX backward: allocates the zeroed gradient
// buffer and launches EmbeddingBag_accGradParametersKernel_max.
Tensor embedding_bag_backward_cuda_max(const Tensor &grad,
const Tensor &max_indices,
int64_t num_weights) {
auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options());
int64_t stride = grad_weight.stride(0);
int64_t numBags = grad.size(0);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Block shape tuned per platform (AMD wavefront is 64 wide).
#ifdef __HIP_PLATFORM_HCC__
dim3 block = dim3(64, 4);
#else
dim3 block = dim3(32, 8);
#endif
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_max", [&] {
hipLaunchKernelGGL(( EmbeddingBag_accGradParametersKernel_max<
scalar_t>), dim3(grid), dim3(block), 0, stream,
max_indices.data_ptr<int64_t>(), grad.data_ptr<scalar_t>(),
grad_weight.data_ptr<scalar_t>(), stride, numBags);
});
AT_CUDA_CHECK(hipGetLastError());
return grad_weight;
}
}
// Assumes all input tensors are contiguous.
// See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details
//
// Forward entry point for embedding_bag on GPU. Returns
// (output, offset2bag, bag_size, max_indices):
//   - offset2bag maps each index position to its bag;
//   - bag_size is written by the kernel only for MODE_MEAN;
//   - max_indices is only populated (and only allocated full-size) for MODE_MAX.
std::tuple<Tensor, Tensor, Tensor, Tensor>
_embedding_bag_cuda(const Tensor &weight, const Tensor &indices,
const Tensor &offsets, const bool scale_grad_by_freq,
const int64_t mode, bool sparse,
const Tensor& per_sample_weights,
bool include_last_offset) {
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_bag_cuda", indices_arg, kLong);
auto offsets_arg = TensorArg(offsets, "offsets", 1);
checkScalarType("embedding_bag_cuda", offsets_arg, kLong);
auto weight_arg = TensorArg(weight, "weight", 1);
checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg);
checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg);
int64_t numIndices = indices.size(0);
int64_t numBags = offsets.size(0);
if (include_last_offset) {
// Check https://github.com/pytorch/pytorch/issues/29019
// We plan to add one more element in offsets, which is equal to the size of
// indices. Currently for cuda devices, we still use the legacy
// implementation even this flag is enabled.
TORCH_CHECK(
numBags >= 1, "include_last_offset: numBags should be at least 1");
numBags -= 1;
}
int64_t featureSize = weight.size(1);
auto bag_size = at::empty(offsets.sizes(), indices.options());
auto offset2bag =
at::empty({indices.size(0)}, indices.options()); // offset2bag = [0 0 0 0 0]
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto output = at::empty({numBags, featureSize}, weight.options());
Tensor max_indices;
if (mode == MODE_MAX) {
max_indices = at::empty({numBags, featureSize}, indices.options());
} else {
// No need to allocate if we aren't doing a backwards pass
max_indices = at::empty({0}, indices.options());
}
// Block shape tuned per platform (AMD wavefront is 64 wide).
#ifdef __HIP_PLATFORM_HCC__
dim3 block = dim3(64, 4);
#else
dim3 block = dim3(32, 8);
#endif
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, weight.scalar_type(), "embedding_bag_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "embedding_bag_cuda", [&] {
hipLaunchKernelGGL(( EmbeddingBag_updateOutputKernel<scalar_t>), dim3(grid), dim3(block), 0, stream,
indices.data_ptr<int64_t>(), offsets.data_ptr<int64_t>(),
weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
offset2bag.data_ptr<int64_t>(), numIndices, numBags, featureSize,
weight.stride(0), weight.stride(1), mode, bag_size.data_ptr<int64_t>(),
mode == MODE_MAX ? max_indices.data_ptr<int64_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0);
});
});
AT_CUDA_CHECK(hipGetLastError());
return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, offset2bag, bag_size, max_indices);
}
// Dense backward entry point: dispatches to the mode-specific helper
// (sum/mean share one path; max has its own) after device checks.
Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &indices,
const Tensor &offsets,
const Tensor &offset2bag,
const Tensor &bag_size_,
const Tensor &max_indices,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode,
const Tensor& per_sample_weights) {
// indices, offsets and offset2bag are assumed having correct dtypes and
// contiguous here due to the checks in _embedding_bag_backward in
// EmbeddingBag.cpp.
// Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml
// for more details.
Tensor grad = grad_.contiguous();
auto indices_arg = TensorArg(indices, "indices", 1);
auto offsets_arg = TensorArg(offsets, "offsets", 1);
auto grad_arg = TensorArg(grad, "grad", 1);
checkSameGPU("embedding_bag_cuda", grad_arg, offsets_arg);
checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg);
switch (mode) {
case MODE_SUM:
case MODE_MEAN:
// per_sample_weights is only meaningful for MODE_SUM.
if (mode == MODE_MEAN)
AT_ASSERT(!per_sample_weights.defined());
return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag,
bag_size_, num_weights, scale_grad_by_freq, mode, per_sample_weights);
case MODE_MAX:
AT_ASSERT(!per_sample_weights.defined());
return embedding_bag_backward_cuda_max(grad, max_indices, num_weights);
default:
AT_ERROR(
"Unknown mode for embedding_bag_backward_cuda ", mode);
}
}
// Warp-wide tree reduction: after the shuffles, lane 0 holds the sum of the
// `val` inputs of the participating lanes (other lanes hold partial sums).
template <typename scalar_t>
__inline__ __device__
static scalar_t warpReduceSum(scalar_t val) {
  int offset = C10_WARP_SIZE / 2;
  while (offset > 0) {
    val += WARP_SHFL_DOWN(val, offset);
    offset >>= 1;
  }
  return val;
}
// Gradient of embedding_bag w.r.t. per_sample_weights: one dot product of
// grad[bag_idx] with weight[embedding_idx] per sample, computed one sample
// per warp (lanes stride over the feature dimension, then warp-reduce).
template <typename scalar_t>
__global__ static void _embedding_bag_per_sample_weights_backward_kernel(
const scalar_t* grad, int64_t grad_stride0, int64_t grad_stride1,
const scalar_t* weight, int64_t weight_stride0, int64_t weight_stride1,
const int64_t* indices, // contiguous
const int64_t* offset2bag, // contiguous
int64_t num_samples,
int64_t embedding_features,
scalar_t* output) {
using accscalar_t = acc_type<scalar_t, true>;
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int warp = idx / C10_WARP_SIZE;
const int thread_in_warp = idx % C10_WARP_SIZE;
const int num_warps = blockDim.x * gridDim.x / C10_WARP_SIZE;
// Each warp is responsible for the accumulation of one sample.
// This involves doing one dot product between grad[bag_idx] and weight[embedding_idx].
for (int sample_idx = warp; sample_idx < num_samples; sample_idx += num_warps) {
// Accumulate in accscalar_t (e.g. float for half inputs) for precision.
accscalar_t result = 0.;
const int bag_idx = (int)offset2bag[sample_idx];
const int embedding_idx = (int)indices[sample_idx];
for (int feature_idx = thread_in_warp; feature_idx < embedding_features;
feature_idx += C10_WARP_SIZE) {
result +=
grad[grad_stride0 * bag_idx + grad_stride1 * feature_idx] *
weight[weight_stride0 * embedding_idx + weight_stride1 * feature_idx];
}
result = warpReduceSum<accscalar_t>(result);
// Lane 0 holds the full dot product after the reduction.
if (thread_in_warp == 0) {
output[sample_idx] = result;
}
}
}
// Host wrapper: launches one warp per sample to compute the gradient of
// embedding_bag w.r.t. per_sample_weights (MODE_SUM only).
Tensor _embedding_bag_per_sample_weights_backward_cuda(
const Tensor& grad,
const Tensor& weight, // NB: embedding table, not per_sample_weights
const Tensor& indices,
const Tensor& offsets,
const Tensor& offset2bag,
int64_t mode) {
TORCH_CHECK(
mode == MODE_SUM,
"embedding_bag_backward: per_sample_weights only supported for mode='sum'");
AT_ASSERT(grad.dim() == 2);
auto embedding_features = grad.size(1);
AT_ASSERT(indices.dim() == 1);
auto num_samples = indices.size(0);
AT_ASSERT(weight.dim() == 2);
AT_ASSERT(weight.size(1) == embedding_features);
// One warp per sample; ceil-divide samples over warps per block.
const int threads_per_block = 1024;
const int warps_per_block = threads_per_block / C10_WARP_SIZE;
dim3 block(threads_per_block);
dim3 grid((num_samples + warps_per_block - 1) / warps_per_block);
auto output = at::empty({num_samples}, grad.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() {
hipLaunchKernelGGL(( _embedding_bag_per_sample_weights_backward_kernel<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad.data_ptr<scalar_t>(), grad.stride(0), grad.stride(1),
weight.data_ptr<scalar_t>(), weight.stride(0), weight.stride(1),
indices.data_ptr<int64_t>(),
offset2bag.data_ptr<int64_t>(),
num_samples,
embedding_features,
output.data_ptr<scalar_t>());
}
);
// NOTE(review): unlike the other launches in this file, no
// hipGetLastError() check follows this launch — consider adding one.
return output;
}
}
}
| fa55c98800259f51ebdebcf41b1ba198dcd801ce.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THC/THCAtomics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/device_vector.h>
#include <ATen/native/cuda/EmbeddingBackwardKernel.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
constexpr int MODE_SUM = 0;
constexpr int MODE_MEAN = 1;
constexpr int MODE_MAX = 2;
// This kernel assumes that all input tensors except `weight` and
// per_sample_weights are contiguous.
// Forward kernel for embedding_bag. Work is split into "chunks": one chunk is
// one bag crossed with one blockDim.x-wide slice of the feature dimension, so
// each (bag, feature) element is handled by a single thread. Bag b covers
// input positions [offsets[b], offsets[b+1]) (last bag runs to numIndices).
// Side outputs: offset2bag (index position -> bag, written by featureDim 0),
// bag_size (MODE_MEAN only), max_indices (MODE_MAX only, -1 for empty bags).
template <typename scalar_t>
__global__ void EmbeddingBag_updateOutputKernel(
int64_t *input, int64_t *offsets, scalar_t *weight, scalar_t *output,
int64_t *offset2bag, int64_t numIndices, int64_t numBags,
int64_t featureSize, int64_t weight_stide0, int64_t weight_stride1,
int mode, int64_t *bag_size, int64_t *max_indices,
scalar_t* per_sample_weights, int64_t per_sample_weights_stride) {
// the strategy here is that each bag x feature is handled by a single thread
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = THCCeilDiv(featureSize, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < featureSize) {
int64_t bag = chunk / chunksPerBag;
scalar_t *weightFeat = weight + featureDim * weight_stride1;
int64_t begin = bag == 0 ? 0 : offsets[bag]; // forces first offset to be 0 instead of asserting on it
int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices;
assert(end >= begin);
accscalar_t weightFeatSum = 0;
scalar_t weightFeatMax;
int64_t bag_size_ = 0;
int64_t maxWord = -1;
// Walk all lookup indices belonging to this bag.
for (int64_t emb = begin; emb < end; emb++) {
const int64_t weightRow = input[emb] * weight_stide0;
scalar_t weightValue = weightFeat[weightRow];
if (mode == MODE_MAX) {
if (emb == begin || weightValue > weightFeatMax) {
weightFeatMax = weightValue;
maxWord = input[emb];
}
} else {
if (per_sample_weights) {
accscalar_t scaleWeightBy = static_cast<accscalar_t>(
per_sample_weights[emb * per_sample_weights_stride]);
weightFeatSum += scaleWeightBy * static_cast<accscalar_t>(weightValue);
} else {
weightFeatSum += static_cast<accscalar_t>(weightValue);
}
}
bag_size_++;
// Only one feature lane records the index->bag mapping.
if (featureDim == 0) {
offset2bag[emb] = bag;
}
}
if (mode == MODE_MEAN) {
if (end == begin) {
bag_size[bag] = 0;
} else {
weightFeatSum = weightFeatSum / static_cast<accscalar_t>(bag_size_);
bag_size[bag] = bag_size_;
}
}
if (mode == MODE_MEAN || mode == MODE_SUM) {
output[bag * featureSize + featureDim] = static_cast<scalar_t>(weightFeatSum);
}
else if (mode == MODE_MAX) {
if (end == begin) {
// If bag is empty, set output to 0.
weightFeatMax = 0;
}
max_indices[bag * featureSize + featureDim] = maxWord;
output[bag * featureSize + featureDim] = weightFeatMax;
}
}
}
}
// Backward pass of embedding_bag for MODE_SUM / MODE_MEAN.
// Sorts the lookup indices (keeping a permutation back to the original
// positions) so that duplicate embedding rows become adjacent, optionally
// builds per-row occurrence counts for scale_grad_by_freq, then delegates the
// actual gradient scatter to the shared embedding_backward_cuda_kernel.
Tensor embedding_bag_backward_cuda_sum_avg(
const Tensor &grad,
const Tensor &indices,
const Tensor &offset2bag,
const Tensor &bag_size,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode,
const Tensor& per_sample_weights) {
// One gradient row per embedding-table entry, zero-initialized.
auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
ptrdiff_t numel = indices.numel();
if (numel == 0) {
// all empty bags
return at::zeros({num_weights, grad.size(1)}, grad.options());
}
int64_t stride = grad_weight.stride(0);
auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data_ptr<int64_t>());
thrust::copy(policy, count_iter, count_iter + numel, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + numel, orig_data,
ThrustLTOp<int64_t>());
}
// `count` stays undefined unless scale_grad_by_freq is set; the downstream
// kernel receives it as-is.
Tensor count;
if (scale_grad_by_freq) {
count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data_ptr<int64_t>());
auto count_data = device_ptr(count.data_ptr<int64_t>());
thrust::inclusive_scan_by_key(policy, sorted_data, sorted_data + numel,
thrust::make_constant_iterator(1),
count_data);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy, thrust::make_reverse_iterator(sorted_data + numel),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + numel),
thrust::make_reverse_iterator(count_data + numel),
thrust::equal_to<int64_t>(), thrust::maximum<int64_t>());
}
// mode == MODE_MEAN tells the kernel to divide each contribution by bag size.
return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices,
count, num_weights, /* padding_idx= */ -1, scale_grad_by_freq,
mode == MODE_MEAN, offset2bag, bag_size, per_sample_weights);
}
// Backward kernel for MODE_MAX: each output-gradient element is routed to the
// weight row that produced the maximum in the forward pass (max_indices).
// Launch layout mirrors the forward kernel: blockDim.x threads cooperate over
// the feature dimension, and blockDim.y * gridDim.x "chunks" stride over bags.
template <typename scalar_t>
__global__ void EmbeddingBag_accGradParametersKernel_max(
int64_t *max_indices, scalar_t *gradOutput,
scalar_t *gradWeight, int64_t stride, int64_t numBags) {
using accscalar_t = acc_type<scalar_t, true>;
int64_t chunksPerBag = THCCeilDiv(stride, (int64_t)blockDim.x);
int64_t numChunks = numBags * chunksPerBag;
int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y;
int64_t chunkStride = gridDim.x * blockDim.y;
for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) {
int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x;
if (featureDim < stride) {
int64_t bag = chunk / chunksPerBag;
int64_t word_idx = max_indices[bag * stride + featureDim];
if (word_idx >= 0) {
// If bag is empty, we have max_indices[idx] set to -1 in forward.
// Atomic because several bags may share the same argmax row.
gpuAtomicAdd(&(gradWeight[word_idx * stride + featureDim]),
gradOutput[bag * stride + featureDim]);
}
}
}
}
// Host wrapper for the MODE_MAX backward: allocates the zeroed gradient
// buffer and launches EmbeddingBag_accGradParametersKernel_max.
Tensor embedding_bag_backward_cuda_max(const Tensor &grad,
const Tensor &max_indices,
int64_t num_weights) {
auto grad_weight = at::zeros({num_weights, grad.size(1)}, grad.options());
int64_t stride = grad_weight.stride(0);
int64_t numBags = grad.size(0);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// Block shape tuned per platform (AMD wavefront is 64 wide).
#ifdef __HIP_PLATFORM_HCC__
dim3 block = dim3(64, 4);
#else
dim3 block = dim3(32, 8);
#endif
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "embedding_bag_backward_cuda_max", [&] {
EmbeddingBag_accGradParametersKernel_max<
scalar_t><<<grid, block, 0, stream>>>(
max_indices.data_ptr<int64_t>(), grad.data_ptr<scalar_t>(),
grad_weight.data_ptr<scalar_t>(), stride, numBags);
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_weight;
}
}
// Assumes all input tensors are contiguous.
// See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details
//
// Forward entry point for embedding_bag on CUDA. Returns
// (output, offset2bag, bag_size, max_indices):
//   - offset2bag maps each index position to its bag;
//   - bag_size is written by the kernel only for MODE_MEAN;
//   - max_indices is only populated (and only allocated full-size) for MODE_MAX.
std::tuple<Tensor, Tensor, Tensor, Tensor>
_embedding_bag_cuda(const Tensor &weight, const Tensor &indices,
const Tensor &offsets, const bool scale_grad_by_freq,
const int64_t mode, bool sparse,
const Tensor& per_sample_weights,
bool include_last_offset) {
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_bag_cuda", indices_arg, kLong);
auto offsets_arg = TensorArg(offsets, "offsets", 1);
checkScalarType("embedding_bag_cuda", offsets_arg, kLong);
auto weight_arg = TensorArg(weight, "weight", 1);
checkSameGPU("embedding_bag_cuda", weight_arg, indices_arg);
checkSameGPU("embedding_bag_cuda", weight_arg, offsets_arg);
int64_t numIndices = indices.size(0);
int64_t numBags = offsets.size(0);
if (include_last_offset) {
// Check https://github.com/pytorch/pytorch/issues/29019
// We plan to add one more element in offsets, which is equal to the size of
// indices. Currently for cuda devices, we still use the legacy
// implementation even this flag is enabled.
TORCH_CHECK(
numBags >= 1, "include_last_offset: numBags should be at least 1");
numBags -= 1;
}
int64_t featureSize = weight.size(1);
auto bag_size = at::empty(offsets.sizes(), indices.options());
auto offset2bag =
at::empty({indices.size(0)}, indices.options()); // offset2bag = [0 0 0 0 0]
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto output = at::empty({numBags, featureSize}, weight.options());
Tensor max_indices;
if (mode == MODE_MAX) {
max_indices = at::empty({numBags, featureSize}, indices.options());
} else {
// No need to allocate if we aren't doing a backwards pass
max_indices = at::empty({0}, indices.options());
}
// Block shape tuned per platform (AMD wavefront is 64 wide).
#ifdef __HIP_PLATFORM_HCC__
dim3 block = dim3(64, 4);
#else
dim3 block = dim3(32, 8);
#endif
int grid = 1024;
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, weight.scalar_type(), "embedding_bag_cuda", [&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "embedding_bag_cuda", [&] {
EmbeddingBag_updateOutputKernel<scalar_t><<<grid, block, 0, stream>>>(
indices.data_ptr<int64_t>(), offsets.data_ptr<int64_t>(),
weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(),
offset2bag.data_ptr<int64_t>(), numIndices, numBags, featureSize,
weight.stride(0), weight.stride(1), mode, bag_size.data_ptr<int64_t>(),
mode == MODE_MAX ? max_indices.data_ptr<int64_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL,
per_sample_weights.defined() ? per_sample_weights.stride(0) : 0);
});
});
AT_CUDA_CHECK(cudaGetLastError());
return std::tuple<Tensor, Tensor, Tensor, Tensor>(output, offset2bag, bag_size, max_indices);
}
// Dense backward entry point: dispatches to the mode-specific helper
// (sum/mean share one path; max has its own) after device checks.
Tensor _embedding_bag_dense_backward_cuda(const Tensor &grad_, const Tensor &indices,
const Tensor &offsets,
const Tensor &offset2bag,
const Tensor &bag_size_,
const Tensor &max_indices,
int64_t num_weights,
bool scale_grad_by_freq, int64_t mode,
const Tensor& per_sample_weights) {
// indices, offsets and offset2bag are assumed having correct dtypes and
// contiguous here due to the checks in _embedding_bag_backward in
// EmbeddingBag.cpp.
// Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml
// for more details.
Tensor grad = grad_.contiguous();
auto indices_arg = TensorArg(indices, "indices", 1);
auto offsets_arg = TensorArg(offsets, "offsets", 1);
auto grad_arg = TensorArg(grad, "grad", 1);
checkSameGPU("embedding_bag_cuda", grad_arg, offsets_arg);
checkSameGPU("embedding_bag_cuda", grad_arg, indices_arg);
switch (mode) {
case MODE_SUM:
case MODE_MEAN:
// per_sample_weights is only meaningful for MODE_SUM.
if (mode == MODE_MEAN)
AT_ASSERT(!per_sample_weights.defined());
return embedding_bag_backward_cuda_sum_avg(grad, indices, offset2bag,
bag_size_, num_weights, scale_grad_by_freq, mode, per_sample_weights);
case MODE_MAX:
AT_ASSERT(!per_sample_weights.defined());
return embedding_bag_backward_cuda_max(grad, max_indices, num_weights);
default:
AT_ERROR(
"Unknown mode for embedding_bag_backward_cuda ", mode);
}
}
// Warp-wide tree reduction: after the shuffles, lane 0 holds the sum of the
// `val` inputs of the participating lanes (other lanes hold partial sums).
template <typename scalar_t>
__inline__ __device__
static scalar_t warpReduceSum(scalar_t val) {
  int offset = C10_WARP_SIZE / 2;
  while (offset > 0) {
    val += WARP_SHFL_DOWN(val, offset);
    offset >>= 1;
  }
  return val;
}
// Gradient of embedding_bag w.r.t. per_sample_weights: one dot product of
// grad[bag_idx] with weight[embedding_idx] per sample, computed one sample
// per warp (lanes stride over the feature dimension, then warp-reduce).
template <typename scalar_t>
__global__ static void _embedding_bag_per_sample_weights_backward_kernel(
const scalar_t* grad, int64_t grad_stride0, int64_t grad_stride1,
const scalar_t* weight, int64_t weight_stride0, int64_t weight_stride1,
const int64_t* indices, // contiguous
const int64_t* offset2bag, // contiguous
int64_t num_samples,
int64_t embedding_features,
scalar_t* output) {
using accscalar_t = acc_type<scalar_t, true>;
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
const int warp = idx / C10_WARP_SIZE;
const int thread_in_warp = idx % C10_WARP_SIZE;
const int num_warps = blockDim.x * gridDim.x / C10_WARP_SIZE;
// Each warp is responsible for the accumulation of one sample.
// This involves doing one dot product between grad[bag_idx] and weight[embedding_idx].
for (int sample_idx = warp; sample_idx < num_samples; sample_idx += num_warps) {
// Accumulate in accscalar_t (e.g. float for half inputs) for precision.
accscalar_t result = 0.;
const int bag_idx = (int)offset2bag[sample_idx];
const int embedding_idx = (int)indices[sample_idx];
for (int feature_idx = thread_in_warp; feature_idx < embedding_features;
feature_idx += C10_WARP_SIZE) {
result +=
grad[grad_stride0 * bag_idx + grad_stride1 * feature_idx] *
weight[weight_stride0 * embedding_idx + weight_stride1 * feature_idx];
}
result = warpReduceSum<accscalar_t>(result);
// Lane 0 holds the full dot product after the reduction.
if (thread_in_warp == 0) {
output[sample_idx] = result;
}
}
}
// Host wrapper: launches one warp per sample to compute the gradient of
// embedding_bag w.r.t. per_sample_weights (MODE_SUM only).
Tensor _embedding_bag_per_sample_weights_backward_cuda(
const Tensor& grad,
const Tensor& weight, // NB: embedding table, not per_sample_weights
const Tensor& indices,
const Tensor& offsets,
const Tensor& offset2bag,
int64_t mode) {
TORCH_CHECK(
mode == MODE_SUM,
"embedding_bag_backward: per_sample_weights only supported for mode='sum'");
AT_ASSERT(grad.dim() == 2);
auto embedding_features = grad.size(1);
AT_ASSERT(indices.dim() == 1);
auto num_samples = indices.size(0);
AT_ASSERT(weight.dim() == 2);
AT_ASSERT(weight.size(1) == embedding_features);
// One warp per sample; ceil-divide samples over warps per block.
const int threads_per_block = 1024;
const int warps_per_block = threads_per_block / C10_WARP_SIZE;
dim3 block(threads_per_block);
dim3 grid((num_samples + warps_per_block - 1) / warps_per_block);
auto output = at::empty({num_samples}, grad.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "_embedding_bag_per_sample_weights_backward_cuda", [&]() {
_embedding_bag_per_sample_weights_backward_kernel<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
grad.data_ptr<scalar_t>(), grad.stride(0), grad.stride(1),
weight.data_ptr<scalar_t>(), weight.stride(0), weight.stride(1),
indices.data_ptr<int64_t>(),
offset2bag.data_ptr<int64_t>(),
num_samples,
embedding_features,
output.data_ptr<scalar_t>());
}
);
// NOTE(review): unlike the other launches in this file, no
// cudaGetLastError() check follows this launch — consider adding one.
return output;
}
}
}
|
b89036ffde4f38a7186c9a64bdf245bff2887c72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../gpu_inc/post_filter.cuh"
// Refine integer disparities to sub-pixel precision by fitting a parabola
// through the matching costs at d-1, d, d+1 and taking its minimum.
//   d_cost          : cost volume laid out [row][col][disp] (img_h x img_w x max_disp)
//   d_disp          : integer disparity map (input)
//   d_filtered_disp : refined float disparity map (output)
//   invalid         : value written for out-of-range (invalidated) disparities
// One thread per pixel; flat thread index computed from a 2D grid.
__global__ void cu_subpixel(float *d_cost, uchar *d_disp, float *d_filtered_disp, int img_w, int img_h, int max_disp, int invalid)
{
	int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
	if (index > img_w * img_h - 1)  return;

	int col = index % img_w;
	int row = index / img_w;
	int d = d_disp[index];
	if (d > max_disp - 1)
	{
		// Marked invalid upstream: propagate the invalid code.
		d_filtered_disp[index] = invalid;
	}
	else if (!d || d == max_disp - 1)
	{
		// No neighbour cost on one side; keep the integer disparity.
		d_filtered_disp[index] = d;
	}
	else
	{
		int idx = row * img_w * max_disp + col * max_disp + d;
		float cost_d = d_cost[idx];
		float cost_d_sub = d_cost[idx - 1];
		float cost_d_plus = d_cost[idx + 1];
		// Parabola vertex. Guard the degenerate flat-cost case, which would
		// otherwise divide by zero and emit NaN/Inf disparities.
		float denom = 2.f * (cost_d_sub + cost_d_plus - 2.f * cost_d);
		if (denom != 0.f)
		{
			d_filtered_disp[index] = d + (cost_d_sub - cost_d_plus) / denom;
			if (d_filtered_disp[index] > max_disp - 1)
			{
				d_filtered_disp[index] = max_disp - 1;
			}
		}
		else
		{
			d_filtered_disp[index] = d;
		}
	}
}
// In-place quicksort of x over the INCLUSIVE index range [left_idx, right_idx].
// Recursive device code: callers must keep the range small (filter windows of
// at most ~25 elements in this file).
__device__ void sort_quick(float *x, int left_idx, int right_idx)
{
int i = left_idx, j = right_idx;
float pivot = x[(left_idx + right_idx) / 2];
// Hoare-style partition around the pivot value.
while (i <= j)
{
while (x[i] < pivot)
i++;
while (x[j] > pivot)
j--;
if (i <= j) {
float temp;
temp = x[i];
x[i] = x[j];
x[j] = temp;
i++;
j--;
}
};
// Recurse into the two sub-ranges that remain unsorted.
if (left_idx < j)
sort_quick(x, left_idx, j);
if (i < right_idx)
sort_quick(x, i, right_idx);
}
// Median filter applied only to invalid disparities (> max_disp - 1):
// replaces them with the median of the valid disparities in a win_w x win_h
// window, provided more than half of the window is valid.
// Assumes win_w * win_h <= 25 (size of the local scratch buffer).
__global__ void cu_median_filter(float *d_filtered_disp, int img_w, int img_h, int max_disp, int win_w, int win_h)
{
	int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
	if (index > img_w * img_h - 1)  return;

	int col = index % img_w;
	int row = index / img_w;
	// Skip the border where the window would leave the image. The column
	// bound needs the same "- 1" as the row bound; without it the rightmost
	// processed column reads one element past the end of each window row.
	if (row < win_h / 2 || row > img_h - win_h / 2 - 1 || col < win_w / 2 || col > img_w - win_w / 2 - 1)  return;
	if (d_filtered_disp[index] <= max_disp - 1)  return;

	float v[25];
	int valid_cnt = 0;
	for (int m = row - win_h / 2; m <= row + win_h / 2; m++)
	{
		for (int n = col - win_w / 2; n <= col + win_w / 2; n++)
		{
			int idx = m * img_w + n;
			if (d_filtered_disp[idx] <= max_disp - 1)
			{
				v[valid_cnt++] = d_filtered_disp[idx];
			}
		}
	}
	if (valid_cnt > win_w * win_h / 2)
	{
		// sort_quick takes INCLUSIVE bounds: sort v[0..valid_cnt-1].
		// Passing valid_cnt would include one uninitialized element and
		// skew the median (and read past the buffer for a full window).
		sort_quick(v, 0, valid_cnt - 1);
		d_filtered_disp[index] = v[valid_cnt / 2];
	}
}
// Initialize union-find state: every pixel starts as its own root with
// a component area of 0 (areas are accumulated in a later pass).
__global__ void cu_speckle_filter_init(int *label, int *area, int img_w, int img_h)
{
	const int total = img_w * img_h;
	const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
	if (idx >= total)  return;

	label[idx] = idx;
	area[idx] = 0;
}
// Follow parent links until the root of i's component is reached
// (a root is a node that is its own parent).
__device__ int Find(int i, int *d_label)
{
	int root = i;
	while (d_label[root] != root)
		root = d_label[root];
	return root;
}
// Link the component containing i toward the component containing j.
// atomicMin keeps the numerically smaller root as representative and makes
// the link safe when several threads update the same root concurrently.
// Note the link only happens when label_b < d_label[label_a]; the symmetric
// call from the neighbouring thread covers the other direction.
__device__ void Union(int i, int j, int *d_label) // join i to j
{
int label_a = Find(i, d_label);
int label_b = Find(j, d_label);
if (label_a != label_b)
{
//atomicExch(&d_label[label_a], label_b);
atomicMin(&d_label[label_a], label_b);
}
}
// Union-find linking pass: each pixel unions itself with every 4-connected
// neighbour whose disparity differs by less than max_dis, building connected
// components of similar disparity. Each edge is attempted from both sides by
// the two neighbouring threads.
// NOTE(review): a single pass may not fully flatten the forest — presumably
// the host iterates this kernel or the later Find() calls resolve the final
// roots; verify against the caller.
__global__ void cu_speckle_filter_union_find(float *d_filtered_disp, int *label, int *area, int img_w, int img_h, int max_dis)
{
int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
if (index > img_w * img_h - 1) return;
int row = index / img_w;
int col = index % img_w;
// Up, down, left, right neighbours (bounds-checked at the image edges).
if (row > 0 && fabs(d_filtered_disp[index] - d_filtered_disp[index - img_w]) < max_dis)
{
Union(index - img_w, index, label);
}
if (row < img_h - 1 && fabs(d_filtered_disp[index] - d_filtered_disp[index + img_w]) < max_dis)
{
Union(index + img_w, index, label);
}
if (col > 0 && fabs(d_filtered_disp[index] - d_filtered_disp[index - 1]) < max_dis)
{
Union(index - 1, index, label);
}
if (col < img_w - 1 && fabs(d_filtered_disp[index] - d_filtered_disp[index + 1]) < max_dis)
{
Union(index + 1, index, label);
}
}
// Flatten each pixel's label to its component root and count the pixels of
// each component into area[root] with atomics.
__global__ void cu_speckle_filter_sum_up(int *label, int *area, int img_w, int img_h)
{
int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
if (index > img_w * img_h - 1) return;
label[index] = Find(index, label);
atomicAdd(&area[label[index]], 1);
}
// Final speckle-removal pass: every pixel whose connected component holds at
// most max_size pixels (a small "speckle") has its disparity replaced by
// `value`.
__global__ void cu_speckle_filter_end(float *d_filtered_disp, int *label, int *area, int img_w, int img_h, int value, int max_size)
{
int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
if (index > img_w * img_h - 1) return;
if (area[label[index]] <= max_size)
{
d_filtered_disp[index] = value;
}
} | b89036ffde4f38a7186c9a64bdf245bff2887c72.cu | #include "../gpu_inc/post_filter.cuh"
// Refine integer disparities to sub-pixel precision by fitting a parabola
// through the matching costs at d-1, d, d+1 and taking its minimum.
//   d_cost          : cost volume laid out [row][col][disp] (img_h x img_w x max_disp)
//   d_disp          : integer disparity map (input)
//   d_filtered_disp : refined float disparity map (output)
//   invalid         : value written for out-of-range (invalidated) disparities
// One thread per pixel; flat thread index computed from a 2D grid.
__global__ void cu_subpixel(float *d_cost, uchar *d_disp, float *d_filtered_disp, int img_w, int img_h, int max_disp, int invalid)
{
	int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
	if (index > img_w * img_h - 1)  return;

	int col = index % img_w;
	int row = index / img_w;
	int d = d_disp[index];
	if (d > max_disp - 1)
	{
		// Marked invalid upstream: propagate the invalid code.
		d_filtered_disp[index] = invalid;
	}
	else if (!d || d == max_disp - 1)
	{
		// No neighbour cost on one side; keep the integer disparity.
		d_filtered_disp[index] = d;
	}
	else
	{
		int idx = row * img_w * max_disp + col * max_disp + d;
		float cost_d = d_cost[idx];
		float cost_d_sub = d_cost[idx - 1];
		float cost_d_plus = d_cost[idx + 1];
		// Parabola vertex. Guard the degenerate flat-cost case, which would
		// otherwise divide by zero and emit NaN/Inf disparities.
		float denom = 2.f * (cost_d_sub + cost_d_plus - 2.f * cost_d);
		if (denom != 0.f)
		{
			d_filtered_disp[index] = d + (cost_d_sub - cost_d_plus) / denom;
			if (d_filtered_disp[index] > max_disp - 1)
			{
				d_filtered_disp[index] = max_disp - 1;
			}
		}
		else
		{
			d_filtered_disp[index] = d;
		}
	}
}
// In-place quicksort of x over the INCLUSIVE index range [left_idx, right_idx].
// Recursive device code: callers must keep the range small (filter windows of
// at most ~25 elements in this file).
__device__ void sort_quick(float *x, int left_idx, int right_idx)
{
int i = left_idx, j = right_idx;
float pivot = x[(left_idx + right_idx) / 2];
// Hoare-style partition around the pivot value.
while (i <= j)
{
while (x[i] < pivot)
i++;
while (x[j] > pivot)
j--;
if (i <= j) {
float temp;
temp = x[i];
x[i] = x[j];
x[j] = temp;
i++;
j--;
}
};
// Recurse into the two sub-ranges that remain unsorted.
if (left_idx < j)
sort_quick(x, left_idx, j);
if (i < right_idx)
sort_quick(x, i, right_idx);
}
// Median filter applied only to invalid disparities (> max_disp - 1):
// replaces them with the median of the valid disparities in a win_w x win_h
// window, provided more than half of the window is valid.
// Assumes win_w * win_h <= 25 (size of the local scratch buffer).
__global__ void cu_median_filter(float *d_filtered_disp, int img_w, int img_h, int max_disp, int win_w, int win_h)
{
	int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
	if (index > img_w * img_h - 1)  return;

	int col = index % img_w;
	int row = index / img_w;
	// Skip the border where the window would leave the image. The column
	// bound needs the same "- 1" as the row bound; without it the rightmost
	// processed column reads one element past the end of each window row.
	if (row < win_h / 2 || row > img_h - win_h / 2 - 1 || col < win_w / 2 || col > img_w - win_w / 2 - 1)  return;
	if (d_filtered_disp[index] <= max_disp - 1)  return;

	float v[25];
	int valid_cnt = 0;
	for (int m = row - win_h / 2; m <= row + win_h / 2; m++)
	{
		for (int n = col - win_w / 2; n <= col + win_w / 2; n++)
		{
			int idx = m * img_w + n;
			if (d_filtered_disp[idx] <= max_disp - 1)
			{
				v[valid_cnt++] = d_filtered_disp[idx];
			}
		}
	}
	if (valid_cnt > win_w * win_h / 2)
	{
		// sort_quick takes INCLUSIVE bounds: sort v[0..valid_cnt-1].
		// Passing valid_cnt would include one uninitialized element and
		// skew the median (and read past the buffer for a full window).
		sort_quick(v, 0, valid_cnt - 1);
		d_filtered_disp[index] = v[valid_cnt / 2];
	}
}
// Initialize the union-find state for speckle filtering: every pixel
// starts as the root of its own single-pixel component with area 0.
__global__ void cu_speckle_filter_init(int *label, int *area, int img_w, int img_h)
{
	const int block_id = blockIdx.y * gridDim.x + blockIdx.x;
	const int lane_in_block = threadIdx.y * blockDim.x + threadIdx.x;
	const int pixel = block_id * blockDim.x * blockDim.y + lane_in_block;
	const int total_pixels = img_w * img_h;
	if (pixel >= total_pixels) return;
	label[pixel] = pixel;
	area[pixel] = 0;
}
// Return the root representative of i's component by chasing parent
// links until a self-labeled node (d_label[r] == r) is reached.
// No path compression is performed here.
__device__ int Find(int i, int *d_label)
{
	int parent = d_label[i];
	while (parent != i)
	{
		i = parent;
		parent = d_label[i];
	}
	return i;
}
// Merge the components containing i and j by linking the root of i's
// tree to the other root. atomicMin (instead of a plain store or
// atomicExch) keeps the update monotonic — labels only ever decrease —
// so racing concurrent unions cannot ping-pong between two roots.
__device__ void Union(int i, int j, int *d_label) // join i to j
{
	int label_a = Find(i, d_label);
	int label_b = Find(j, d_label);
	if (label_a != label_b)
	{
		//atomicExch(&d_label[label_a], label_b);
		atomicMin(&d_label[label_a], label_b);
	}
}
// Union-find linking pass: merge each pixel with any of its 4-neighbors
// whose disparity differs by less than max_dis. Races between
// concurrent Find/Union calls are tamed by the atomicMin inside Union
// (labels only decrease). `area` is unused in this pass; it is filled
// by cu_speckle_filter_sum_up afterwards.
// NOTE(review): a single pass may leave non-flattened trees — confirm
// the host-side driver runs the follow-up passes it needs.
__global__ void cu_speckle_filter_union_find(float *d_filtered_disp, int *label, int *area, int img_w, int img_h, int max_dis)
{
	int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
	if (index > img_w * img_h - 1) return;
	int row = index / img_w;
	int col = index % img_w;
	// Up neighbor.
	if (row > 0 && fabs(d_filtered_disp[index] - d_filtered_disp[index - img_w]) < max_dis)
	{
		Union(index - img_w, index, label);
	}
	// Down neighbor.
	if (row < img_h - 1 && fabs(d_filtered_disp[index] - d_filtered_disp[index + img_w]) < max_dis)
	{
		Union(index + img_w, index, label);
	}
	// Left neighbor.
	if (col > 0 && fabs(d_filtered_disp[index] - d_filtered_disp[index - 1]) < max_dis)
	{
		Union(index - 1, index, label);
	}
	// Right neighbor.
	if (col < img_w - 1 && fabs(d_filtered_disp[index] - d_filtered_disp[index + 1]) < max_dis)
	{
		Union(index + 1, index, label);
	}
}
// Second labeling pass: compress each pixel's label directly to its
// component root, then count component sizes with one atomicAdd per
// pixel into area[root]. Must run after the union-find pass and after
// cu_speckle_filter_init has zeroed `area`.
__global__ void cu_speckle_filter_sum_up(int *label, int *area, int img_w, int img_h)
{
	int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
	if (index > img_w * img_h - 1) return;
	label[index] = Find(index, label);
	atomicAdd(&area[label[index]], 1);
}
// Final speckle-removal pass: any pixel whose connected component has
// area <= max_size is overwritten with `value` (an invalid/background
// marker chosen by the caller), erasing small disparity speckles.
__global__ void cu_speckle_filter_end(float *d_filtered_disp, int *label, int *area, int img_w, int img_h, int value, int max_size)
{
	int index = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
	if (index > img_w * img_h - 1) return;
	if (area[label[index]] <= max_size)
	{
		d_filtered_disp[index] = value;
	}
}
a8a9a132882d76544787a872864a45e2a0d80739.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/berhu_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Forward_gpu_kernel(
const int nthreads,
const Dtype* const data_label,
Dtype* data_diff,
Dtype* bad_pixel_data,
const int num,
const int channels,
const int height,
const int width,
const bool has_max_label,
const bool has_min_label,
const bool has_invalid_label,
const Dtype max_label,
const Dtype min_label,
const Dtype invalid_label,
const Dtype C,
const bool has_h_rate,
const Dtype H){
CUDA_KERNEL_LOOP(index, nthreads){
const int n = index / height;
const int h = index % height;
const int data_offset = (n*channels*height+h)*width;
const int bad_pixel_idx = index;
const int interval = height * width;
// Iter the width and channels
for (int w = 0; w < width; w++){
// Iter the channels
int err_counter = 0;
for (int c = 0; c < channels; c++){
const int idx = data_offset + c * interval + w;
Dtype dataval = data_label[idx];
Dtype diffval = data_diff[idx];
if (has_max_label && dataval > max_label){
err_counter++;
}else if(has_min_label && dataval < min_label){
err_counter++;
}else if(has_invalid_label && fabs(dataval - invalid_label) < 0.0001){
err_counter++;
}
// alter the diff value
if (diffval > 0 && diffval < C){
// L1
data_diff[idx] = C;
}else if(diffval < 0 && -diffval < C){
data_diff[idx] = -C;
}
if (has_h_rate && diffval > H){
data_diff[idx] = H;
}else if(has_h_rate && -diffval > H){
data_diff[idx] = -H;
}
}
// Only if all channels invalid, the pixel will be considered
// as invalid
if(err_counter == channels){
bad_pixel_data[bad_pixel_idx] += channels;
for (int c = 0; c < channels; c++){
const int idx = data_offset + c * interval + w;
data_diff[idx] = 0;
}
}
}
}
}
template <typename Dtype>
void BerhuLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(),
bottom[1]->gpu_data(),
diff_.mutable_gpu_data());
Dtype max_diff = 0;
switch(c_rate_mode_){
case MAX:
// Get the abs max diff to determine the C
max_diff = caffe_gpu_amax(count, diff_.gpu_data(), 1);
// Calc the Threshold C
break;
case AVE:
// Calc the mean of the abs diff
caffe_gpu_asum(count, diff_.gpu_data(), &max_diff);
max_diff /= count;
break;
default:
LOG(FATAL) << "False c_rate_mode";
break;
}
Dtype C = fabs(max_diff * c_rate_);
Dtype H = fabs(max_diff * h_rate_);
Dtype* data_diff = diff_.mutable_gpu_data();
// const Dtype* data_pred = bottom[0]->cpu_data();
const Dtype* data_label = bottom[1]->gpu_data();
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int height = bottom[0]->height();
const int width = bottom[0]->width();
// The number of kernel is num * height, process a row each time
const int num_kernels = num * height;
// Set the bad_pixel_ buffer to zero
Dtype* bad_pixel_data = bad_pixel_.mutable_gpu_data();
caffe_gpu_set(bad_pixel_.count(), Dtype(0), bad_pixel_data);
// Find the bad pixel and alter the diff
hipLaunchKernelGGL(( Forward_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels,
data_label,
data_diff,
bad_pixel_data,
num,
channels,
height,
width,
has_max_label_,
has_min_label_,
has_invalid_label_,
max_label_,
min_label_,
invalid_label_,
C,
has_h_rate_,
H);
Dtype bad_pixel_count;
caffe_gpu_asum(bad_pixel_.count(), bad_pixel_data, &bad_pixel_count);
Dtype dot;
caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
Dtype loss = dot / Dtype(2) / (count-bad_pixel_count);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void BerhuLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
Dtype alpha;
if (normalize_){
alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->count();
}else{
alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
}
caffe_gpu_axpby(
bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
bottom[i]->mutable_gpu_diff()); // b
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BerhuLossLayer);
} // namespace caffe
| a8a9a132882d76544787a872864a45e2a0d80739.cu | #include <vector>
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/berhu_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Forward_gpu_kernel(
const int nthreads,
const Dtype* const data_label,
Dtype* data_diff,
Dtype* bad_pixel_data,
const int num,
const int channels,
const int height,
const int width,
const bool has_max_label,
const bool has_min_label,
const bool has_invalid_label,
const Dtype max_label,
const Dtype min_label,
const Dtype invalid_label,
const Dtype C,
const bool has_h_rate,
const Dtype H){
CUDA_KERNEL_LOOP(index, nthreads){
const int n = index / height;
const int h = index % height;
const int data_offset = (n*channels*height+h)*width;
const int bad_pixel_idx = index;
const int interval = height * width;
// Iter the width and channels
for (int w = 0; w < width; w++){
// Iter the channels
int err_counter = 0;
for (int c = 0; c < channels; c++){
const int idx = data_offset + c * interval + w;
Dtype dataval = data_label[idx];
Dtype diffval = data_diff[idx];
if (has_max_label && dataval > max_label){
err_counter++;
}else if(has_min_label && dataval < min_label){
err_counter++;
}else if(has_invalid_label && fabs(dataval - invalid_label) < 0.0001){
err_counter++;
}
// alter the diff value
if (diffval > 0 && diffval < C){
// L1
data_diff[idx] = C;
}else if(diffval < 0 && -diffval < C){
data_diff[idx] = -C;
}
if (has_h_rate && diffval > H){
data_diff[idx] = H;
}else if(has_h_rate && -diffval > H){
data_diff[idx] = -H;
}
}
// Only if all channels invalid, the pixel will be considered
// as invalid
if(err_counter == channels){
bad_pixel_data[bad_pixel_idx] += channels;
for (int c = 0; c < channels; c++){
const int idx = data_offset + c * interval + w;
data_diff[idx] = 0;
}
}
}
}
}
template <typename Dtype>
void BerhuLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(),
bottom[1]->gpu_data(),
diff_.mutable_gpu_data());
Dtype max_diff = 0;
switch(c_rate_mode_){
case MAX:
// Get the abs max diff to determine the C
max_diff = caffe_gpu_amax(count, diff_.gpu_data(), 1);
// Calc the Threshold C
break;
case AVE:
// Calc the mean of the abs diff
caffe_gpu_asum(count, diff_.gpu_data(), &max_diff);
max_diff /= count;
break;
default:
LOG(FATAL) << "False c_rate_mode";
break;
}
Dtype C = fabs(max_diff * c_rate_);
Dtype H = fabs(max_diff * h_rate_);
Dtype* data_diff = diff_.mutable_gpu_data();
// const Dtype* data_pred = bottom[0]->cpu_data();
const Dtype* data_label = bottom[1]->gpu_data();
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int height = bottom[0]->height();
const int width = bottom[0]->width();
// The number of kernel is num * height, process a row each time
const int num_kernels = num * height;
// Set the bad_pixel_ buffer to zero
Dtype* bad_pixel_data = bad_pixel_.mutable_gpu_data();
caffe_gpu_set(bad_pixel_.count(), Dtype(0), bad_pixel_data);
// Find the bad pixel and alter the diff
Forward_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels,
data_label,
data_diff,
bad_pixel_data,
num,
channels,
height,
width,
has_max_label_,
has_min_label_,
has_invalid_label_,
max_label_,
min_label_,
invalid_label_,
C,
has_h_rate_,
H);
Dtype bad_pixel_count;
caffe_gpu_asum(bad_pixel_.count(), bad_pixel_data, &bad_pixel_count);
Dtype dot;
caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot);
Dtype loss = dot / Dtype(2) / (count-bad_pixel_count);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
void BerhuLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const Dtype sign = (i == 0) ? 1 : -1;
Dtype alpha;
if (normalize_){
alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->count();
}else{
alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num();
}
caffe_gpu_axpby(
bottom[i]->count(), // count
alpha, // alpha
diff_.gpu_data(), // a
Dtype(0), // beta
bottom[i]->mutable_gpu_diff()); // b
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BerhuLossLayer);
} // namespace caffe
|
8f283d1e264eae53d5730a33c2d24d3786812ee9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Transposes the N x N float matrix M into MT (N is an external
// constant — presumably from includes.h; confirm). One thread per
// element; threads falling outside the matrix do nothing.
__global__ void matrixTrans(float * M, float * MT)
{
	int row = blockIdx.x * blockDim.x + threadIdx.x;
	int col = blockIdx.y * blockDim.y + threadIdx.y;
	if (row < N && col < N)
	{
		// BUGFIX: the element was previously read through an `int`
		// temporary, truncating every float value; and MT was zeroed
		// BEFORE the bounds check, writing out of bounds whenever the
		// grid overshot the matrix.
		MT[row + col * N] = M[col + row * N];
	}
} | 8f283d1e264eae53d5730a33c2d24d3786812ee9.cu | #include "includes.h"
// Transposes the N x N float matrix M into MT (N is an external
// constant — presumably from includes.h; confirm). One thread per
// element; threads falling outside the matrix do nothing.
__global__ void matrixTrans(float * M, float * MT)
{
	int row = blockIdx.x * blockDim.x + threadIdx.x;
	int col = blockIdx.y * blockDim.y + threadIdx.y;
	if (row < N && col < N)
	{
		// BUGFIX: the element was previously read through an `int`
		// temporary, truncating every float value; and MT was zeroed
		// BEFORE the bounds check, writing out of bounds whenever the
		// grid overshot the matrix.
		MT[row + col * N] = M[col + row * N];
	}
}
ef2dac92c64494cc223873affa01c3ff256552dc.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <hipcub/hipcub.hpp>
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/Dispatch.h"
#include "open3d/core/Tensor.h"
#include "open3d/core/kernel/CUDALauncher.cuh"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryImpl.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryJacobianImpl.h"
#include "open3d/t/pipelines/kernel/TransformationConverter.h"
namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {
namespace odometry {
// Reduces a {29, n} scratch buffer (29 rows of n partial sums each) to
// a {29} tensor with one hipcub DeviceReduce::Sum per row, then decodes
// the result into the 6x6 normal-equation system and solves for
// `delta` / `residual` via DecodeAndSolve6x6.
// NOTE(review): temp storage is hipMalloc'd and hipFree'd once per row
// (29 allocations per call) and the allocation return codes are not
// checked — consider sizing once before the loop and checking errors.
void ReduceAndSolve6x6(float* A_reduction,
                       core::Tensor& delta,
                       core::Tensor& residual,
                       int64_t n,
                       const core::Device& device) {
    core::Tensor output_29 =
            core::Tensor::Empty({29}, core::Dtype::Float32, device);
    float* output_29_data = output_29.GetDataPtr<float>();

    // Reduction of {29, N} to {29}.
    for (int i = 0; i < 29; i++) {
        // Determine temporary device storage requirements.
        // (The first call with d_temp_storage == NULL only computes
        // temp_storage_bytes; it performs no reduction.)
        void* d_temp_storage = NULL;
        size_t temp_storage_bytes = 0;
        hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes,
                                  A_reduction + i * n, output_29_data + i, n);
        // Allocate temporary storage.
        hipMalloc(&d_temp_storage, temp_storage_bytes);
        // Run sum-reduction.
        hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes,
                                  A_reduction + i * n, output_29_data + i, n);
        hipFree(d_temp_storage);
    }
    DecodeAndSolve6x6(output_29, delta, residual);
}
// Final warp-level stage of a shared-memory tree reduction: folds
// strides 32, 16, 8, 4, 2, 1 into local_sum[tid]. Callers must invoke
// it only for tid < 32, and local_sum must hold at least 64 elements.
// NOTE(review): this is the classic warp-synchronous idiom relying on
// `volatile` and implicit lockstep execution; on architectures with
// independent thread scheduling a __syncwarp()-based variant is the
// safe form — confirm the target architectures.
template <typename T>
__device__ inline void WarpReduceSum(volatile T* local_sum, const int tid) {
    local_sum[tid] += local_sum[tid + 32];
    local_sum[tid] += local_sum[tid + 16];
    local_sum[tid] += local_sum[tid + 8];
    local_sum[tid] += local_sum[tid + 4];
    local_sum[tid] += local_sum[tid + 2];
    local_sum[tid] += local_sum[tid + 1];
}
// Block-wide tree reduction of a BLOCK_SIZE-element shared-memory array
// into local_sum[0]. Every thread of the block must call this (there
// are __syncthreads() barriers between the stages); tid is the
// flattened thread index. BLOCK_SIZE is a compile-time constant, so
// stages above the actual block size compile away. The last 64 -> 1
// step is the warp-synchronous WarpReduceSum.
template <typename T, size_t BLOCK_SIZE>
__device__ inline void BlockReduceSum(const int tid, volatile T* local_sum) {
    if (BLOCK_SIZE >= 512) {
        if (tid < 256) {
            local_sum[tid] += local_sum[tid + 256];
        }
        __syncthreads();
    }
    if (BLOCK_SIZE >= 256) {
        if (tid < 128) {
            local_sum[tid] += local_sum[tid + 128];
        }
        __syncthreads();
    }
    if (BLOCK_SIZE >= 128) {
        if (tid < 64) {
            local_sum[tid] += local_sum[tid + 64];
        }
        __syncthreads();
    }
    if (tid < 32) {
        WarpReduceSum<T>(local_sum, tid);
    }
}
// Block-wide tree reduction of TWO BLOCK_SIZE-element shared-memory
// arrays at once (saves barrier round-trips vs. two separate calls).
// Results land in element [0] of each array. Every thread of the block
// must call this — __syncthreads() separates the stages.
template <typename T, size_t BLOCK_SIZE>
__device__ inline void BlockReduceSum(const int tid,
                                      volatile T* local_sum0,
                                      volatile T* local_sum1) {
    if (BLOCK_SIZE >= 512) {
        if (tid < 256) {
            local_sum0[tid] += local_sum0[tid + 256];
            local_sum1[tid] += local_sum1[tid + 256];
        }
        __syncthreads();
    }
    if (BLOCK_SIZE >= 256) {
        if (tid < 128) {
            local_sum0[tid] += local_sum0[tid + 128];
            local_sum1[tid] += local_sum1[tid + 128];
        }
        __syncthreads();
    }
    if (BLOCK_SIZE >= 128) {
        if (tid < 64) {
            local_sum0[tid] += local_sum0[tid + 64];
            local_sum1[tid] += local_sum1[tid + 64];
        }
        __syncthreads();
    }
    if (tid < 32) {
        // BUGFIX: was WarpReduceSum<float>, which silently instantiated
        // the float specialization for every T (wrong results for
        // double/int blocks). Use the template parameter.
        WarpReduceSum<T>(local_sum0, tid);
        WarpReduceSum<T>(local_sum1, tid);
    }
}
// Block-wide tree reduction of THREE BLOCK_SIZE-element shared-memory
// arrays at once. Results land in element [0] of each array. Every
// thread of the block must call this — __syncthreads() separates the
// stages.
template <typename T, size_t BLOCK_SIZE>
__device__ inline void BlockReduceSum(const int tid,
                                      volatile T* local_sum0,
                                      volatile T* local_sum1,
                                      volatile T* local_sum2) {
    if (BLOCK_SIZE >= 512) {
        if (tid < 256) {
            local_sum0[tid] += local_sum0[tid + 256];
            local_sum1[tid] += local_sum1[tid + 256];
            local_sum2[tid] += local_sum2[tid + 256];
        }
        __syncthreads();
    }
    if (BLOCK_SIZE >= 256) {
        if (tid < 128) {
            local_sum0[tid] += local_sum0[tid + 128];
            local_sum1[tid] += local_sum1[tid + 128];
            local_sum2[tid] += local_sum2[tid + 128];
        }
        __syncthreads();
    }
    if (BLOCK_SIZE >= 128) {
        if (tid < 64) {
            local_sum0[tid] += local_sum0[tid + 64];
            local_sum1[tid] += local_sum1[tid + 64];
            local_sum2[tid] += local_sum2[tid + 64];
        }
        __syncthreads();
    }
    if (tid < 32) {
        // BUGFIX: was WarpReduceSum<float>, which silently instantiated
        // the float specialization for every T (wrong results for
        // double/int blocks). Use the template parameter.
        WarpReduceSum<T>(local_sum0, tid);
        WarpReduceSum<T>(local_sum1, tid);
        WarpReduceSum<T>(local_sum2, tid);
    }
}
// Per-block accumulation of the 6x6 point-to-plane ICP normal
// equations. Each thread handles one pixel (x, y); per-thread Jacobian
// outer products are tree-reduced in shared memory and thread 0 of
// each block atomically adds into global_sum (29 floats: 21 upper-
// triangular JtJ + 6 Jtr + residual + inlier count). Requires
// blockDim.x * blockDim.y == kBlockSize (256).
__global__ void ComputePosePointToPlaneCUDAKernel(
        NDArrayIndexer source_vertex_indexer,
        NDArrayIndexer target_vertex_indexer,
        NDArrayIndexer target_normal_indexer,
        TransformIndexer ti,
        float* global_sum,
        int rows,
        int cols,
        float depth_diff) {
    const int kBlockSize = 256;
    __shared__ float local_sum0[kBlockSize];
    __shared__ float local_sum1[kBlockSize];
    __shared__ float local_sum2[kBlockSize];

    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;

    local_sum0[tid] = 0;
    local_sum1[tid] = 0;
    local_sum2[tid] = 0;

    // BUGFIX: the previous version returned early for out-of-image
    // threads. BlockReduceSum below contains __syncthreads(), so a
    // divergent early exit is undefined behavior (potential hang or
    // corrupted sums) for any block straddling the image border.
    // Out-of-image threads now stay and contribute zeros.
    const bool in_bounds = (y < rows && x < cols);

    float J[6] = {0}, reduction[21 + 6 + 2];
    float r = 0;

    bool valid = in_bounds &&
                 GetJacobianPointToPlane(x, y, depth_diff,
                                         source_vertex_indexer,
                                         target_vertex_indexer,
                                         target_normal_indexer, ti, J, r);

    // Dump J, r into JtJ (upper triangle, 21 entries) and Jtr (6).
    int offset = 0;
    for (int i = 0; i < 6; ++i) {
        for (int j = 0; j <= i; ++j) {
            reduction[offset++] = J[i] * J[j];
        }
    }
    for (int i = 0; i < 6; ++i) {
        reduction[offset++] = J[i] * r;
    }
    reduction[offset++] = r * r;
    reduction[offset++] = valid;

    // Sum reduction: JtJ(21) and Jtr(6), three channels per pass.
    for (size_t i = 0; i < 27; i += 3) {
        local_sum0[tid] = valid ? reduction[i + 0] : 0;
        local_sum1[tid] = valid ? reduction[i + 1] : 0;
        local_sum2[tid] = valid ? reduction[i + 2] : 0;
        __syncthreads();

        BlockReduceSum<float, kBlockSize>(tid, local_sum0, local_sum1,
                                          local_sum2);

        if (tid == 0) {
            atomicAdd(&global_sum[i + 0], local_sum0[0]);
            atomicAdd(&global_sum[i + 1], local_sum1[0]);
            atomicAdd(&global_sum[i + 2], local_sum2[0]);
        }
        __syncthreads();
    }

    // Sum reduction: residual(1) and inlier(1)
    {
        local_sum0[tid] = valid ? reduction[27] : 0;
        local_sum1[tid] = valid ? reduction[28] : 0;
        __syncthreads();

        BlockReduceSum<float, kBlockSize>(tid, local_sum0, local_sum1);
        if (tid == 0) {
            atomicAdd(&global_sum[27], local_sum0[0]);
            atomicAdd(&global_sum[28], local_sum1[0]);
        }
        __syncthreads();
    }
}
void ComputePosePointToPlaneCUDA(const core::Tensor& source_vertex_map,
const core::Tensor& target_vertex_map,
const core::Tensor& target_normal_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
core::Tensor& residual,
float depth_diff) {
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
NDArrayIndexer target_vertex_indexer(target_vertex_map, 2);
NDArrayIndexer target_normal_indexer(target_normal_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target;
TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
// A_29xN is a {29, N} shaped tensor, which is later reduced to {29} where
// [0, 20] elements are used to construct {6,6} shaped symmetric AtA
// matrix, [21, 26] elements are used to construct {6} AtB matrix, element
// [27] stores residual and element [28] stores count.
core::Tensor global_sum =
core::Tensor::Zeros({29}, core::Dtype::Float32, device);
float* global_sum_ptr = global_sum.GetDataPtr<float>();
const int kThreadSize = 16;
const dim3 blocks((cols + kThreadSize - 1) / kThreadSize,
(rows + kThreadSize - 1) / kThreadSize);
const dim3 threads(kThreadSize, kThreadSize);
hipLaunchKernelGGL(( ComputePosePointToPlaneCUDAKernel), dim3(blocks), dim3(threads), 0, 0,
source_vertex_indexer, target_vertex_indexer, target_normal_indexer,
ti, global_sum_ptr, rows, cols, depth_diff);
OPEN3D_CUDA_CHECK(hipDeviceSynchronize());
DecodeAndSolve6x6(global_sum, delta, residual);
}
void ComputePoseIntensityCUDA(const core::Tensor& source_depth,
const core::Tensor& target_depth,
const core::Tensor& source_intensity,
const core::Tensor& target_intensity,
const core::Tensor& target_intensity_dx,
const core::Tensor& target_intensity_dy,
const core::Tensor& source_vertex_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
core::Tensor& residual,
float depth_diff) {
NDArrayIndexer source_depth_indexer(source_depth, 2);
NDArrayIndexer target_depth_indexer(target_depth, 2);
NDArrayIndexer source_intensity_indexer(source_intensity, 2);
NDArrayIndexer target_intensity_indexer(target_intensity, 2);
NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target;
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
const int64_t n = rows * cols;
// A_29xN is a {29, N} shaped tensor, which is later reduced to
// {29} where [0, 20] elements are used to construct {6,6} shaped
// symmetric AtA matrix, [21, 26] elements are used to construct {6} AtB
// matrix, element [27] stores residual and element [28] stores count.
core::Tensor A_29xN =
core::Tensor::Empty({29, n}, core::Dtype::Float32, device);
float* A_reduction = A_29xN.GetDataPtr<float>();
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
float J_I[6];
float r_I;
bool valid = GetJacobianIntensity(
workload_idx, cols, depth_diff, source_depth_indexer,
target_depth_indexer, source_intensity_indexer,
target_intensity_indexer, target_intensity_dx_indexer,
target_intensity_dy_indexer, source_vertex_indexer, ti,
J_I, r_I);
if (valid) {
for (int i = 0, j = 0; j < 6; j++) {
for (int k = 0; k <= j; k++) {
A_reduction[n * i + workload_idx] = J_I[j] * J_I[k];
i++;
}
A_reduction[n * (21 + j) + workload_idx] = J_I[j] * r_I;
}
A_reduction[n * 27 + workload_idx] = r_I * r_I;
A_reduction[n * 28 + workload_idx] = 1;
} else {
for (int i = 0; i < 29; i++) {
A_reduction[n * i + workload_idx] = 0;
}
}
});
ReduceAndSolve6x6(A_reduction, delta, residual, n, device);
}
void ComputePoseHybridCUDA(const core::Tensor& source_depth,
const core::Tensor& target_depth,
const core::Tensor& source_intensity,
const core::Tensor& target_intensity,
const core::Tensor& target_depth_dx,
const core::Tensor& target_depth_dy,
const core::Tensor& target_intensity_dx,
const core::Tensor& target_intensity_dy,
const core::Tensor& source_vertex_map,
const core::Tensor& intrinsics,
const core::Tensor& init_source_to_target,
core::Tensor& delta,
core::Tensor& residual,
float depth_diff) {
NDArrayIndexer source_depth_indexer(source_depth, 2);
NDArrayIndexer target_depth_indexer(target_depth, 2);
NDArrayIndexer source_intensity_indexer(source_intensity, 2);
NDArrayIndexer target_intensity_indexer(target_intensity, 2);
NDArrayIndexer target_depth_dx_indexer(target_depth_dx, 2);
NDArrayIndexer target_depth_dy_indexer(target_depth_dy, 2);
NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
core::Device device = source_vertex_map.GetDevice();
core::Tensor trans = init_source_to_target;
t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
const int64_t rows = source_vertex_indexer.GetShape(0);
const int64_t cols = source_vertex_indexer.GetShape(1);
const int64_t n = rows * cols;
// A_29xN is a {29, N} shaped tensor, which is later reduced to
// {29} where [0, 20] elements are used to construct {6,6} shaped
// symmetric AtA matrix, [21, 26] elements are used to construct {6} AtB
// matrix, element [27] stores residual and element [28] stores count.
core::Tensor A_29xN =
core::Tensor::Empty({29, n}, core::Dtype::Float32, device);
float* A_reduction = A_29xN.GetDataPtr<float>();
core::kernel::CUDALauncher::LaunchGeneralKernel(
n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
float J_I[6], J_D[6];
float r_I, r_D;
bool valid = GetJacobianHybrid(
workload_idx, cols, depth_diff, source_depth_indexer,
target_depth_indexer, source_intensity_indexer,
target_intensity_indexer, target_depth_dx_indexer,
target_depth_dy_indexer, target_intensity_dx_indexer,
target_intensity_dy_indexer, source_vertex_indexer, ti,
J_I, J_D, r_I, r_D);
if (valid) {
for (int i = 0, j = 0; j < 6; j++) {
for (int k = 0; k <= j; k++) {
A_reduction[n * i + workload_idx] =
J_I[j] * J_I[k] + J_D[j] * J_D[k];
i++;
}
A_reduction[n * (21 + j) + workload_idx] =
J_I[j] * r_I + J_D[j] * r_D;
}
A_reduction[n * 27 + workload_idx] = r_I * r_I + r_D * r_D;
A_reduction[n * 28 + workload_idx] = 1;
} else {
for (int i = 0; i < 29; i++) {
A_reduction[n * i + workload_idx] = 0;
}
}
});
ReduceAndSolve6x6(A_reduction, delta, residual, n, device);
}
} // namespace odometry
} // namespace kernel
} // namespace pipelines
} // namespace t
} // namespace open3d
| ef2dac92c64494cc223873affa01c3ff256552dc.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <cuda.h>
#include <cub/cub.cuh>
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/Dispatch.h"
#include "open3d/core/Tensor.h"
#include "open3d/core/kernel/CUDALauncher.cuh"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryImpl.h"
#include "open3d/t/pipelines/kernel/RGBDOdometryJacobianImpl.h"
#include "open3d/t/pipelines/kernel/TransformationConverter.h"
namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {
namespace odometry {
void ReduceAndSolve6x6(float* A_reduction,
core::Tensor& delta,
core::Tensor& residual,
int64_t n,
const core::Device& device) {
core::Tensor output_29 =
core::Tensor::Empty({29}, core::Dtype::Float32, device);
float* output_29_data = output_29.GetDataPtr<float>();
// Reduction of {29, N} to {29}.
for (int i = 0; i < 29; i++) {
// Determine temporary device storage requirements.
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes,
A_reduction + i * n, output_29_data + i, n);
// Allocate temporary storage.
cudaMalloc(&d_temp_storage, temp_storage_bytes);
// Run sum-reduction.
cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes,
A_reduction + i * n, output_29_data + i, n);
cudaFree(d_temp_storage);
}
DecodeAndSolve6x6(output_29, delta, residual);
}
template <typename T>
__device__ inline void WarpReduceSum(volatile T* local_sum, const int tid) {
local_sum[tid] += local_sum[tid + 32];
local_sum[tid] += local_sum[tid + 16];
local_sum[tid] += local_sum[tid + 8];
local_sum[tid] += local_sum[tid + 4];
local_sum[tid] += local_sum[tid + 2];
local_sum[tid] += local_sum[tid + 1];
}
template <typename T, size_t BLOCK_SIZE>
__device__ inline void BlockReduceSum(const int tid, volatile T* local_sum) {
if (BLOCK_SIZE >= 512) {
if (tid < 256) {
local_sum[tid] += local_sum[tid + 256];
}
__syncthreads();
}
if (BLOCK_SIZE >= 256) {
if (tid < 128) {
local_sum[tid] += local_sum[tid + 128];
}
__syncthreads();
}
if (BLOCK_SIZE >= 128) {
if (tid < 64) {
local_sum[tid] += local_sum[tid + 64];
}
__syncthreads();
}
if (tid < 32) {
WarpReduceSum<T>(local_sum, tid);
}
}
// Block-wide tree reduction of TWO BLOCK_SIZE-element shared-memory
// arrays at once (saves barrier round-trips vs. two separate calls).
// Results land in element [0] of each array. Every thread of the block
// must call this — __syncthreads() separates the stages.
template <typename T, size_t BLOCK_SIZE>
__device__ inline void BlockReduceSum(const int tid,
                                      volatile T* local_sum0,
                                      volatile T* local_sum1) {
    if (BLOCK_SIZE >= 512) {
        if (tid < 256) {
            local_sum0[tid] += local_sum0[tid + 256];
            local_sum1[tid] += local_sum1[tid + 256];
        }
        __syncthreads();
    }
    if (BLOCK_SIZE >= 256) {
        if (tid < 128) {
            local_sum0[tid] += local_sum0[tid + 128];
            local_sum1[tid] += local_sum1[tid + 128];
        }
        __syncthreads();
    }
    if (BLOCK_SIZE >= 128) {
        if (tid < 64) {
            local_sum0[tid] += local_sum0[tid + 64];
            local_sum1[tid] += local_sum1[tid + 64];
        }
        __syncthreads();
    }
    if (tid < 32) {
        // BUGFIX: was WarpReduceSum<float>, which silently instantiated
        // the float specialization for every T (wrong results for
        // double/int blocks). Use the template parameter.
        WarpReduceSum<T>(local_sum0, tid);
        WarpReduceSum<T>(local_sum1, tid);
    }
}
// Block-wide tree reduction of THREE BLOCK_SIZE-element shared-memory
// arrays at once. Results land in element [0] of each array. Every
// thread of the block must call this — __syncthreads() separates the
// stages.
template <typename T, size_t BLOCK_SIZE>
__device__ inline void BlockReduceSum(const int tid,
                                      volatile T* local_sum0,
                                      volatile T* local_sum1,
                                      volatile T* local_sum2) {
    if (BLOCK_SIZE >= 512) {
        if (tid < 256) {
            local_sum0[tid] += local_sum0[tid + 256];
            local_sum1[tid] += local_sum1[tid + 256];
            local_sum2[tid] += local_sum2[tid + 256];
        }
        __syncthreads();
    }
    if (BLOCK_SIZE >= 256) {
        if (tid < 128) {
            local_sum0[tid] += local_sum0[tid + 128];
            local_sum1[tid] += local_sum1[tid + 128];
            local_sum2[tid] += local_sum2[tid + 128];
        }
        __syncthreads();
    }
    if (BLOCK_SIZE >= 128) {
        if (tid < 64) {
            local_sum0[tid] += local_sum0[tid + 64];
            local_sum1[tid] += local_sum1[tid + 64];
            local_sum2[tid] += local_sum2[tid + 64];
        }
        __syncthreads();
    }
    if (tid < 32) {
        // BUGFIX: was WarpReduceSum<float>, which silently instantiated
        // the float specialization for every T (wrong results for
        // double/int blocks). Use the template parameter.
        WarpReduceSum<T>(local_sum0, tid);
        WarpReduceSum<T>(local_sum1, tid);
        WarpReduceSum<T>(local_sum2, tid);
    }
}
// Per-block accumulation of the 6x6 point-to-plane ICP normal
// equations. Each thread handles one pixel (x, y); per-thread Jacobian
// outer products are tree-reduced in shared memory and thread 0 of
// each block atomically adds into global_sum (29 floats: 21 upper-
// triangular JtJ + 6 Jtr + residual + inlier count). Requires
// blockDim.x * blockDim.y == kBlockSize (256).
__global__ void ComputePosePointToPlaneCUDAKernel(
        NDArrayIndexer source_vertex_indexer,
        NDArrayIndexer target_vertex_indexer,
        NDArrayIndexer target_normal_indexer,
        TransformIndexer ti,
        float* global_sum,
        int rows,
        int cols,
        float depth_diff) {
    const int kBlockSize = 256;
    __shared__ float local_sum0[kBlockSize];
    __shared__ float local_sum1[kBlockSize];
    __shared__ float local_sum2[kBlockSize];

    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    const int tid = threadIdx.x + threadIdx.y * blockDim.x;

    local_sum0[tid] = 0;
    local_sum1[tid] = 0;
    local_sum2[tid] = 0;

    // BUGFIX: the previous version returned early for out-of-image
    // threads. BlockReduceSum below contains __syncthreads(), so a
    // divergent early exit is undefined behavior (potential hang or
    // corrupted sums) for any block straddling the image border.
    // Out-of-image threads now stay and contribute zeros.
    const bool in_bounds = (y < rows && x < cols);

    float J[6] = {0}, reduction[21 + 6 + 2];
    float r = 0;

    bool valid = in_bounds &&
                 GetJacobianPointToPlane(x, y, depth_diff,
                                         source_vertex_indexer,
                                         target_vertex_indexer,
                                         target_normal_indexer, ti, J, r);

    // Dump J, r into JtJ (upper triangle, 21 entries) and Jtr (6).
    int offset = 0;
    for (int i = 0; i < 6; ++i) {
        for (int j = 0; j <= i; ++j) {
            reduction[offset++] = J[i] * J[j];
        }
    }
    for (int i = 0; i < 6; ++i) {
        reduction[offset++] = J[i] * r;
    }
    reduction[offset++] = r * r;
    reduction[offset++] = valid;

    // Sum reduction: JtJ(21) and Jtr(6), three channels per pass.
    for (size_t i = 0; i < 27; i += 3) {
        local_sum0[tid] = valid ? reduction[i + 0] : 0;
        local_sum1[tid] = valid ? reduction[i + 1] : 0;
        local_sum2[tid] = valid ? reduction[i + 2] : 0;
        __syncthreads();

        BlockReduceSum<float, kBlockSize>(tid, local_sum0, local_sum1,
                                          local_sum2);

        if (tid == 0) {
            atomicAdd(&global_sum[i + 0], local_sum0[0]);
            atomicAdd(&global_sum[i + 1], local_sum1[0]);
            atomicAdd(&global_sum[i + 2], local_sum2[0]);
        }
        __syncthreads();
    }

    // Sum reduction: residual(1) and inlier(1)
    {
        local_sum0[tid] = valid ? reduction[27] : 0;
        local_sum1[tid] = valid ? reduction[28] : 0;
        __syncthreads();

        BlockReduceSum<float, kBlockSize>(tid, local_sum0, local_sum1);
        if (tid == 0) {
            atomicAdd(&global_sum[27], local_sum0[0]);
            atomicAdd(&global_sum[28], local_sum1[0]);
        }
        __syncthreads();
    }
}
// Host wrapper: builds indexers over the vertex/normal maps, launches the
// point-to-plane pose kernel over a rows x cols grid, then decodes the
// reduced 29-vector into the 6-DoF update `delta` and scalar `residual`.
void ComputePosePointToPlaneCUDA(const core::Tensor& source_vertex_map,
                                 const core::Tensor& target_vertex_map,
                                 const core::Tensor& target_normal_map,
                                 const core::Tensor& intrinsics,
                                 const core::Tensor& init_source_to_target,
                                 core::Tensor& delta,
                                 core::Tensor& residual,
                                 float depth_diff) {
    NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
    NDArrayIndexer target_vertex_indexer(target_vertex_map, 2);
    NDArrayIndexer target_normal_indexer(target_normal_map, 2);
    core::Device device = source_vertex_map.GetDevice();
    core::Tensor trans = init_source_to_target;
    TransformIndexer ti(intrinsics, trans);
    const int64_t rows = source_vertex_indexer.GetShape(0);
    const int64_t cols = source_vertex_indexer.GetShape(1);
    // A_29xN is a {29, N} shaped tensor, which is later reduced to {29} where
    // [0, 20] elements are used to construct {6,6} shaped symmetric AtA
    // matrix, [21, 26] elements are used to construct {6} AtB matrix, element
    // [27] stores residual and element [28] stores count.
    core::Tensor global_sum =
            core::Tensor::Zeros({29}, core::Dtype::Float32, device);
    float* global_sum_ptr = global_sum.GetDataPtr<float>();
    const int kThreadSize = 16;
    // Ceil-divide so partial edge tiles are covered; the kernel handles
    // out-of-range threads itself.
    const dim3 blocks((cols + kThreadSize - 1) / kThreadSize,
                      (rows + kThreadSize - 1) / kThreadSize);
    const dim3 threads(kThreadSize, kThreadSize);
    ComputePosePointToPlaneCUDAKernel<<<blocks, threads>>>(
            source_vertex_indexer, target_vertex_indexer, target_normal_indexer,
            ti, global_sum_ptr, rows, cols, depth_diff);
    // Kernel launches do not return errors directly: check for
    // launch-configuration errors first, then for async execution errors.
    OPEN3D_CUDA_CHECK(cudaGetLastError());
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
    DecodeAndSolve6x6(global_sum, delta, residual);
}
// Estimates the 6-DoF pose update aligning source to target by minimizing a
// photometric (intensity) error over all pixels. Each pixel contributes a
// 6-vector Jacobian J_I and scalar residual r_I; the per-pixel outer
// products are written into a {29, N} scratch tensor and reduced afterwards.
//
// Outputs: `delta` (6-DoF update) and `residual` (scalar), both written by
// ReduceAndSolve6x6. `depth_diff` is the depth-validity threshold forwarded
// to GetJacobianIntensity.
void ComputePoseIntensityCUDA(const core::Tensor& source_depth,
                              const core::Tensor& target_depth,
                              const core::Tensor& source_intensity,
                              const core::Tensor& target_intensity,
                              const core::Tensor& target_intensity_dx,
                              const core::Tensor& target_intensity_dy,
                              const core::Tensor& source_vertex_map,
                              const core::Tensor& intrinsics,
                              const core::Tensor& init_source_to_target,
                              core::Tensor& delta,
                              core::Tensor& residual,
                              float depth_diff) {
    // 2D indexers over the per-pixel input maps.
    NDArrayIndexer source_depth_indexer(source_depth, 2);
    NDArrayIndexer target_depth_indexer(target_depth, 2);
    NDArrayIndexer source_intensity_indexer(source_intensity, 2);
    NDArrayIndexer target_intensity_indexer(target_intensity, 2);
    NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
    NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
    NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
    core::Device device = source_vertex_map.GetDevice();
    core::Tensor trans = init_source_to_target;
    t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
    const int64_t rows = source_vertex_indexer.GetShape(0);
    const int64_t cols = source_vertex_indexer.GetShape(1);
    // One workload item per pixel.
    const int64_t n = rows * cols;
    // A_29xN is a {29, N} shaped tensor, which is later reduced to
    // {29} where [0, 20] elements are used to construct {6,6} shaped
    // symmetric AtA matrix, [21, 26] elements are used to construct {6} AtB
    // matrix, element [27] stores residual and element [28] stores count.
    core::Tensor A_29xN =
            core::Tensor::Empty({29, n}, core::Dtype::Float32, device);
    float* A_reduction = A_29xN.GetDataPtr<float>();
    core::kernel::CUDALauncher::LaunchGeneralKernel(
            n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                float J_I[6];
                float r_I;
                bool valid = GetJacobianIntensity(
                        workload_idx, cols, depth_diff, source_depth_indexer,
                        target_depth_indexer, source_intensity_indexer,
                        target_intensity_indexer, target_intensity_dx_indexer,
                        target_intensity_dy_indexer, source_vertex_indexer, ti,
                        J_I, r_I);
                if (valid) {
                    // Rows [0, 20]: lower triangle of J*J^T, row-major;
                    // i indexes the flattened triangle.
                    for (int i = 0, j = 0; j < 6; j++) {
                        for (int k = 0; k <= j; k++) {
                            A_reduction[n * i + workload_idx] = J_I[j] * J_I[k];
                            i++;
                        }
                        // Rows [21, 26]: J * r.
                        A_reduction[n * (21 + j) + workload_idx] = J_I[j] * r_I;
                    }
                    A_reduction[n * 27 + workload_idx] = r_I * r_I;
                    A_reduction[n * 28 + workload_idx] = 1;
                } else {
                    // A_29xN is uninitialized (Empty), so invalid pixels must
                    // explicitly zero their column.
                    for (int i = 0; i < 29; i++) {
                        A_reduction[n * i + workload_idx] = 0;
                    }
                }
            });
    ReduceAndSolve6x6(A_reduction, delta, residual, n, device);
}
// Estimates the 6-DoF pose update aligning source to target using the hybrid
// objective: per pixel, an intensity term (J_I, r_I) and a depth term
// (J_D, r_D) are summed into one normal-equation contribution. Layout of the
// {29, N} scratch tensor matches ComputePoseIntensityCUDA.
//
// Outputs: `delta` (6-DoF update) and `residual` (scalar), both written by
// ReduceAndSolve6x6. `depth_diff` is the depth-validity threshold forwarded
// to GetJacobianHybrid.
void ComputePoseHybridCUDA(const core::Tensor& source_depth,
                           const core::Tensor& target_depth,
                           const core::Tensor& source_intensity,
                           const core::Tensor& target_intensity,
                           const core::Tensor& target_depth_dx,
                           const core::Tensor& target_depth_dy,
                           const core::Tensor& target_intensity_dx,
                           const core::Tensor& target_intensity_dy,
                           const core::Tensor& source_vertex_map,
                           const core::Tensor& intrinsics,
                           const core::Tensor& init_source_to_target,
                           core::Tensor& delta,
                           core::Tensor& residual,
                           float depth_diff) {
    // 2D indexers over the per-pixel input maps.
    NDArrayIndexer source_depth_indexer(source_depth, 2);
    NDArrayIndexer target_depth_indexer(target_depth, 2);
    NDArrayIndexer source_intensity_indexer(source_intensity, 2);
    NDArrayIndexer target_intensity_indexer(target_intensity, 2);
    NDArrayIndexer target_depth_dx_indexer(target_depth_dx, 2);
    NDArrayIndexer target_depth_dy_indexer(target_depth_dy, 2);
    NDArrayIndexer target_intensity_dx_indexer(target_intensity_dx, 2);
    NDArrayIndexer target_intensity_dy_indexer(target_intensity_dy, 2);
    NDArrayIndexer source_vertex_indexer(source_vertex_map, 2);
    core::Device device = source_vertex_map.GetDevice();
    core::Tensor trans = init_source_to_target;
    t::geometry::kernel::TransformIndexer ti(intrinsics, trans);
    const int64_t rows = source_vertex_indexer.GetShape(0);
    const int64_t cols = source_vertex_indexer.GetShape(1);
    // One workload item per pixel.
    const int64_t n = rows * cols;
    // A_29xN is a {29, N} shaped tensor, which is later reduced to
    // {29} where [0, 20] elements are used to construct {6,6} shaped
    // symmetric AtA matrix, [21, 26] elements are used to construct {6} AtB
    // matrix, element [27] stores residual and element [28] stores count.
    core::Tensor A_29xN =
            core::Tensor::Empty({29, n}, core::Dtype::Float32, device);
    float* A_reduction = A_29xN.GetDataPtr<float>();
    core::kernel::CUDALauncher::LaunchGeneralKernel(
            n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                float J_I[6], J_D[6];
                float r_I, r_D;
                bool valid = GetJacobianHybrid(
                        workload_idx, cols, depth_diff, source_depth_indexer,
                        target_depth_indexer, source_intensity_indexer,
                        target_intensity_indexer, target_depth_dx_indexer,
                        target_depth_dy_indexer, target_intensity_dx_indexer,
                        target_intensity_dy_indexer, source_vertex_indexer, ti,
                        J_I, J_D, r_I, r_D);
                if (valid) {
                    // Rows [0, 20]: lower triangle of the summed outer
                    // products J_I*J_I^T + J_D*J_D^T; i indexes the triangle.
                    for (int i = 0, j = 0; j < 6; j++) {
                        for (int k = 0; k <= j; k++) {
                            A_reduction[n * i + workload_idx] =
                                    J_I[j] * J_I[k] + J_D[j] * J_D[k];
                            i++;
                        }
                        // Rows [21, 26]: summed J * r.
                        A_reduction[n * (21 + j) + workload_idx] =
                                J_I[j] * r_I + J_D[j] * r_D;
                    }
                    A_reduction[n * 27 + workload_idx] = r_I * r_I + r_D * r_D;
                    A_reduction[n * 28 + workload_idx] = 1;
                } else {
                    // A_29xN is uninitialized (Empty), so invalid pixels must
                    // explicitly zero their column.
                    for (int i = 0; i < 29; i++) {
                        A_reduction[n * i + workload_idx] = 0;
                    }
                }
            });
    ReduceAndSolve6x6(A_reduction, delta, residual, n, device);
}
} // namespace odometry
} // namespace kernel
} // namespace pipelines
} // namespace t
} // namespace open3d
|
44d8dc3e1bd906b022cf9c5be1624ec75b27596d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//******************************************************************************
// Created by Edward Connell
// Copyright (c) 2016 Connell Research. All rights reserved.
//
#include "include/CudaKernels.h"
#include <hiprand/hiprand_kernel.h>
//------------------------------------------------------------------------------
// dropoutForward_kernel
// Inverted-dropout forward pass: each element draws a uniform sample from
// its per-element RNG state; samples above `threshold` keep the element and
// rescale it by `scale`, otherwise it is zeroed. The chosen factor is stored
// in `mask` so the backward pass can reuse the same decision.
template <typename T>
__global__ void dropoutForward_kernel(
    hiprandState_t *state, T threshold, T scale, T* mask,
    const cudaShape_t inShape, const T* inData,
    const cudaShape_t outShape, T* outData)
{
    CUDA_KERNEL_LOOP(i, inShape.extent[0]) {
        const unsigned srcOffset = i * inShape.stride[0];
        const unsigned dstOffset = i * outShape.stride[0];
        // keep factor: `scale` (kept) or 0 (dropped)
        const T keep = (hiprand_uniform(&state[i]) > threshold) ? scale : 0;
        mask[i] = keep;
        outData[dstOffset] = inData[srcOffset] * keep;
    }
}
//-------------------------------------
// cudaDropoutForward
// Launches dropoutForward_kernel on `stream`, dispatching on the tensor
// element type. `ratio` is the drop probability; kept elements are scaled by
// 1/(1-ratio) (inverted dropout). `mask` receives the per-element keep
// factors; `generatorState` is the per-element hiprand state array.
// Only rank-1 (flattened) shapes are supported for now.
hipError_t cudaDropoutForward(
    const cudaShape_t inShape, const void *inData,
    const cudaShape_t outShape, void *outData,
    double ratio, void *mask,
    void* generatorState, hipStream_t stream)
{
    // Consistency fix: cudaDropoutBackward calls CudaKernelPreCheck before
    // launching; do the same here so a pre-existing error is not
    // misattributed to this kernel by CudaKernelPostCheck.
    CudaKernelPreCheck(stream);
    // require flattening for now
    assert(inShape.dataType == outShape.dataType);
    unsigned numBlocks = CUDA_NUM_BLOCKS(inShape.extent[0]);
    unsigned numThreads = CUDA_NUM_THREADS;
    double scale = 1.0 / (1.0 - ratio);
    switch(inShape.dataType) {
        case HIP_R_32F:
            hipLaunchKernelGGL(( dropoutForward_kernel<float>) , dim3(numBlocks), dim3(numThreads), 0, stream,
                (hiprandState_t *) generatorState, (float)ratio, (float)scale, (float*)mask,
                inShape, (float*)inData, outShape, (float*)outData);
            break;
        case HIP_R_64F:
            hipLaunchKernelGGL(( dropoutForward_kernel<double>) , dim3(numBlocks), dim3(numThreads), 0, stream,
                (hiprandState_t *) generatorState, ratio, scale, (double*)mask,
                inShape, (double*)inData, outShape, (double*)outData);
            break;
        default: assert(false);
    };
    return CudaKernelPostCheck(stream);
}
//------------------------------------------------------------------------------
// dropoutBackward_kernel
// Dropout backward pass: propagate the upstream gradient through the same
// keep/zero mask the forward pass produced (dropped elements get zero
// gradient, kept ones are rescaled identically).
template <typename T>
__global__ void dropoutBackward_kernel(
    const cudaShape_t outShape, const T* outDiff,
    const cudaShape_t inShape, T* inDiff, const T* mask)
{
    CUDA_KERNEL_LOOP(i, inShape.extent[0]) {
        const unsigned gradInOffset  = i * inShape.stride[0];
        const unsigned gradOutOffset = i * outShape.stride[0];
        inDiff[gradInOffset] = outDiff[gradOutOffset] * mask[i];
    }
}
//-------------------------------------
// cudaDropoutBackward
// Launches dropoutBackward_kernel on `stream`, dispatching on the tensor
// element type. `mask` must be the array produced by cudaDropoutForward.
// Only rank-1 (flattened) shapes are supported for now.
hipError_t cudaDropoutBackward(
    const cudaShape_t outShape, const void *outDiff,
    const cudaShape_t inShape, void *inDiff,
    const void *mask, hipStream_t stream)
{
    CudaKernelPreCheck(stream);
    // require flattening for now
    assert(inShape.dataType == outShape.dataType);
    const unsigned blockCount  = CUDA_NUM_BLOCKS(inShape.extent[0]);
    const unsigned threadCount = CUDA_NUM_THREADS;
    switch(inShape.dataType) {
        case HIP_R_32F:
            hipLaunchKernelGGL(( dropoutBackward_kernel<float>) , dim3(blockCount), dim3(threadCount), 0, stream,
                outShape, (float*)outDiff, inShape, (float*)inDiff, (float*)mask);
            break;
        case HIP_R_64F:
            hipLaunchKernelGGL(( dropoutBackward_kernel<double>) , dim3(blockCount), dim3(threadCount), 0, stream,
                outShape, (double*)outDiff, inShape, (double*)inDiff, (double*)mask);
            break;
        default: assert(false);
    };
    return CudaKernelPostCheck(stream);
}
| 44d8dc3e1bd906b022cf9c5be1624ec75b27596d.cu | //******************************************************************************
// Created by Edward Connell
// Copyright (c) 2016 Connell Research. All rights reserved.
//
#include "include/CudaKernels.h"
#include <curand_kernel.h>
//------------------------------------------------------------------------------
// dropoutForward_kernel
// Inverted-dropout forward pass: each element draws a uniform sample from
// its per-element RNG state; samples above `threshold` keep the element and
// rescale it by `scale`, otherwise it is zeroed. The chosen factor is stored
// in `mask` so the backward pass can reuse the same decision.
template <typename T>
__global__ void dropoutForward_kernel(
    curandState *state, T threshold, T scale, T* mask,
    const cudaShape_t inShape, const T* inData,
    const cudaShape_t outShape, T* outData)
{
    CUDA_KERNEL_LOOP(i, inShape.extent[0]) {
        const unsigned srcOffset = i * inShape.stride[0];
        const unsigned dstOffset = i * outShape.stride[0];
        // keep factor: `scale` (kept) or 0 (dropped)
        const T keep = (curand_uniform(&state[i]) > threshold) ? scale : 0;
        mask[i] = keep;
        outData[dstOffset] = inData[srcOffset] * keep;
    }
}
//-------------------------------------
// cudaDropoutForward
// Launches dropoutForward_kernel on `stream`, dispatching on the tensor
// element type. `ratio` is the drop probability; kept elements are scaled by
// 1/(1-ratio) (inverted dropout). `mask` receives the per-element keep
// factors; `generatorState` is the per-element curand state array.
// Only rank-1 (flattened) shapes are supported for now.
cudaError_t cudaDropoutForward(
    const cudaShape_t inShape, const void *inData,
    const cudaShape_t outShape, void *outData,
    double ratio, void *mask,
    void* generatorState, cudaStream_t stream)
{
    // Consistency fix: cudaDropoutBackward calls CudaKernelPreCheck before
    // launching; do the same here so a pre-existing error is not
    // misattributed to this kernel by CudaKernelPostCheck.
    CudaKernelPreCheck(stream);
    // require flattening for now
    assert(inShape.dataType == outShape.dataType);
    unsigned numBlocks = CUDA_NUM_BLOCKS(inShape.extent[0]);
    unsigned numThreads = CUDA_NUM_THREADS;
    double scale = 1.0 / (1.0 - ratio);
    switch(inShape.dataType) {
        case CUDA_R_32F:
            dropoutForward_kernel<float> <<<numBlocks, numThreads, 0, stream>>>
            ((curandState *) generatorState, (float)ratio, (float)scale, (float*)mask,
             inShape, (float*)inData, outShape, (float*)outData);
            break;
        case CUDA_R_64F:
            dropoutForward_kernel<double> <<<numBlocks, numThreads, 0, stream>>>
            ((curandState *) generatorState, ratio, scale, (double*)mask,
             inShape, (double*)inData, outShape, (double*)outData);
            break;
        default: assert(false);
    };
    return CudaKernelPostCheck(stream);
}
//------------------------------------------------------------------------------
// dropoutBackward_kernel
// Dropout backward pass: propagate the upstream gradient through the same
// keep/zero mask the forward pass produced (dropped elements get zero
// gradient, kept ones are rescaled identically).
template <typename T>
__global__ void dropoutBackward_kernel(
    const cudaShape_t outShape, const T* outDiff,
    const cudaShape_t inShape, T* inDiff, const T* mask)
{
    CUDA_KERNEL_LOOP(i, inShape.extent[0]) {
        const unsigned gradInOffset  = i * inShape.stride[0];
        const unsigned gradOutOffset = i * outShape.stride[0];
        inDiff[gradInOffset] = outDiff[gradOutOffset] * mask[i];
    }
}
//-------------------------------------
// cudaDropoutBackward
// Launches dropoutBackward_kernel on `stream`, dispatching on the tensor
// element type. `mask` must be the array produced by cudaDropoutForward.
// Only rank-1 (flattened) shapes are supported for now.
// NOTE(review): CudaKernelPreCheck presumably checks/clears any prior error
// on the stream before the launch — confirm against CudaKernels.h.
cudaError_t cudaDropoutBackward(
    const cudaShape_t outShape, const void *outDiff,
    const cudaShape_t inShape, void *inDiff,
    const void *mask, cudaStream_t stream)
{
    CudaKernelPreCheck(stream);
    // require flattening for now
    assert(inShape.dataType == outShape.dataType);
    unsigned numBlocks = CUDA_NUM_BLOCKS(inShape.extent[0]);
    unsigned numThreads = CUDA_NUM_THREADS;
    switch(inShape.dataType) {
        case CUDA_R_32F:
            dropoutBackward_kernel<float> <<<numBlocks, numThreads, 0, stream>>>
            (outShape, (float*)outDiff, inShape, (float*)inDiff, (float*)mask);
            break;
        case CUDA_R_64F:
            dropoutBackward_kernel<double> <<<numBlocks, numThreads, 0, stream>>>
            (outShape, (double*)outDiff, inShape, (double*)inDiff, (double*)mask);
            break;
        default: assert(false);
    };
    return CudaKernelPostCheck(stream);
}
|
5fddd4932ed68064fe315fedd1eb87b9c538ae53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* use weno derivative to calculate the numerical Hamiltonian for reinitialization
* scheme
* 5th order polynomial interpolation will be used to locate the boundary
* weno53 scheme will be implemented on a nonuniform stencil near boundary
* central weno 6th order accurate scheme will be applied at nodes not immediately
* next to the boundary
******************************************************************************/
#include "shared_utilities.hpp"
// One-sided fifth-order WENO derivative approximation from five consecutive
// first divided differences v1..v5 on a uniform grid. Blends three
// third-order candidate stencils with weights that are damped on substencils
// whose data oscillates, so the result degrades gracefully near kinks.
__device__ inline
double weno_onesided_derivative(double v1, double v2, double v3, double v4, double v5)
{
    // candidate derivatives on the three substencils
    double cand1 = 1./3. * v1 - 7./6. * v2 + 11./6. * v3;
    double cand2 = -1./6. * v2 + 5./6. * v3 + 1./3. * v4;
    double cand3 = 1./3. * v3 + 5./6. * v4 - 1./6. * v5;
    // smoothness indicators: large where a substencil's data is non-smooth
    double beta1 = 13./12. * pow((v1 - 2*v2 + v3),2) + 1./4. * pow((v1 - 4*v2 + 3*v3),2);
    double beta2 = 13./12. * pow((v2 - 2*v3 + v4),2) + 1./4. * pow((v2 - v4),2);
    double beta3 = 13./12. * pow((v3 - 2*v4 + v5),2) + 1./4. * pow((3*v3 - 4*v4 + v5),2);
    double epsilon = 1e-6;
    // raw weights: ideal weights 0.1/0.6/0.3 divided by squared roughness
    double a1 = 0.1 / pow( (beta1 + epsilon), 2);
    double a2 = 0.6 / pow( (beta2 + epsilon), 2);
    double a3 = 0.3 / pow( (beta3 + epsilon), 2);
    // normalize to a convex combination and blend the candidates
    double total = a1 + a2 + a3;
    double w1 = a1 / total;
    double w2 = a2 / total;
    double w3 = a3 / total;
    return (w1*cand1 + w2*cand2 + w3*cand3);
}
// given a stencil across the boundary: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// create a new stencil (x3m,h3m),(x2m,h2m),(x1m,h1m),(x0,h0),(x1,h1),(x2,h2),(x3,h3) including boundary nodes
// Build a 7-node (position, value) stencil centered at the current node
// (x0 = 0, h0 = p4). On each side, when the interface lies within the first
// cell (r1 < ds or l1 < ds), the interface point itself — where the level
// set value is exactly zero — replaces the nearest grid neighbour and the
// remaining two samples come from the next-closest grid nodes; otherwise the
// three regular grid neighbours are used unchanged.
__device__ inline
void select_stencil(double & h3m, double & h2m, double & h1m, double & h0, double & h1, double & h2, double & h3, double & x3m, double & x2m, double & x1m, double & x0, double & x1, double & x2, double & x3, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
    // center node
    h0 = p4; x0 = 0.0;
    // left (negative) side of the stencil
    if(l1<ds){
        x1m = -l1;   h1m = 0.0;   // interface node
        x2m = -ds;   h2m = p3;
        x3m = -2*ds; h3m = p2;
    }else{
        x1m = -ds;   h1m = p3;
        x2m = -2*ds; h2m = p2;
        x3m = -3*ds; h3m = p1;
    }
    // right (positive) side of the stencil
    if(r1<ds){
        x1 = r1;   h1 = 0.0;      // interface node
        x2 = ds;   h2 = p5;
        x3 = 2*ds; h3 = p6;
    }else{
        x1 = ds;   h1 = p5;
        x2 = 2*ds; h2 = p6;
        x3 = 3*ds; h3 = p7;
    }
}
// Computes the forward-biased (d_fore) and backward-biased (d_back) WENO
// derivative approximations at node x0 on the non-uniform 7-point stencil
// (x3m..x3, h3m..h3) produced by select_stencil(). Each direction blends
// three candidate reconstructions of the derivative, weighted by
// non-uniform-grid smoothness indicators; the two halves below are the same
// algorithm applied to a forward- and a backward-shifted window.
__device__ inline
void weno_nonuniform(double & d_fore, double & d_back, double h3m, double h2m, double h1m, double h0, double h1, double h2, double h3, double x3m, double x2m, double x1m, double x0, double x1, double x2, double x3)
{
    // first divided differences, i.e. cell averages of derivatives
    double d1_2_5 = (h3 - h2) / (x3 - x2) ;
    double d1_1_5 = (h2 - h1) / (x2 - x1) ;
    double d1_0_5 = (h1 - h0) / (x1 - x0) ;
    double d1_m0_5 = (h0 - h1m) / (x0 - x1m);
    double d1_m1_5 = (h1m - h2m) / (x1m - x2m);
    double d1_m2_5 = (h2m - h3m) / (x2m - x3m);
    // boundary nodes and cell averages
    double x_m2_5, x_m1_5, x_m0_5, x_0_5, x_1_5, x_2_5;
    double u_m2, u_m1, u_0, u_1, u_2;
    double v0, v1, v2, c0, c1, c2, s0, s1, s2;
    double epsilon = 1e-6, alpha0, alpha1, alpha2, sum, omega0, omega1, omega2;
    // calculate d_fore, choose a forward baised stencil
    x_m2_5 = x2m; x_m1_5 = x1m; x_m0_5 = x0; x_0_5 = x1; x_1_5 = x2; x_2_5 = x3;
    u_m2 = d1_m1_5; u_m1 = d1_m0_5; u_0 = d1_0_5; u_1 = d1_1_5; u_2 = d1_2_5;
    // now we calculate u_m0_5 from cell averages for different stencils
    // (v0/v1/v2 are the three candidate point values of the derivative)
    v0 = u_1
        + (u_0 - u_1) * (1.0 + (x_0_5 - x_m0_5)/(x_1_5 - x_m0_5) + (x_0_5 - x_m0_5)/(x_2_5 - x_m0_5))
        + (u_2 - u_1) * ((x_0_5 - x_m0_5)/(x_2_5 - x_m0_5)) * ((x_1_5 - x_m0_5)/(x_2_5 - x_0_5)) ;
    v1 = u_0
        + (u_m1 - u_0) * ((x_0_5 - x_m0_5)/(x_1_5 - x_m1_5)) * ((x_1_5 - x_m0_5)/(x_0_5 - x_m1_5))
        - (u_1 - u_0) * ((x_0_5 - x_m0_5)/(x_1_5 - x_m1_5)) * ((x_m0_5 - x_m1_5)/(x_1_5 - x_m0_5)) ;
    v2 = u_m1
        + (u_0 - u_m1) * ((x_m0_5 - x_m1_5)/(x_0_5 - x_m2_5)) * ((x_m0_5 - x_m2_5)/(x_0_5 - x_m1_5))
        - (u_m2 - u_m1) * ((x_m0_5 - x_m1_5)/(x_0_5 - x_m2_5)) * ((x_0_5 - x_m0_5)/(x_m0_5 - x_m2_5)) ;
    // optimal weights in smooth region
    c0 = ((x_m0_5 - x_m1_5)/(x_2_5 - x_m2_5)) * ((x_m0_5 - x_m2_5)/(x_2_5 - x_m1_5)) ;
    c1 = ((x_m0_5 - x_m2_5)/(x_2_5 - x_m2_5)) * ((x_2_5 - x_m0_5)/(x_2_5 - x_m1_5))
        * (1.0 + (x_2_5 - x_m1_5)/(x_1_5 - x_m2_5)) ;
    c2 = ((x_1_5 - x_m0_5)/(x_2_5 - x_m2_5)) * ((x_2_5 - x_m0_5)/(x_1_5 - x_m2_5)) ;
    // smoothness indicator (non-uniform-grid analogue; s0/s1/s2 grow where
    // the corresponding substencil's data oscillates)
    {
        s0 = 4.0 * pow( (x_0_5 - x_m0_5)/(x_2_5 - x_m0_5), 2) *
            (
             pow( (u_2 - u_1)/(x_2_5 - x_0_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5,2) + (x_1_5-x_m0_5) * (x_1_5 - x_0_5)
             )
             + (u_2 - u_1) * (u_0 - u_1) / ((x_2_5 - x_0_5)*(x_1_5 - x_m0_5)) *
             ( 20.0 * pow(x_0_5 - x_m0_5,2) + 2.0 * (x_1_5 - x_m0_5) * (x_1_5 - x_0_5)
               + (x_2_5 - x_m0_5) * (2.0 * x_1_5 - x_0_5 - x_m0_5)
             )
             + pow( (u_0 - u_1)/(x_1_5 - x_m0_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5,2)
               + (x_2_5 + x_1_5 - 2.0 * x_m0_5) * (x_2_5 + x_1_5 - x_0_5 - x_m0_5)
             )
            );
        s1 = 4.0 * pow( (x_0_5 - x_m0_5)/(x_1_5 - x_m1_5), 2) *
            (
             pow( (u_m1 - u_0)/(x_0_5 - x_m1_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5,2) + (x_1_5 - x_m0_5) * (x_1_5 - x_0_5)
             )
             + (u_1 - u_0) * (u_m1 - u_0) / ((x_1_5 - x_m0_5)*(x_0_5 - x_m1_5)) *
             ( 20.0 * pow(x_0_5 - x_m0_5,2) - (x_1_5 - x_0_5)*(x_m0_5 - x_m1_5)
               - (x_1_5 - x_m0_5)*(x_0_5 - x_m1_5)
             )
             + pow( (u_1 -u_0)/(x_1_5 - x_m0_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5, 2) + (x_m0_5 - x_m1_5)*(x_0_5 - x_m1_5)
             )
            );
        s2 = 4.0 *pow( (x_0_5 - x_m0_5)/(x_0_5 - x_m2_5), 2) *
            (
             pow( (u_m2 - u_m1)/(x_m0_5 - x_m2_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5, 2) + (x_0_5 - x_m1_5)*(x_m0_5 - x_m1_5)
             )
             + (u_0 - u_m1)*(u_m2 - u_m1) / ((x_0_5 - x_m1_5)*(x_m0_5 - x_m2_5)) *
             ( 20.0 * pow(x_0_5 - x_m0_5, 2)+ 2.0 * (x_0_5 - x_m1_5)*(x_m0_5 - x_m1_5)
               + (x_0_5 - x_m2_5)*(x_0_5 + x_m0_5 - 2.0 * x_m1_5)
             )
             + pow( (u_0 - u_m1)/(x_0_5 - x_m1_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5, 2)
               + (2.0 * x_0_5 - x_m2_5 - x_m1_5)*(x_0_5 + x_m0_5 - x_m1_5 - x_m2_5)
             )
            );
    }
    // optimal weights
    alpha0 = c0 / pow( (s0 + epsilon), 2);
    alpha1 = c1 / pow( (s1 + epsilon), 2);
    alpha2 = c2 / pow( (s2 + epsilon), 2);
    sum = alpha0 + alpha1 + alpha2;
    omega0 = alpha0 / sum;
    omega1 = alpha1 / sum;
    omega2 = alpha2 / sum;
    d_fore = v0 * omega0 + v1 * omega1 + v2 * omega2;
    // calculate d_back, choose a backward baised stencil
    x_m2_5 = x3m; x_m1_5 = x2m; x_m0_5 = x1m; x_0_5 = x0; x_1_5 = x1; x_2_5 = x2;
    u_m2 = d1_m2_5; u_m1 = d1_m1_5; u_0 = d1_m0_5; u_1 = d1_0_5; u_2 = d1_1_5;
    // now we calculate u_0_5 from cell averages for different stencils
    v0 = u_1
        + (u_0 - u_1) * ((x_1_5 - x_0_5)/(x_2_5 - x_m0_5)) * ((x_2_5 - x_0_5)/(x_1_5 - x_m0_5))
        - (u_2 - u_1) * ((x_1_5 - x_0_5)/(x_2_5 - x_m0_5)) * ((x_0_5 - x_m0_5)/(x_2_5 - x_0_5)) ;
    v1 = u_0
        + (u_1 - u_0) * ((x_0_5 - x_m0_5)/(x_1_5 - x_m1_5)) * ((x_0_5 - x_m1_5)/(x_1_5 - x_m0_5))
        - (u_m1 - u_0) * ((x_0_5 - x_m0_5)/(x_1_5 - x_m1_5)) * ((x_1_5 - x_0_5)/(x_0_5 - x_m1_5)) ;
    v2 = u_m1
        + (u_m2 - u_m1) * ((x_0_5 - x_m0_5)/(x_m0_5 - x_m2_5)) * ((x_0_5 - x_m1_5)/(x_0_5 - x_m2_5))
        + (u_0 - u_m1) * (1.0 + (x_0_5 - x_m0_5)/(x_0_5 - x_m1_5) + (x_0_5 - x_m0_5)/(x_0_5 - x_m2_5)) ;
    // optimal weights in smooth region
    c0 = ((x_0_5 - x_m2_5)/(x_2_5 - x_m2_5)) * ((x_0_5 - x_m1_5)/(x_2_5 -x_m1_5)) ;
    c1 = ((x_0_5 - x_m2_5)/(x_2_5 - x_m2_5)) * ((x_2_5 - x_0_5)/(x_2_5 - x_m1_5))
        * (1.0 + (x_2_5 - x_m1_5)/(x_1_5 - x_m2_5)) ;
    c2 = ((x_1_5 - x_0_5)/(x_2_5 - x_m2_5)) * ((x_2_5 - x_0_5)/(x_1_5 - x_m2_5)) ;
    // smoothness indicator (same formulas as above, evaluated on the
    // backward-shifted window)
    {
        s0 = 4.0 * pow( (x_0_5 - x_m0_5)/(x_2_5 - x_m0_5), 2) *
            (
             pow( (u_2 - u_1)/(x_2_5 - x_0_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5,2) + (x_1_5-x_m0_5) * (x_1_5 - x_0_5)
             )
             + (u_2 - u_1) * (u_0 - u_1) / ((x_2_5 - x_0_5)*(x_1_5 - x_m0_5)) *
             ( 20.0 * pow(x_0_5 - x_m0_5,2) + 2.0 * (x_1_5 - x_m0_5) * (x_1_5 - x_0_5)
               + (x_2_5 - x_m0_5) * (2.0 * x_1_5 - x_0_5 - x_m0_5)
             )
             + pow( (u_0 - u_1)/(x_1_5 - x_m0_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5,2)
               + (x_2_5 + x_1_5 - 2.0 * x_m0_5) * (x_2_5 + x_1_5 - x_0_5 - x_m0_5)
             )
            );
        s1 = 4.0 * pow( (x_0_5 - x_m0_5)/(x_1_5 - x_m1_5), 2) *
            (
             pow( (u_m1 - u_0)/(x_0_5 - x_m1_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5,2) + (x_1_5 - x_m0_5) * (x_1_5 - x_0_5)
             )
             + (u_1 - u_0) * (u_m1 - u_0) / ((x_1_5 - x_m0_5)*(x_0_5 - x_m1_5)) *
             ( 20.0 * pow(x_0_5 - x_m0_5,2) - (x_1_5 - x_0_5)*(x_m0_5 - x_m1_5)
               - (x_1_5 - x_m0_5)*(x_0_5 - x_m1_5)
             )
             + pow( (u_1 -u_0)/(x_1_5 - x_m0_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5, 2) + (x_m0_5 - x_m1_5)*(x_0_5 - x_m1_5)
             )
            );
        s2 = 4.0 *pow( (x_0_5 - x_m0_5)/(x_0_5 - x_m2_5), 2) *
            (
             pow( (u_m2 - u_m1)/(x_m0_5 - x_m2_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5, 2) + (x_0_5 - x_m1_5)*(x_m0_5 - x_m1_5)
             )
             + (u_0 - u_m1)*(u_m2 - u_m1) / ((x_0_5 - x_m1_5)*(x_m0_5 - x_m2_5)) *
             ( 20.0 * pow(x_0_5 - x_m0_5, 2)+ 2.0 * (x_0_5 - x_m1_5)*(x_m0_5 - x_m1_5)
               + (x_0_5 - x_m2_5)*(x_0_5 + x_m0_5 - 2.0 * x_m1_5)
             )
             + pow( (u_0 - u_m1)/(x_0_5 - x_m1_5), 2) *
             ( 10.0 * pow(x_0_5 - x_m0_5, 2)
               + (2.0 * x_0_5 - x_m2_5 - x_m1_5)*(x_0_5 + x_m0_5 - x_m1_5 - x_m2_5)
             )
            );
    }
    // optimal weights
    alpha0 = c0 / pow( (s0 + epsilon), 2);
    alpha1 = c1 / pow( (s1 + epsilon), 2);
    alpha2 = c2 / pow( (s2 + epsilon), 2);
    sum = alpha0 + alpha1 + alpha2;
    omega0 = alpha0 / sum;
    omega1 = alpha1 / sum;
    omega2 = alpha2 / sum;
    d_back = v0 * omega0 + v1 * omega1 + v2 * omega2;
}
// calculate weno derivative at p4: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// where px are level set function values at node x
// lx, rx are distance to the left/right node
// Computes forward (d_fore) and backward (d_back) WENO derivatives at node
// p4. A node is treated as immediately adjacent to the interface when the
// level set changes sign across either neighbouring edge; such nodes use the
// boundary-aware non-uniform stencil, all others use the standard uniform
// one-sided WENO derivatives.
__device__ inline
void weno_derivative_boundary(double & d_fore, double & d_back, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
    if (p3*p4 < 0 || p4*p5 < 0) {
        // interface crosses next to this node: build a stencil that includes
        // the interface points and differentiate on the non-uniform grid
        double g3m, g2m, g1m, g0, g1, g2, g3;
        double y3m, y2m, y1m, y0, y1, y2, y3;
        select_stencil(g3m, g2m, g1m, g0, g1, g2, g3,
                       y3m, y2m, y1m, y0, y1, y2, y3,
                       p1, p2, p3, p4, p5, p6, p7,
                       r1, r2, r3, l1, l2, l3, ds);
        weno_nonuniform(d_fore, d_back,
                        g3m, g2m, g1m, g0, g1, g2, g3,
                        y3m, y2m, y1m, y0, y1, y2, y3);
    } else {
        // away from the interface: standard WENO on the six uniform first
        // divided differences
        double s1 = (p2 - p1) / ds;
        double s2 = (p3 - p2) / ds;
        double s3 = (p4 - p3) / ds;
        double s4 = (p5 - p4) / ds;
        double s5 = (p6 - p5) / ds;
        double s6 = (p7 - p6) / ds;
        d_back = weno_onesided_derivative(s1, s2, s3, s4, s5);
        d_fore = weno_onesided_derivative(s6, s5, s4, s3, s2);
    }
}
// One reinitialization step for the level set function `lsf` on a
// rows x cols x pges grid: writes into `step` the signed update
// -/+ deltat * (|grad(phi)| - 1) using Godunov upwinding of the
// boundary-aware WENO derivatives. `mask` selects the sign convention per
// node; xpr/xpl, ypf/ypb, zpu/zpd hold the distances to the interface (or to
// the neighbouring node) in the +/- x, y, z directions.
// One thread per grid node; out-of-range threads exit immediately.
__global__
void re_step(double * step, double const * lsf, bool const * mask, double const * deltat, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
    int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
    int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
    if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
        return;
    }
    int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
    double epsilon = 1e-6 * dx;
    if( xpr[ind]< epsilon || xpl[ind]<epsilon || ypf[ind]<epsilon || ypb[ind]<epsilon || zpu[ind]<epsilon || zpd[ind]<epsilon ){
        step[ind] = 0;
        return;
    }// for a boundary node, do not change its value
    double p1,p2,p3,p4,p5,p6,p7;
    double r1,r2,r3,l1,l2,l3;
    p4 = lsf[ind];
    // x direction: gather the 7-node stencil along columns plus the
    // interface distances, then form upwind derivatives xR (fore), xL (back)
    int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
    int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
    int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
    int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
    int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
    int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
    p1 = lsf[left3];
    p2 = lsf[left2];
    p3 = lsf[left1];
    p5 = lsf[rght1];
    p6 = lsf[rght2];
    p7 = lsf[rght3];
    r1 = xpr[ind];
    r2 = xpr[rght1];
    r3 = xpr[rght2];
    l1 = xpl[ind];
    l2 = xpl[left1];
    l3 = xpl[left2];
    double xR, xL;
    weno_derivative_boundary(xR,xL,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx);
    // y direction: same pattern along rows
    int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
    int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
    int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
    int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
    int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
    int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
    p1 = lsf[back3];
    p2 = lsf[back2];
    p3 = lsf[back1];
    p5 = lsf[frnt1];
    p6 = lsf[frnt2];
    p7 = lsf[frnt3];
    r1 = ypf[ind];
    r2 = ypf[frnt1];
    r3 = ypf[frnt2];
    l1 = ypb[ind];
    l2 = ypb[back1];
    l3 = ypb[back2];
    double yF, yB;
    weno_derivative_boundary(yF,yB,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy);
    // z direction: same pattern along pages
    int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
    int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
    int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
    int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
    int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
    int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
    p1 = lsf[down3];
    p2 = lsf[down2];
    p3 = lsf[down1];
    p5 = lsf[upup1];
    p6 = lsf[upup2];
    p7 = lsf[upup3];
    r1 = zpu[ind];
    r2 = zpu[upup1];
    r3 = zpu[upup2];
    l1 = zpd[ind];
    l2 = zpd[down1];
    l3 = zpd[down2];
    double zU, zD;
    weno_derivative_boundary(zU,zD,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz);
    // Godunov upwinding of |grad(phi)|: the mask flips which one-sided
    // derivative is taken and the sign of the update.
    if (mask[ind]) {
        step[ind] = ( sqrt( max2(pow(min2(0.0,xL),2),pow(max2(0.0,xR),2)) +
                            max2(pow(min2(0.0,yB),2),pow(max2(0.0,yF),2)) +
                            max2(pow(min2(0.0,zD),2),pow(max2(0.0,zU),2)) ) - 1
                    ) * deltat[ind] * (-1.);
    } else{
        step[ind] = ( sqrt( max2(pow(max2(0.0,xL),2),pow(min2(0.0,xR),2)) +
                            max2(pow(max2(0.0,yB),2),pow(min2(0.0,yF),2)) +
                            max2(pow(max2(0.0,zD),2),pow(min2(0.0,zU),2)) ) - 1
                    ) * deltat[ind] * (1.);
    }
}
| 5fddd4932ed68064fe315fedd1eb87b9c538ae53.cu | /*******************************************************************************
* use weno derivative to calculate the numerical Hamiltonian for reinitialization
* scheme
* 5th order polynomial interpolation will be used to locate the boundary
* weno53 scheme will be implemented on a nonuniform stencil near boundary
* central weno 6th order accurate scheme will be applied at nodes not immediately
* next to the boundary
******************************************************************************/
#include "shared_utilities.hpp"
// One-sided fifth-order WENO derivative approximation from five consecutive
// first divided differences v1..v5 on a uniform grid. Returns a convex
// combination of three third-order candidate stencils, with weights damped
// on substencils whose data oscillates.
__device__ inline
double weno_onesided_derivative(double v1, double v2, double v3, double v4, double v5)
{
    // different choices of ENO derivatives
    double phi1 = 1./3. * v1 - 7./6. * v2 + 11./6. * v3;
    double phi2 = -1./6. * v2 + 5./6. * v3 + 1./3. * v4;
    double phi3 = 1./3. * v3 + 5./6. * v4 - 1./6. * v5;
    // smoothness parameter
    double S1 = 13./12. * pow((v1 - 2*v2 + v3),2) + 1./4. * pow((v1 - 4*v2 + 3*v3),2);
    double S2 = 13./12. * pow((v2 - 2*v3 + v4),2) + 1./4. * pow((v2 - v4),2);
    double S3 = 13./12. * pow((v3 - 2*v4 + v5),2) + 1./4. * pow((3*v3 - 4*v4 + v5),2);
    // epsilon guards against division by zero on perfectly smooth data
    double epsilon = 1e-6;
    // ideal weights 0.1/0.6/0.3 divided by squared roughness
    double alpha1 = 0.1 / pow( (S1 + epsilon), 2);
    double alpha2 = 0.6 / pow( (S2 + epsilon), 2);
    double alpha3 = 0.3 / pow( (S3 + epsilon), 2);
    // weights for each stencil
    double sum = alpha1 + alpha2 + alpha3;
    double omega1 = alpha1 / sum;
    double omega2 = alpha2 / sum;
    double omega3 = alpha3 / sum;
    return (omega1*phi1 + omega2*phi2 + omega3*phi3);
}
// given a stencil across the boundary: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// create a new stencil (x3m,h3m),(x2m,h2m),(x1m,h1m),(x0,h0),(x1,h1),(x2,h2),(x3,h3) including boundary nodes
// Build a 7-node (position, value) stencil centered at the current node
// (x0 = 0, h0 = p4). On each side, when the interface lies within the first
// cell (r1 < ds or l1 < ds), the interface point — where the level set value
// is exactly zero — replaces the nearest grid neighbour and the remaining
// samples shift outward by one node; otherwise the regular grid neighbours
// are used. Outputs are written through the reference parameters.
__device__ inline
void select_stencil(double & h3m, double & h2m, double & h1m, double & h0, double & h1, double & h2, double & h3, double & x3m, double & x2m, double & x1m, double & x0, double & x1, double & x2, double & x3, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
    h0 = p4; x0 = 0.0;
    // right (positive) side of the stencil
    if(r1<ds){
        // interface inside the first cell: use it as the nearest node
        x1 = r1;
        x2 = ds;
        x3 = 2*ds;
        h1 = 0.0;
        h2 = p5;
        h3 = p6;
    }else{
        x1 = ds;
        x2 = 2*ds;
        x3 = 3*ds;
        h1 = p5;
        h2 = p6;
        h3 = p7;
    }
    // left (negative) side of the stencil
    if(l1<ds){
        // interface inside the first cell: use it as the nearest node
        x1m = -l1;
        x2m = - ds;
        x3m = - 2*ds;
        h1m = 0.0;
        h2m = p3;
        h3m = p2;
    }else{
        x1m = -ds;
        x2m = - 2*ds;
        x3m = - 3*ds;
        h1m = p3;
        h2m = p2;
        h3m = p1;
    }
}
// WENO reconstruction of one-sided first derivatives at the centre node x0 on
// a NONUNIFORM 7-point stencil (as produced by select_stencil). For each
// direction, three candidate derivatives v0..v2 are built from first divided
// differences, then blended with weights omega_k derived from the optimal
// linear weights c0..c2 and the smoothness indicators s0..s2:
//   alpha_k = c_k / (s_k + epsilon)^2,  omega_k = alpha_k / sum(alpha).
// d_fore uses a forward-biased window, d_back a backward-biased one.
__device__ inline
void weno_nonuniform(double & d_fore, double & d_back, double h3m, double h2m, double h1m, double h0, double h1, double h2, double h3, double x3m, double x2m, double x1m, double x0, double x1, double x2, double x3)
{
// first divided differences, i.e. cell averages of derivatives
double d1_2_5 = (h3 - h2) / (x3 - x2) ;
double d1_1_5 = (h2 - h1) / (x2 - x1) ;
double d1_0_5 = (h1 - h0) / (x1 - x0) ;
double d1_m0_5 = (h0 - h1m) / (x0 - x1m);
double d1_m1_5 = (h1m - h2m) / (x1m - x2m);
double d1_m2_5 = (h2m - h3m) / (x2m - x3m);
// boundary nodes and cell averages
double x_m2_5, x_m1_5, x_m0_5, x_0_5, x_1_5, x_2_5;
double u_m2, u_m1, u_0, u_1, u_2;
double v0, v1, v2, c0, c1, c2, s0, s1, s2;
double epsilon = 1e-6, alpha0, alpha1, alpha2, sum, omega0, omega1, omega2;
// calculate d_fore, choose a forward baised stencil
x_m2_5 = x2m; x_m1_5 = x1m; x_m0_5 = x0; x_0_5 = x1; x_1_5 = x2; x_2_5 = x3;
u_m2 = d1_m1_5; u_m1 = d1_m0_5; u_0 = d1_0_5; u_1 = d1_1_5; u_2 = d1_2_5;
// now we calculate u_m0_5 from cell averages for different stencils
v0 = u_1
+ (u_0 - u_1) * (1.0 + (x_0_5 - x_m0_5)/(x_1_5 - x_m0_5) + (x_0_5 - x_m0_5)/(x_2_5 - x_m0_5))
+ (u_2 - u_1) * ((x_0_5 - x_m0_5)/(x_2_5 - x_m0_5)) * ((x_1_5 - x_m0_5)/(x_2_5 - x_0_5)) ;
v1 = u_0
+ (u_m1 - u_0) * ((x_0_5 - x_m0_5)/(x_1_5 - x_m1_5)) * ((x_1_5 - x_m0_5)/(x_0_5 - x_m1_5))
- (u_1 - u_0) * ((x_0_5 - x_m0_5)/(x_1_5 - x_m1_5)) * ((x_m0_5 - x_m1_5)/(x_1_5 - x_m0_5)) ;
v2 = u_m1
+ (u_0 - u_m1) * ((x_m0_5 - x_m1_5)/(x_0_5 - x_m2_5)) * ((x_m0_5 - x_m2_5)/(x_0_5 - x_m1_5))
- (u_m2 - u_m1) * ((x_m0_5 - x_m1_5)/(x_0_5 - x_m2_5)) * ((x_0_5 - x_m0_5)/(x_m0_5 - x_m2_5)) ;
// optimal weights in smooth region
c0 = ((x_m0_5 - x_m1_5)/(x_2_5 - x_m2_5)) * ((x_m0_5 - x_m2_5)/(x_2_5 - x_m1_5)) ;
c1 = ((x_m0_5 - x_m2_5)/(x_2_5 - x_m2_5)) * ((x_2_5 - x_m0_5)/(x_2_5 - x_m1_5))
* (1.0 + (x_2_5 - x_m1_5)/(x_1_5 - x_m2_5)) ;
c2 = ((x_1_5 - x_m0_5)/(x_2_5 - x_m2_5)) * ((x_2_5 - x_m0_5)/(x_1_5 - x_m2_5)) ;
// smoothness indicator
{
s0 = 4.0 * pow( (x_0_5 - x_m0_5)/(x_2_5 - x_m0_5), 2) *
(
pow( (u_2 - u_1)/(x_2_5 - x_0_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5,2) + (x_1_5-x_m0_5) * (x_1_5 - x_0_5)
)
+ (u_2 - u_1) * (u_0 - u_1) / ((x_2_5 - x_0_5)*(x_1_5 - x_m0_5)) *
( 20.0 * pow(x_0_5 - x_m0_5,2) + 2.0 * (x_1_5 - x_m0_5) * (x_1_5 - x_0_5)
+ (x_2_5 - x_m0_5) * (2.0 * x_1_5 - x_0_5 - x_m0_5)
)
+ pow( (u_0 - u_1)/(x_1_5 - x_m0_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5,2)
+ (x_2_5 + x_1_5 - 2.0 * x_m0_5) * (x_2_5 + x_1_5 - x_0_5 - x_m0_5)
)
);
s1 = 4.0 * pow( (x_0_5 - x_m0_5)/(x_1_5 - x_m1_5), 2) *
(
pow( (u_m1 - u_0)/(x_0_5 - x_m1_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5,2) + (x_1_5 - x_m0_5) * (x_1_5 - x_0_5)
)
+ (u_1 - u_0) * (u_m1 - u_0) / ((x_1_5 - x_m0_5)*(x_0_5 - x_m1_5)) *
( 20.0 * pow(x_0_5 - x_m0_5,2) - (x_1_5 - x_0_5)*(x_m0_5 - x_m1_5)
- (x_1_5 - x_m0_5)*(x_0_5 - x_m1_5)
)
+ pow( (u_1 -u_0)/(x_1_5 - x_m0_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5, 2) + (x_m0_5 - x_m1_5)*(x_0_5 - x_m1_5)
)
);
s2 = 4.0 *pow( (x_0_5 - x_m0_5)/(x_0_5 - x_m2_5), 2) *
(
pow( (u_m2 - u_m1)/(x_m0_5 - x_m2_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5, 2) + (x_0_5 - x_m1_5)*(x_m0_5 - x_m1_5)
)
+ (u_0 - u_m1)*(u_m2 - u_m1) / ((x_0_5 - x_m1_5)*(x_m0_5 - x_m2_5)) *
( 20.0 * pow(x_0_5 - x_m0_5, 2)+ 2.0 * (x_0_5 - x_m1_5)*(x_m0_5 - x_m1_5)
+ (x_0_5 - x_m2_5)*(x_0_5 + x_m0_5 - 2.0 * x_m1_5)
)
+ pow( (u_0 - u_m1)/(x_0_5 - x_m1_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5, 2)
+ (2.0 * x_0_5 - x_m2_5 - x_m1_5)*(x_0_5 + x_m0_5 - x_m1_5 - x_m2_5)
)
);
}
// nonlinear weights: down-weight substencils with large smoothness indicators
alpha0 = c0 / pow( (s0 + epsilon), 2);
alpha1 = c1 / pow( (s1 + epsilon), 2);
alpha2 = c2 / pow( (s2 + epsilon), 2);
sum = alpha0 + alpha1 + alpha2;
omega0 = alpha0 / sum;
omega1 = alpha1 / sum;
omega2 = alpha2 / sum;
d_fore = v0 * omega0 + v1 * omega1 + v2 * omega2;
// calculate d_back, choose a backward baised stencil
x_m2_5 = x3m; x_m1_5 = x2m; x_m0_5 = x1m; x_0_5 = x0; x_1_5 = x1; x_2_5 = x2;
u_m2 = d1_m2_5; u_m1 = d1_m1_5; u_0 = d1_m0_5; u_1 = d1_0_5; u_2 = d1_1_5;
// now we calculate u_0_5 from cell averages for different stencils
v0 = u_1
+ (u_0 - u_1) * ((x_1_5 - x_0_5)/(x_2_5 - x_m0_5)) * ((x_2_5 - x_0_5)/(x_1_5 - x_m0_5))
- (u_2 - u_1) * ((x_1_5 - x_0_5)/(x_2_5 - x_m0_5)) * ((x_0_5 - x_m0_5)/(x_2_5 - x_0_5)) ;
v1 = u_0
+ (u_1 - u_0) * ((x_0_5 - x_m0_5)/(x_1_5 - x_m1_5)) * ((x_0_5 - x_m1_5)/(x_1_5 - x_m0_5))
- (u_m1 - u_0) * ((x_0_5 - x_m0_5)/(x_1_5 - x_m1_5)) * ((x_1_5 - x_0_5)/(x_0_5 - x_m1_5)) ;
v2 = u_m1
+ (u_m2 - u_m1) * ((x_0_5 - x_m0_5)/(x_m0_5 - x_m2_5)) * ((x_0_5 - x_m1_5)/(x_0_5 - x_m2_5))
+ (u_0 - u_m1) * (1.0 + (x_0_5 - x_m0_5)/(x_0_5 - x_m1_5) + (x_0_5 - x_m0_5)/(x_0_5 - x_m2_5)) ;
// optimal weights in smooth region
c0 = ((x_0_5 - x_m2_5)/(x_2_5 - x_m2_5)) * ((x_0_5 - x_m1_5)/(x_2_5 -x_m1_5)) ;
c1 = ((x_0_5 - x_m2_5)/(x_2_5 - x_m2_5)) * ((x_2_5 - x_0_5)/(x_2_5 - x_m1_5))
* (1.0 + (x_2_5 - x_m1_5)/(x_1_5 - x_m2_5)) ;
c2 = ((x_1_5 - x_0_5)/(x_2_5 - x_m2_5)) * ((x_2_5 - x_0_5)/(x_1_5 - x_m2_5)) ;
// smoothness indicator
{
s0 = 4.0 * pow( (x_0_5 - x_m0_5)/(x_2_5 - x_m0_5), 2) *
(
pow( (u_2 - u_1)/(x_2_5 - x_0_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5,2) + (x_1_5-x_m0_5) * (x_1_5 - x_0_5)
)
+ (u_2 - u_1) * (u_0 - u_1) / ((x_2_5 - x_0_5)*(x_1_5 - x_m0_5)) *
( 20.0 * pow(x_0_5 - x_m0_5,2) + 2.0 * (x_1_5 - x_m0_5) * (x_1_5 - x_0_5)
+ (x_2_5 - x_m0_5) * (2.0 * x_1_5 - x_0_5 - x_m0_5)
)
+ pow( (u_0 - u_1)/(x_1_5 - x_m0_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5,2)
+ (x_2_5 + x_1_5 - 2.0 * x_m0_5) * (x_2_5 + x_1_5 - x_0_5 - x_m0_5)
)
);
s1 = 4.0 * pow( (x_0_5 - x_m0_5)/(x_1_5 - x_m1_5), 2) *
(
pow( (u_m1 - u_0)/(x_0_5 - x_m1_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5,2) + (x_1_5 - x_m0_5) * (x_1_5 - x_0_5)
)
+ (u_1 - u_0) * (u_m1 - u_0) / ((x_1_5 - x_m0_5)*(x_0_5 - x_m1_5)) *
( 20.0 * pow(x_0_5 - x_m0_5,2) - (x_1_5 - x_0_5)*(x_m0_5 - x_m1_5)
- (x_1_5 - x_m0_5)*(x_0_5 - x_m1_5)
)
+ pow( (u_1 -u_0)/(x_1_5 - x_m0_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5, 2) + (x_m0_5 - x_m1_5)*(x_0_5 - x_m1_5)
)
);
s2 = 4.0 *pow( (x_0_5 - x_m0_5)/(x_0_5 - x_m2_5), 2) *
(
pow( (u_m2 - u_m1)/(x_m0_5 - x_m2_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5, 2) + (x_0_5 - x_m1_5)*(x_m0_5 - x_m1_5)
)
+ (u_0 - u_m1)*(u_m2 - u_m1) / ((x_0_5 - x_m1_5)*(x_m0_5 - x_m2_5)) *
( 20.0 * pow(x_0_5 - x_m0_5, 2)+ 2.0 * (x_0_5 - x_m1_5)*(x_m0_5 - x_m1_5)
+ (x_0_5 - x_m2_5)*(x_0_5 + x_m0_5 - 2.0 * x_m1_5)
)
+ pow( (u_0 - u_m1)/(x_0_5 - x_m1_5), 2) *
( 10.0 * pow(x_0_5 - x_m0_5, 2)
+ (2.0 * x_0_5 - x_m2_5 - x_m1_5)*(x_0_5 + x_m0_5 - x_m1_5 - x_m2_5)
)
);
}
// nonlinear weights: down-weight substencils with large smoothness indicators
alpha0 = c0 / pow( (s0 + epsilon), 2);
alpha1 = c1 / pow( (s1 + epsilon), 2);
alpha2 = c2 / pow( (s2 + epsilon), 2);
sum = alpha0 + alpha1 + alpha2;
omega0 = alpha0 / sum;
omega1 = alpha1 / sum;
omega2 = alpha2 / sum;
d_back = v0 * omega0 + v1 * omega1 + v2 * omega2;
}
// calculate weno derivative at p4: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// where px are level set function values at node x
// lx, rx are distance to the left/right node
// Outputs d_fore (right-biased) and d_back (left-biased) derivatives at p4.
// If the zero level set crosses between p3..p4 or p4..p5, a nonuniform
// stencil snapped to the interface is used (subcell fix); otherwise the
// standard uniform WENO formula on divided differences v1..v6 applies.
// NOTE(review): weno_onesided_derivative is defined elsewhere in this file;
// d_fore receives the differences in reversed order (v6..v2), which mirrors
// the upwind direction -- confirm against its definition.
__device__ inline
void weno_derivative_boundary(double & d_fore, double & d_back, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
// interface crosses one of the two cells adjacent to p4
bool cross_interface = p3*p4<0 || p4*p5<0;
if(cross_interface){
double h3m,h2m,h1m,h0,h1,h2,h3;
double x3m,x2m,x1m,x0,x1,x2,x3;
select_stencil(h3m,h2m,h1m,h0,h1,h2,h3,x3m,x2m,x1m,x0,x1,x2,x3,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,ds);
weno_nonuniform(d_fore,d_back,h3m,h2m,h1m,h0,h1,h2,h3,x3m,x2m,x1m,x0,x1,x2,x3);
}// for nodes IMMEDIATELY adjacent to the boundary, use cubic ENO interpolant
else{
// uniform grid: plain first divided differences with spacing ds
double v1 = (p2 - p1) / ds;
double v2 = (p3 - p2) / ds;
double v3 = (p4 - p3) / ds;
double v4 = (p5 - p4) / ds;
double v5 = (p6 - p5) / ds;
double v6 = (p7 - p6) / ds;
d_back = weno_onesided_derivative(v1,v2,v3,v4,v5);
d_fore = weno_onesided_derivative(v6,v5,v4,v3,v2);
}// if not a node IMMEDIATELY adjacent to the boundary, calculate weno derivatives as usual
}
// One explicit reinitialization step of the level-set equation:
// step = -sign * (|grad(lsf)| - 1) * deltat, with Godunov upwinding of the
// one-sided WENO derivatives in x, y and z. mask[ind] selects which upwind
// combination and overall sign is used (the two branches at the bottom).
// Nodes whose interface distances (xpr/xpl/ypf/ypb/zpu/zpd) are essentially
// zero are treated as boundary nodes and frozen (step = 0).
// Launch: 3D grid covering rows x cols x pges, one thread per grid node.
// NOTE(review): neighbour indices up to +/-3 are formed unconditionally;
// sub2ind is assumed to clamp or wrap out-of-range subscripts -- confirm.
// NOTE(review): parameter num_ele is unused in this kernel.
__global__
void re_step(double * step, double const * lsf, bool const * mask, double const * deltat, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double epsilon = 1e-6 * dx;
if( xpr[ind]< epsilon || xpl[ind]<epsilon || ypf[ind]<epsilon || ypb[ind]<epsilon || zpu[ind]<epsilon || zpd[ind]<epsilon ){
step[ind] = 0;
return;
}// for a boundary node, do not change its value
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
p4 = lsf[ind];
// x direction: 3 neighbours each way plus interface distances
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
double xR, xL;
weno_derivative_boundary(xR,xL,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx);
// y direction
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
double yF, yB;
weno_derivative_boundary(yF,yB,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy);
// z direction
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
double zU, zD;
weno_derivative_boundary(zU,zD,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz);
// Godunov scheme: pick the upwind one-sided derivative per axis; the mask
// flips which side counts and the sign of the update
if (mask[ind]) {
step[ind] = ( sqrt( max2(pow(min2(0.0,xL),2),pow(max2(0.0,xR),2)) +
max2(pow(min2(0.0,yB),2),pow(max2(0.0,yF),2)) +
max2(pow(min2(0.0,zD),2),pow(max2(0.0,zU),2)) ) - 1
) * deltat[ind] * (-1.);
} else{
step[ind] = ( sqrt( max2(pow(max2(0.0,xL),2),pow(min2(0.0,xR),2)) +
max2(pow(max2(0.0,yB),2),pow(min2(0.0,yF),2)) +
max2(pow(max2(0.0,zD),2),pow(min2(0.0,zU),2)) ) - 1
) * deltat[ind] * (1.);
}
}
|
6c8a5d1a7f264e22669392308ae96a330a066076.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/bboxUtils.h"
#include "common/kernels/kernel.h"
#include "hip/hip_fp16.h"
#include <array>
namespace nvinfer1
{
namespace plugin
{
// Area of a bounding box, computed in float regardless of T_BBOX.
// Degenerate boxes (max edge below min edge) have size 0 by convention.
// For normalized boxes the plain width*height is returned; otherwise the
// pixel-space convention (width+1)*(height+1) is used.
template <typename T_BBOX>
__device__ float bboxSize(const Bbox<T_BBOX>& bbox, const bool normalized)
{
    const float xmin = float(bbox.xmin);
    const float ymin = float(bbox.ymin);
    const float xmax = float(bbox.xmax);
    const float ymax = float(bbox.ymax);
    // Invalid box (e.g. xmax < xmin or ymax < ymin): size is zero.
    if (xmax < xmin || ymax < ymin)
    {
        return 0;
    }
    const float width = xmax - xmin;
    const float height = ymax - ymin;
    return normalized ? width * height : (width + 1.f) * (height + 1.f);
}
// Intersection of two boxes; writes the all-zero box when they are disjoint.
// Uses max/min directly on T_BBOX values (the __half case, which lacks these
// overloads, is handled by the explicit specialization in this file).
template <typename T_BBOX>
__device__ void intersectBbox(
const Bbox<T_BBOX>& bbox1,
const Bbox<T_BBOX>& bbox2,
Bbox<T_BBOX>* intersect_bbox)
{
if (bbox2.xmin > bbox1.xmax || bbox2.xmax < bbox1.xmin || bbox2.ymin > bbox1.ymax || bbox2.ymax < bbox1.ymin)
{
// Return [0, 0, 0, 0] if there is no intersection.
intersect_bbox->xmin = T_BBOX(0);
intersect_bbox->ymin = T_BBOX(0);
intersect_bbox->xmax = T_BBOX(0);
intersect_bbox->ymax = T_BBOX(0);
}
else
{
intersect_bbox->xmin = max(bbox1.xmin, bbox2.xmin);
intersect_bbox->ymin = max(bbox1.ymin, bbox2.ymin);
intersect_bbox->xmax = min(bbox1.xmax, bbox2.xmax);
intersect_bbox->ymax = min(bbox1.ymax, bbox2.ymax);
}
}
// __half specialization: comparisons and min/max are performed in float
// (converted per field), with implicit conversion back to __half on
// assignment, since half-precision comparison operators are not assumed.
template <>
__device__ void intersectBbox<__half>(
const Bbox<__half>& bbox1,
const Bbox<__half>& bbox2,
Bbox<__half>* intersect_bbox)
{
if (float(bbox2.xmin) > float(bbox1.xmax)
|| float(bbox2.xmax) < float(bbox1.xmin)
|| float(bbox2.ymin) > float(bbox1.ymax)
|| float(bbox2.ymax) < float(bbox1.ymin))
{
// Return [0, 0, 0, 0] if there is no intersection.
intersect_bbox->xmin = __half(0);
intersect_bbox->ymin = __half(0);
intersect_bbox->xmax = __half(0);
intersect_bbox->ymax = __half(0);
}
else
{
intersect_bbox->xmin = max(float(bbox1.xmin), float(bbox2.xmin));
intersect_bbox->ymin = max(float(bbox1.ymin), float(bbox2.ymin));
intersect_bbox->xmax = min(float(bbox1.xmax), float(bbox2.xmax));
intersect_bbox->ymax = min(float(bbox1.ymax), float(bbox2.ymax));
}
}
// Return a copy of the box with corners reordered so that (xmin, ymin) holds
// the smaller and (xmax, ymax) the larger coordinate on each axis.
template <typename T_BBOX>
__device__ Bbox<T_BBOX> getDiagonalMinMaxSortedBox(const Bbox<T_BBOX>& bbox1)
{
Bbox<T_BBOX> result;
result.xmin = min(bbox1.xmin, bbox1.xmax);
result.xmax = max(bbox1.xmin, bbox1.xmax);
result.ymin = min(bbox1.ymin, bbox1.ymax);
result.ymax = max(bbox1.ymin, bbox1.ymax);
return result;
}
// __half specialization: sort corners via float min/max (converted per
// field), with implicit conversion back to __half on assignment.
template <>
__device__ Bbox<__half> getDiagonalMinMaxSortedBox(const Bbox<__half>& bbox1)
{
Bbox<__half> result;
result.xmin = min(float(bbox1.xmin), float(bbox1.xmax));
result.xmax = max(float(bbox1.xmin), float(bbox1.xmax));
result.ymin = min(float(bbox1.ymin), float(bbox1.ymax));
result.ymax = max(float(bbox1.ymin), float(bbox1.ymax));
return result;
}
// Jaccard overlap (IoU) of two boxes in float. Corners are diagonal-sorted
// first so malformed inputs (swapped corners) are tolerated; returns 0 when
// the intersection is empty.
template <typename T_BBOX>
__device__ float jaccardOverlap(
const Bbox<T_BBOX>& bbox1, const Bbox<T_BBOX>& bbox2, const bool normalized, const bool caffeSemantics)
{
Bbox<T_BBOX> intersect_bbox;
Bbox<T_BBOX> localbbox1 = getDiagonalMinMaxSortedBox(bbox1);
Bbox<T_BBOX> localbbox2 = getDiagonalMinMaxSortedBox(bbox2);
intersectBbox(localbbox1, localbbox2, &intersect_bbox);
float intersect_width, intersect_height;
// Only when using Caffe semantics, IOU calculation adds "1" to width and height if bbox is not normalized.
// https://github.com/weiliu89/caffe/blob/ssd/src/caffe/util/bbox_util.cpp#L92-L97
if (normalized || !caffeSemantics)
{
intersect_width = float(intersect_bbox.xmax) - float(intersect_bbox.xmin);
intersect_height = float(intersect_bbox.ymax) - float(intersect_bbox.ymin);
}
else
{
intersect_width = float(intersect_bbox.xmax) - float(intersect_bbox.xmin) + float(T_BBOX(1));
intersect_height = float(intersect_bbox.ymax) - float(intersect_bbox.ymin) + float(T_BBOX(1));
}
if (intersect_width > 0 && intersect_height > 0)
{
// IoU = |A & B| / (|A| + |B| - |A & B|)
float intersect_size = intersect_width * intersect_height;
float bbox1_size = bboxSize(localbbox1, normalized);
float bbox2_size = bboxSize(localbbox2, normalized);
return intersect_size / (bbox1_size + bbox2_size - intersect_size);
}
else
{
return 0.;
}
}
// Reset a detection slot to the "empty" state: zero confidence, no box.
template <typename T_BBOX>
__device__ void emptyBboxInfo(
BboxInfo<T_BBOX>* bbox_info)
{
bbox_info->conf_score = T_BBOX(0);
bbox_info->label = -2; // -1 is reserved for "all labels" when shared_location is true, so -2 marks empty
bbox_info->bbox_idx = -1;
bbox_info->kept = false;
}
/********** new NMS for only score and index array **********/
/*
 * Per-class non-maximum suppression over the top_k score-sorted candidates.
 * Grid layout: one block per class (blockIdx.x); images are iterated in the
 * outer loop. Each thread caches up to TSIZE candidate boxes in registers,
 * and a dynamic shared-memory flag array (blockDim.x * TSIZE bools) tracks
 * survivors while a serial "reference box" sweeps forward, discarding boxes
 * whose IoU with it exceeds nms_threshold. Suppressed entries are written
 * out with score = score_shift and index = -1.
 * Requires blockDim.x * TSIZE >= top_k (see launcher's t_size computation).
 */
template <typename T_SCORE, typename T_BBOX, int TSIZE>
__global__ void allClassNMS_kernel(const int num, const int num_classes, const int num_preds_per_class, const int top_k,
const float nms_threshold, const bool share_location, const bool isNormalized,
T_BBOX* bbox_data, // bbox_data should be float to preserve location information
T_SCORE* beforeNMS_scores, int* beforeNMS_index_array, T_SCORE* afterNMS_scores, int* afterNMS_index_array,
bool flipXY, const float score_shift, bool caffeSemantics)
{
//__shared__ bool kept_bboxinfo_flag[CAFFE_CUDA_NUM_THREADS * TSIZE];
extern __shared__ bool kept_bboxinfo_flag[];
for (int i = 0; i < num; i++)
{
int32_t const offset = i * num_classes * num_preds_per_class + blockIdx.x * num_preds_per_class;
// Should not write data beyond [offset, top_k).
int32_t const max_idx = offset + top_k;
// Should not read beyond [offset, num_preds_per_class).
int32_t const max_read_idx = offset + min(top_k, num_preds_per_class);
int32_t const bbox_idx_offset = i * num_preds_per_class * (share_location ? 1 : num_classes);
// local thread data
int loc_bboxIndex[TSIZE];
Bbox<T_BBOX> loc_bbox[TSIZE];
// initialize Bbox, Bboxinfo, kept_bboxinfo_flag
// Eliminate shared memory RAW hazard
__syncthreads();
#pragma unroll
for (int t = 0; t < TSIZE; t++)
{
const int cur_idx = threadIdx.x + blockDim.x * t;
const int item_idx = offset + cur_idx;
// Init all output data
if (item_idx < max_idx)
{
// Do not access data if it exceeds read boundary
if (item_idx < max_read_idx)
{
loc_bboxIndex[t] = beforeNMS_index_array[item_idx];
}
else
{
loc_bboxIndex[t] = -1;
}
if (loc_bboxIndex[t] != -1)
{
const int bbox_data_idx = share_location ? (loc_bboxIndex[t] % num_preds_per_class + bbox_idx_offset) : loc_bboxIndex[t];
// flipXY swaps the x/y coordinate order when loading the box
loc_bbox[t].xmin = flipXY ? bbox_data[bbox_data_idx * 4 + 1] : bbox_data[bbox_data_idx * 4 + 0];
loc_bbox[t].ymin = flipXY ? bbox_data[bbox_data_idx * 4 + 0] : bbox_data[bbox_data_idx * 4 + 1];
loc_bbox[t].xmax = flipXY ? bbox_data[bbox_data_idx * 4 + 3] : bbox_data[bbox_data_idx * 4 + 2];
loc_bbox[t].ymax = flipXY ? bbox_data[bbox_data_idx * 4 + 2] : bbox_data[bbox_data_idx * 4 + 3];
kept_bboxinfo_flag[cur_idx] = true;
}
else
{
kept_bboxinfo_flag[cur_idx] = false;
}
}
else
{
kept_bboxinfo_flag[cur_idx] = false;
}
}
// filter out overlapped boxes with lower scores
int ref_item_idx = offset;
int32_t ref_bbox_idx = -1;
if (ref_item_idx < max_read_idx)
{
ref_bbox_idx = share_location
? (beforeNMS_index_array[ref_item_idx] % num_preds_per_class + bbox_idx_offset)
: beforeNMS_index_array[ref_item_idx];
}
while ((ref_bbox_idx != -1) && ref_item_idx < max_read_idx)
{
Bbox<T_BBOX> ref_bbox;
ref_bbox.xmin = flipXY ? bbox_data[ref_bbox_idx * 4 + 1] : bbox_data[ref_bbox_idx * 4 + 0];
ref_bbox.ymin = flipXY ? bbox_data[ref_bbox_idx * 4 + 0] : bbox_data[ref_bbox_idx * 4 + 1];
ref_bbox.xmax = flipXY ? bbox_data[ref_bbox_idx * 4 + 3] : bbox_data[ref_bbox_idx * 4 + 2];
ref_bbox.ymax = flipXY ? bbox_data[ref_bbox_idx * 4 + 2] : bbox_data[ref_bbox_idx * 4 + 3];
// Eliminate shared memory RAW hazard
__syncthreads();
for (int t = 0; t < TSIZE; t++)
{
const int cur_idx = threadIdx.x + blockDim.x * t;
const int item_idx = offset + cur_idx;
// only suppress boxes ranked after the reference box
if ((kept_bboxinfo_flag[cur_idx]) && (item_idx > ref_item_idx))
{
if (jaccardOverlap(ref_bbox, loc_bbox[t], isNormalized, caffeSemantics) > nms_threshold)
{
kept_bboxinfo_flag[cur_idx] = false;
}
}
}
__syncthreads();
do
{
ref_item_idx++;
} while (ref_item_idx < max_read_idx && !kept_bboxinfo_flag[ref_item_idx - offset]);
// Move to next valid point
if (ref_item_idx < max_read_idx)
{
ref_bbox_idx = share_location
? (beforeNMS_index_array[ref_item_idx] % num_preds_per_class + bbox_idx_offset)
: beforeNMS_index_array[ref_item_idx];
}
}
// store data
for (int t = 0; t < TSIZE; t++)
{
const int cur_idx = threadIdx.x + blockDim.x * t;
const int read_item_idx = offset + cur_idx;
const int write_item_idx = (i * num_classes * top_k + blockIdx.x * top_k) + cur_idx;
/*
* If not keeping the bbox:
* Set the score to score_shift
* Set the bounding box index to -1
*/
if (read_item_idx < max_idx)
{
afterNMS_scores[write_item_idx] = kept_bboxinfo_flag[cur_idx] ? T_SCORE(beforeNMS_scores[read_item_idx]) : T_SCORE(score_shift);
afterNMS_index_array[write_item_idx] = kept_bboxinfo_flag[cur_idx] ? loc_bboxIndex[t] : -1;
}
}
}
}
/*
 * Host launcher for allClassNMS_kernel: picks the instantiation whose
 * per-thread tile count TSIZE covers top_k with a 512-thread block, and
 * launches one block per class with BS * t_size bytes of dynamic shared
 * memory (one bool "kept" flag per candidate slot).
 * NOTE(review): the kernel table has 8 entries, so callers must keep
 * top_k <= 8 * 512; larger values would index out of bounds.
 * Returns STATUS_SUCCESS, or STATUS_FAILURE if the launch failed.
 */
template <typename T_SCORE, typename T_BBOX>
pluginStatus_t allClassNMS_gpu(hipStream_t stream, const int num, const int num_classes, const int num_preds_per_class,
const int top_k, const float nms_threshold, const bool share_location, const bool isNormalized, void* bbox_data,
void* beforeNMS_scores, void* beforeNMS_index_array, void* afterNMS_scores, void* afterNMS_index_array, bool flipXY,
const float score_shift, bool caffeSemantics)
{
#define P(tsize) allClassNMS_kernel<T_SCORE, T_BBOX, (tsize)>
void (*kernel[8])(const int, const int, const int, const int, const float, const bool, const bool, T_BBOX*,
T_SCORE*, int*, T_SCORE*, int*, bool, const float, bool)
= {
P(1),
P(2),
P(3),
P(4),
P(5),
P(6),
P(7),
P(8),
};
#undef P
const int BS = 512;
const int GS = num_classes;
const int t_size = (top_k + BS - 1) / BS;
// Fix for a hipify translation artifact: the indirect launch through the
// function-pointer table was mangled; restore the hipLaunchKernelGGL form.
hipLaunchKernelGGL(kernel[t_size - 1], dim3(GS), dim3(BS), BS * t_size * sizeof(bool), stream,
num, num_classes, num_preds_per_class, top_k,
nms_threshold, share_location, isNormalized, (T_BBOX*) bbox_data, (T_SCORE*) beforeNMS_scores,
(int*) beforeNMS_index_array, (T_SCORE*) afterNMS_scores, (int*) afterNMS_index_array, flipXY, score_shift,
caffeSemantics);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// allClassNMS LAUNCH CONFIG
typedef pluginStatus_t (*nmsFunc)(hipStream_t, const int, const int, const int, const int, const float, const bool,
const bool, void*, void*, void*, void*, void*, bool, const float, bool);
// Maps a (score type, bbox type) pair to the matching allClassNMS_gpu
// instantiation. Equality compares only the type pair, so a key object
// (function == nullptr) can be used to search the options table.
struct nmsLaunchConfigSSD
{
DataType t_score;
DataType t_bbox;
nmsFunc function;
// Key-only constructor, used for lookup in nmsSsdLCOptions.
nmsLaunchConfigSSD(DataType t_score, DataType t_bbox)
: t_score(t_score)
, t_bbox(t_bbox)
, function(nullptr)
{
}
nmsLaunchConfigSSD(DataType t_score, DataType t_bbox, nmsFunc function)
: t_score(t_score)
, t_bbox(t_bbox)
, function(function)
{
}
// const-qualified (fix: comparison does not modify *this)
bool operator==(const nmsLaunchConfigSSD& other) const
{
return t_score == other.t_score && t_bbox == other.t_bbox;
}
};
// Registered (score, bbox) type combinations; extend this table to support
// additional data-type pairs.
static std::array<nmsLaunchConfigSSD, 2> nmsSsdLCOptions = {
nmsLaunchConfigSSD(DataType::kFLOAT, DataType::kFLOAT, allClassNMS_gpu<float, float>),
nmsLaunchConfigSSD(DataType::kHALF, DataType::kHALF, allClassNMS_gpu<__half, __half>)
};
/*
 * Type-dispatching entry point: finds the launcher registered for the
 * requested (DT_SCORE, DT_BBOX) pair in nmsSsdLCOptions and forwards all
 * arguments to it. Returns STATUS_BAD_PARAM when the combination is not
 * registered.
 */
pluginStatus_t allClassNMS(hipStream_t stream, const int num, const int num_classes, const int num_preds_per_class,
const int top_k, const float nms_threshold, const bool share_location, const bool isNormalized,
const DataType DT_SCORE, const DataType DT_BBOX, void* bbox_data, void* beforeNMS_scores,
void* beforeNMS_index_array, void* afterNMS_scores, void* afterNMS_index_array, bool flipXY,
const float score_shift, bool caffeSemantics)
{
nmsLaunchConfigSSD key(DT_SCORE, DT_BBOX);
for (unsigned idx = 0; idx < nmsSsdLCOptions.size(); ++idx)
{
if (!(key == nmsSsdLCOptions[idx]))
{
continue;
}
DEBUG_PRINTF("all class nms kernel %d\n", idx);
return nmsSsdLCOptions[idx].function(stream, num, num_classes, num_preds_per_class, top_k, nms_threshold,
share_location, isNormalized, bbox_data, beforeNMS_scores, beforeNMS_index_array, afterNMS_scores,
afterNMS_index_array, flipXY, score_shift, caffeSemantics);
}
return STATUS_BAD_PARAM;
}
} // namespace plugin
} // namespace nvinfer1
| 6c8a5d1a7f264e22669392308ae96a330a066076.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/bboxUtils.h"
#include "common/kernels/kernel.h"
#include "cuda_fp16.h"
#include <array>
namespace nvinfer1
{
namespace plugin
{
// Area of a bounding box in float; degenerate boxes return 0. Non-normalized
// (pixel-space) boxes use the inclusive-endpoint convention (+1 each side).
template <typename T_BBOX>
__device__ float bboxSize(const Bbox<T_BBOX>& bbox, const bool normalized)
{
if (float(bbox.xmax) < float(bbox.xmin) || float(bbox.ymax) < float(bbox.ymin))
{
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return 0;
}
else
{
float width = float(bbox.xmax) - float(bbox.xmin);
float height = float(bbox.ymax) - float(bbox.ymin);
if (normalized)
{
return width * height;
}
else
{
// If bbox is not within range [0, 1].
return (width + 1.f) * (height + 1.f);
}
}
}
// Intersection of two boxes; writes the all-zero box when they are disjoint.
// Uses max/min directly on T_BBOX values (the __half case is handled by the
// explicit specialization in this file).
template <typename T_BBOX>
__device__ void intersectBbox(
const Bbox<T_BBOX>& bbox1,
const Bbox<T_BBOX>& bbox2,
Bbox<T_BBOX>* intersect_bbox)
{
if (bbox2.xmin > bbox1.xmax || bbox2.xmax < bbox1.xmin || bbox2.ymin > bbox1.ymax || bbox2.ymax < bbox1.ymin)
{
// Return [0, 0, 0, 0] if there is no intersection.
intersect_bbox->xmin = T_BBOX(0);
intersect_bbox->ymin = T_BBOX(0);
intersect_bbox->xmax = T_BBOX(0);
intersect_bbox->ymax = T_BBOX(0);
}
else
{
intersect_bbox->xmin = max(bbox1.xmin, bbox2.xmin);
intersect_bbox->ymin = max(bbox1.ymin, bbox2.ymin);
intersect_bbox->xmax = min(bbox1.xmax, bbox2.xmax);
intersect_bbox->ymax = min(bbox1.ymax, bbox2.ymax);
}
}
// __half specialization: comparisons and min/max are performed in float with
// implicit conversion back to __half on assignment.
template <>
__device__ void intersectBbox<__half>(
const Bbox<__half>& bbox1,
const Bbox<__half>& bbox2,
Bbox<__half>* intersect_bbox)
{
if (float(bbox2.xmin) > float(bbox1.xmax)
|| float(bbox2.xmax) < float(bbox1.xmin)
|| float(bbox2.ymin) > float(bbox1.ymax)
|| float(bbox2.ymax) < float(bbox1.ymin))
{
// Return [0, 0, 0, 0] if there is no intersection.
intersect_bbox->xmin = __half(0);
intersect_bbox->ymin = __half(0);
intersect_bbox->xmax = __half(0);
intersect_bbox->ymax = __half(0);
}
else
{
intersect_bbox->xmin = max(float(bbox1.xmin), float(bbox2.xmin));
intersect_bbox->ymin = max(float(bbox1.ymin), float(bbox2.ymin));
intersect_bbox->xmax = min(float(bbox1.xmax), float(bbox2.xmax));
intersect_bbox->ymax = min(float(bbox1.ymax), float(bbox2.ymax));
}
}
// Return a copy of the box with corners reordered so that (xmin, ymin) holds
// the smaller and (xmax, ymax) the larger coordinate on each axis.
template <typename T_BBOX>
__device__ Bbox<T_BBOX> getDiagonalMinMaxSortedBox(const Bbox<T_BBOX>& bbox1)
{
Bbox<T_BBOX> result;
result.xmin = min(bbox1.xmin, bbox1.xmax);
result.xmax = max(bbox1.xmin, bbox1.xmax);
result.ymin = min(bbox1.ymin, bbox1.ymax);
result.ymax = max(bbox1.ymin, bbox1.ymax);
return result;
}
// __half specialization: sort corners via float min/max, converting back to
// __half implicitly on assignment.
template <>
__device__ Bbox<__half> getDiagonalMinMaxSortedBox(const Bbox<__half>& bbox1)
{
Bbox<__half> result;
result.xmin = min(float(bbox1.xmin), float(bbox1.xmax));
result.xmax = max(float(bbox1.xmin), float(bbox1.xmax));
result.ymin = min(float(bbox1.ymin), float(bbox1.ymax));
result.ymax = max(float(bbox1.ymin), float(bbox1.ymax));
return result;
}
// Jaccard overlap (IoU) of two boxes in float. Corners are diagonal-sorted
// first so malformed inputs (swapped corners) are tolerated; returns 0 when
// the intersection is empty.
template <typename T_BBOX>
__device__ float jaccardOverlap(
const Bbox<T_BBOX>& bbox1, const Bbox<T_BBOX>& bbox2, const bool normalized, const bool caffeSemantics)
{
Bbox<T_BBOX> intersect_bbox;
Bbox<T_BBOX> localbbox1 = getDiagonalMinMaxSortedBox(bbox1);
Bbox<T_BBOX> localbbox2 = getDiagonalMinMaxSortedBox(bbox2);
intersectBbox(localbbox1, localbbox2, &intersect_bbox);
float intersect_width, intersect_height;
// Only when using Caffe semantics, IOU calculation adds "1" to width and height if bbox is not normalized.
// https://github.com/weiliu89/caffe/blob/ssd/src/caffe/util/bbox_util.cpp#L92-L97
if (normalized || !caffeSemantics)
{
intersect_width = float(intersect_bbox.xmax) - float(intersect_bbox.xmin);
intersect_height = float(intersect_bbox.ymax) - float(intersect_bbox.ymin);
}
else
{
intersect_width = float(intersect_bbox.xmax) - float(intersect_bbox.xmin) + float(T_BBOX(1));
intersect_height = float(intersect_bbox.ymax) - float(intersect_bbox.ymin) + float(T_BBOX(1));
}
if (intersect_width > 0 && intersect_height > 0)
{
// IoU = |A & B| / (|A| + |B| - |A & B|)
float intersect_size = intersect_width * intersect_height;
float bbox1_size = bboxSize(localbbox1, normalized);
float bbox2_size = bboxSize(localbbox2, normalized);
return intersect_size / (bbox1_size + bbox2_size - intersect_size);
}
else
{
return 0.;
}
}
// Reset a detection slot to the "empty" state: zero confidence, no box.
template <typename T_BBOX>
__device__ void emptyBboxInfo(
BboxInfo<T_BBOX>* bbox_info)
{
bbox_info->conf_score = T_BBOX(0);
bbox_info->label = -2; // -1 is reserved for "all labels" when shared_location is true, so -2 marks empty
bbox_info->bbox_idx = -1;
bbox_info->kept = false;
}
/********** new NMS for only score and index array **********/
/*
 * Per-class non-maximum suppression over the top_k score-sorted candidates.
 * Grid layout: one block per class (blockIdx.x); images are iterated in the
 * outer loop. Each thread caches up to TSIZE candidate boxes in registers,
 * and a dynamic shared-memory flag array (blockDim.x * TSIZE bools) tracks
 * survivors while a serial "reference box" sweeps forward, discarding boxes
 * whose IoU with it exceeds nms_threshold. Suppressed entries are written
 * out with score = score_shift and index = -1.
 * Requires blockDim.x * TSIZE >= top_k (see launcher's t_size computation).
 */
template <typename T_SCORE, typename T_BBOX, int TSIZE>
__global__ void allClassNMS_kernel(const int num, const int num_classes, const int num_preds_per_class, const int top_k,
const float nms_threshold, const bool share_location, const bool isNormalized,
T_BBOX* bbox_data, // bbox_data should be float to preserve location information
T_SCORE* beforeNMS_scores, int* beforeNMS_index_array, T_SCORE* afterNMS_scores, int* afterNMS_index_array,
bool flipXY, const float score_shift, bool caffeSemantics)
{
//__shared__ bool kept_bboxinfo_flag[CAFFE_CUDA_NUM_THREADS * TSIZE];
extern __shared__ bool kept_bboxinfo_flag[];
for (int i = 0; i < num; i++)
{
int32_t const offset = i * num_classes * num_preds_per_class + blockIdx.x * num_preds_per_class;
// Should not write data beyond [offset, top_k).
int32_t const max_idx = offset + top_k;
// Should not read beyond [offset, num_preds_per_class).
int32_t const max_read_idx = offset + min(top_k, num_preds_per_class);
int32_t const bbox_idx_offset = i * num_preds_per_class * (share_location ? 1 : num_classes);
// local thread data
int loc_bboxIndex[TSIZE];
Bbox<T_BBOX> loc_bbox[TSIZE];
// initialize Bbox, Bboxinfo, kept_bboxinfo_flag
// Eliminate shared memory RAW hazard
__syncthreads();
#pragma unroll
for (int t = 0; t < TSIZE; t++)
{
const int cur_idx = threadIdx.x + blockDim.x * t;
const int item_idx = offset + cur_idx;
// Init all output data
if (item_idx < max_idx)
{
// Do not access data if it exceeds read boundary
if (item_idx < max_read_idx)
{
loc_bboxIndex[t] = beforeNMS_index_array[item_idx];
}
else
{
loc_bboxIndex[t] = -1;
}
if (loc_bboxIndex[t] != -1)
{
const int bbox_data_idx = share_location ? (loc_bboxIndex[t] % num_preds_per_class + bbox_idx_offset) : loc_bboxIndex[t];
// flipXY swaps the x/y coordinate order when loading the box
loc_bbox[t].xmin = flipXY ? bbox_data[bbox_data_idx * 4 + 1] : bbox_data[bbox_data_idx * 4 + 0];
loc_bbox[t].ymin = flipXY ? bbox_data[bbox_data_idx * 4 + 0] : bbox_data[bbox_data_idx * 4 + 1];
loc_bbox[t].xmax = flipXY ? bbox_data[bbox_data_idx * 4 + 3] : bbox_data[bbox_data_idx * 4 + 2];
loc_bbox[t].ymax = flipXY ? bbox_data[bbox_data_idx * 4 + 2] : bbox_data[bbox_data_idx * 4 + 3];
kept_bboxinfo_flag[cur_idx] = true;
}
else
{
kept_bboxinfo_flag[cur_idx] = false;
}
}
else
{
kept_bboxinfo_flag[cur_idx] = false;
}
}
// filter out overlapped boxes with lower scores
int ref_item_idx = offset;
int32_t ref_bbox_idx = -1;
if (ref_item_idx < max_read_idx)
{
ref_bbox_idx = share_location
? (beforeNMS_index_array[ref_item_idx] % num_preds_per_class + bbox_idx_offset)
: beforeNMS_index_array[ref_item_idx];
}
while ((ref_bbox_idx != -1) && ref_item_idx < max_read_idx)
{
Bbox<T_BBOX> ref_bbox;
ref_bbox.xmin = flipXY ? bbox_data[ref_bbox_idx * 4 + 1] : bbox_data[ref_bbox_idx * 4 + 0];
ref_bbox.ymin = flipXY ? bbox_data[ref_bbox_idx * 4 + 0] : bbox_data[ref_bbox_idx * 4 + 1];
ref_bbox.xmax = flipXY ? bbox_data[ref_bbox_idx * 4 + 3] : bbox_data[ref_bbox_idx * 4 + 2];
ref_bbox.ymax = flipXY ? bbox_data[ref_bbox_idx * 4 + 2] : bbox_data[ref_bbox_idx * 4 + 3];
// Eliminate shared memory RAW hazard
__syncthreads();
for (int t = 0; t < TSIZE; t++)
{
const int cur_idx = threadIdx.x + blockDim.x * t;
const int item_idx = offset + cur_idx;
// only suppress boxes ranked after the reference box
if ((kept_bboxinfo_flag[cur_idx]) && (item_idx > ref_item_idx))
{
if (jaccardOverlap(ref_bbox, loc_bbox[t], isNormalized, caffeSemantics) > nms_threshold)
{
kept_bboxinfo_flag[cur_idx] = false;
}
}
}
__syncthreads();
do
{
ref_item_idx++;
} while (ref_item_idx < max_read_idx && !kept_bboxinfo_flag[ref_item_idx - offset]);
// Move to next valid point
if (ref_item_idx < max_read_idx)
{
ref_bbox_idx = share_location
? (beforeNMS_index_array[ref_item_idx] % num_preds_per_class + bbox_idx_offset)
: beforeNMS_index_array[ref_item_idx];
}
}
// store data
for (int t = 0; t < TSIZE; t++)
{
const int cur_idx = threadIdx.x + blockDim.x * t;
const int read_item_idx = offset + cur_idx;
const int write_item_idx = (i * num_classes * top_k + blockIdx.x * top_k) + cur_idx;
/*
* If not keeping the bbox:
* Set the score to score_shift
* Set the bounding box index to -1
*/
if (read_item_idx < max_idx)
{
afterNMS_scores[write_item_idx] = kept_bboxinfo_flag[cur_idx] ? T_SCORE(beforeNMS_scores[read_item_idx]) : T_SCORE(score_shift);
afterNMS_index_array[write_item_idx] = kept_bboxinfo_flag[cur_idx] ? loc_bboxIndex[t] : -1;
}
}
}
}
/*
 * Host launcher for allClassNMS_kernel: picks the instantiation whose
 * per-thread tile count TSIZE covers top_k with a 512-thread block, and
 * launches one block per class with BS * t_size bytes of dynamic shared
 * memory (one bool "kept" flag per candidate slot).
 * NOTE(review): the kernel table has 8 entries, so callers must keep
 * top_k <= 8 * 512; larger values would index out of bounds.
 * Returns STATUS_SUCCESS, or STATUS_FAILURE if the launch failed.
 */
template <typename T_SCORE, typename T_BBOX>
pluginStatus_t allClassNMS_gpu(cudaStream_t stream, const int num, const int num_classes, const int num_preds_per_class,
const int top_k, const float nms_threshold, const bool share_location, const bool isNormalized, void* bbox_data,
void* beforeNMS_scores, void* beforeNMS_index_array, void* afterNMS_scores, void* afterNMS_index_array, bool flipXY,
const float score_shift, bool caffeSemantics)
{
#define P(tsize) allClassNMS_kernel<T_SCORE, T_BBOX, (tsize)>
void (*kernel[8])(const int, const int, const int, const int, const float, const bool, const bool, T_BBOX*,
T_SCORE*, int*, T_SCORE*, int*, bool, const float, bool)
= {
P(1),
P(2),
P(3),
P(4),
P(5),
P(6),
P(7),
P(8),
};
const int BS = 512;
const int GS = num_classes;
const int t_size = (top_k + BS - 1) / BS;
kernel[t_size - 1]<<<GS, BS, BS * t_size * sizeof(bool), stream>>>(num, num_classes, num_preds_per_class, top_k,
nms_threshold, share_location, isNormalized, (T_BBOX*) bbox_data, (T_SCORE*) beforeNMS_scores,
(int*) beforeNMS_index_array, (T_SCORE*) afterNMS_scores, (int*) afterNMS_index_array, flipXY, score_shift,
caffeSemantics);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// allClassNMS LAUNCH CONFIG
typedef pluginStatus_t (*nmsFunc)(cudaStream_t, const int, const int, const int, const int, const float, const bool,
const bool, void*, void*, void*, void*, void*, bool, const float, bool);
// Dispatch-table entry binding a (score type, bbox type) pair to the
// allClassNMS_gpu instantiation that handles it.
struct nmsLaunchConfigSSD
{
    DataType t_score;  // data type of the score tensors
    DataType t_bbox;   // data type of the bounding-box tensor
    nmsFunc function;  // launcher for this type combination (null for probe keys)
    // Probe constructor: used only as a lookup key, no launcher attached.
    nmsLaunchConfigSSD(DataType t_score, DataType t_bbox)
        : t_score(t_score)
        , t_bbox(t_bbox)
        , function(nullptr)
    {
    }
    nmsLaunchConfigSSD(DataType t_score, DataType t_bbox, nmsFunc function)
        : t_score(t_score)
        , t_bbox(t_bbox)
        , function(function)
    {
    }
    // Entries match on the type pair only; the launcher is deliberately
    // ignored so a probe key can match a populated table entry.
    // Fix: const-qualified so it can be invoked on const objects/references.
    bool operator==(const nmsLaunchConfigSSD& other) const
    {
        return t_score == other.t_score && t_bbox == other.t_bbox;
    }
};
// Registry of the supported (score, bbox) type combinations and the
// kernel launcher instantiated for each.
static std::array<nmsLaunchConfigSSD, 2> nmsSsdLCOptions = {
nmsLaunchConfigSSD(DataType::kFLOAT, DataType::kFLOAT, allClassNMS_gpu<float, float>),
nmsLaunchConfigSSD(DataType::kHALF, DataType::kHALF, allClassNMS_gpu<__half, __half>)
};
// Type-dispatched entry point for all-class NMS: looks up the launcher
// registered for the requested (score, bbox) data-type pair and forwards
// all arguments to it. Returns STATUS_BAD_PARAM for unsupported pairs.
pluginStatus_t allClassNMS(cudaStream_t stream, const int num, const int num_classes, const int num_preds_per_class,
    const int top_k, const float nms_threshold, const bool share_location, const bool isNormalized,
    const DataType DT_SCORE, const DataType DT_BBOX, void* bbox_data, void* beforeNMS_scores,
    void* beforeNMS_index_array, void* afterNMS_scores, void* afterNMS_index_array, bool flipXY,
    const float score_shift, bool caffeSemantics)
{
    // Probe key: compares equal to a registry entry when the type pair matches.
    nmsLaunchConfigSSD wanted = nmsLaunchConfigSSD(DT_SCORE, DT_BBOX);
    unsigned idx = 0;
    while (idx < nmsSsdLCOptions.size())
    {
        if (wanted == nmsSsdLCOptions[idx])
        {
            DEBUG_PRINTF("all class nms kernel %d\n", idx);
            return nmsSsdLCOptions[idx].function(stream, num, num_classes, num_preds_per_class, top_k, nms_threshold,
                share_location, isNormalized, bbox_data, beforeNMS_scores, beforeNMS_index_array, afterNMS_scores,
                afterNMS_index_array, flipXY, score_shift, caffeSemantics);
        }
        ++idx;
    }
    // No registered launcher handles this type combination.
    return STATUS_BAD_PARAM;
}
} // namespace plugin
} // namespace nvinfer1
|
38fee62456419f65cfcac268a1ccae1e04ff4398.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
MIT License
Copyright (c) 2016 Antti-Pekka Hynninen
Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*******************************************************************************/
//
// Micro-benchmark for memory operations
//
#include <cstdio>
#include "CudaUtils.h"
#include "cumbUtils.h"
// Touches every element of 'buffer' with a grid-stride loop so that prior
// cache contents are evicted before the next benchmark kernel runs.
__global__ void clearCacheKernel(int* buffer, const int bufferSize) {
  const int step = blockDim.x*gridDim.x;
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  while (idx < bufferSize) {
    buffer[idx] = idx;
    idx += step;
  }
}
// Times a single global load per thread with clock64() and prints the cycle
// count. The store to shared sh_a keeps the load from being optimized away.
// NOTE(review): all threads store to the same sh_a location -- presumably a
// deliberate don't-care since only the timing is of interest.
template<typename T>
__global__ void memoryTransactionKernel(T* buffer) {
__shared__ T sh_a;
int i = threadIdx.x;
long long int start = clock64();
T a = buffer[i];
sh_a = a;
long long int end = clock64();
printf("%d %f\n", (int)(end - start), (float)sh_a);
}
// Pointer-chase kernel: each thread follows a chain of indices stored in
// 'array' (array[j] yields the next j), timing each dependent load with
// clock(). The per-iteration store into shared dummy[] keeps the load live.
// Thread 0 reports the mean/stddev of iterations 1..niter-1 (iteration 0 is
// skipped as a warm-up). Statement order is timing-critical; code unchanged.
template <typename T, int niter>
__global__ void pChaseKernel(T* array, const int skip, const int offset) {
__shared__ int duration[niter];
__shared__ T dummy[niter];
{
T j = threadIdx.x*skip + offset;
for (int it=0;it < niter;it++) {
int start = clock();
j = array[j];
dummy[it] = j;
int end = clock();
duration[it] = end - start;
}
}
if (threadIdx.x == 0) {
#if 1
int total_duration = 0;
int total_duration2 = 0;
int total_dummy = 0;
for (int it=1;it < niter;it++) {
int d = duration[it];
total_duration += d;
total_duration2 += d*d;
total_dummy += (int)dummy[it];
}
float avg_duration = (float)total_duration/(float)(niter - 1);
float avg_duration2 = (float)total_duration2/(float)(niter - 1);
float std_duration = sqrtf(avg_duration2 - avg_duration*avg_duration);
printf("%1.2f %1.2f %d\n", avg_duration, std_duration, total_dummy);
#else
for (int it=0;it < niter;it++) {
printf("%d %d\n", duration[it], (int)dummy[it]);
}
#endif
}
}
// Runtime-niter variant of the pointer-chase kernel: timings and per-thread
// chase values live in dynamic shared memory (niter ints followed by
// niter*blockDim.x T values). Thread 0 reports mean/stddev of iterations
// 1..niter-1. Timing bracketing is order-critical; code left unchanged.
template <typename T>
__global__ void pChaseKernel2(T* array, const int skip, const int offset, const int niter) {
extern __shared__ char dummychar[];
volatile int* duration = (int*)dummychar;
volatile T* dummy = (T*)&dummychar[niter*sizeof(int)];
{
T j = threadIdx.x*skip + offset;
for (int it=0;it < niter;it++) {
// int start = clock();
duration[it] = clock();
j = array[j];
dummy[it*blockDim.x + threadIdx.x] = j;
// int end = clock();
// duration[it] = end - start;
duration[it] = clock() - duration[it];
}
}
if (threadIdx.x == 0) {
#if 1
int total_duration = 0;
int total_duration2 = 0;
int total_dummy = 0;
for (int it=1;it < niter;it++) {
int d = duration[it];
total_duration += d;
total_duration2 += d*d;
// NOTE(review): reads dummy[it], i.e. thread 0's slots only for small it
total_dummy += (int)dummy[it];
}
float avg_duration = (float)total_duration/(float)(niter - 1);
float avg_duration2 = (float)total_duration2/(float)(niter - 1);
float std_duration = sqrtf(avg_duration2 - avg_duration*avg_duration);
printf("%1.2f %1.2f %d\n", avg_duration, std_duration, total_dummy);
#else
for (int it=0;it < niter;it++) {
printf("%d %d\n", duration[it], (int)dummy[it]);
}
#endif
}
}
// Pointer-chase variant that times the whole chase loop once (instead of
// per-iteration) and prints the average cycles per load. The shared-memory
// stores keep the loads live. Timing bracket is order-critical; unchanged.
template <typename T>
__global__ void pChaseMaxwellKernel(T* array, const int skip, const int offset, const int niter) {
extern __shared__ char dummychar[];
T* dummy = (T*)dummychar;
int start = clock();
T j = threadIdx.x*skip + offset;
for (int it=0;it < niter;it++) {
j = array[j];
dummy[it] = j;
}
int end = clock();
int duration = (int)(end - start);
if (threadIdx.x == 0) {
int max_dummy = 0;
for (int it=0;it < niter;it++) max_dummy = max(max_dummy, (int)dummy[it]);
printf("%1.2f %d\n", (float)duration/(float)niter, max_dummy);
}
}
// Like pChaseMaxwellKernel but with a per-thread slot in shared memory
// (niter*blockDim.x entries), so each thread's chase values are preserved
// and thread 0 reports the maximum over all threads.
template <typename T>
__global__ void pChaseMaxwellKernel2(T* array, const int skip, const int offset, const int niter) {
extern __shared__ char dummychar[];
volatile T* dummy = (T*)dummychar;
int start = clock();
T j = threadIdx.x*skip + offset;
for (int it=0;it < niter;it++) {
T j0 = j;
j = array[j];
// printf("%d %d | %d -> %d\n", it, threadIdx.x, j0, j);
dummy[it*blockDim.x + threadIdx.x] = j;
}
int end = clock();
int duration = (int)(end - start);
if (threadIdx.x == 0) {
int max_dummy = 0;
for (int it=0;it < niter;it++) {
for (int t=0;t < blockDim.x;t++) {
max_dummy = max(max_dummy, (int)dummy[it*blockDim.x + t]);
}
}
printf("%1.2f %d\n", (float)duration/(float)niter, max_dummy);
}
}
// Store benchmark: each iteration writes one 32-int row, with thread
// threadIdx.x covering column threadIdx.x of the row.
// NOTE(review): row stride is fixed at 32, so this presumably expects
// blockDim.x <= 32 -- confirm against the launch site.
__global__ void writePCLKernel(int* array, const int niter) {
  for (int row = 0; row < niter; row++) {
    array[row*32 + threadIdx.x] = 1;
  }
}
// Copy benchmark staged through shared memory: each iteration loads one
// 32-int row into shbuf, barriers, then stores it to dst. Barrier placement
// guards the shared-memory reuse across iterations; code left unchanged.
__global__ void copyPCLKernel(int* src, int* dst, const int niter) {
__shared__ int shbuf[32];
for (int it=0;it < niter;it++) {
__syncthreads();
shbuf[threadIdx.x] = src[it*32 + threadIdx.x];
__syncthreads();
dst[it*32 + threadIdx.x] = shbuf[threadIdx.x];
}
}
// Times one global load per thread with clock64(), gathers the per-thread
// cycle counts in dynamic shared memory (blockDim.x ints), and thread 0 of
// block 0 prints min/max/average. The final store to bufferOut keeps the
// load from being optimized away.
template <typename T>
__global__ void memoryLatencyKernel(T* bufferIn, T* bufferOut) {
extern __shared__ int shCycles[];
// int p = threadIdx.x*(128/sizeof(T)) + 1 + blockIdx.x*1024;
int p = threadIdx.x;
//if (threadIdx.x % 32 > 0) p += 128;
long long int start = clock64();
T a = bufferIn[p];
long long int end = clock64();
shCycles[threadIdx.x] = (int)(end - start);
__syncthreads();
if (blockIdx.x == 0 && threadIdx.x == 0) {
int minCycle = (1 << 30);
int maxCycle = 0;
int aveCycle = 0;
for (int i=0;i < blockDim.x;i++) {
minCycle = min(minCycle, shCycles[i]);
maxCycle = max(maxCycle, shCycles[i]);
aveCycle += shCycles[i];
}
printf("%d %d %d\n", minCycle, maxCycle, aveCycle/blockDim.x);
}
bufferOut[threadIdx.x] = a;
}
// Store-latency variant: times a single global store per thread and prints
// min/max/average cycles from block 0. Dynamic shared memory must hold
// blockDim.x ints.
template <typename T>
__global__ void memoryLatencyKernel2(T* bufferOut) {
extern __shared__ int shCycles[];
int p = threadIdx.x + 1;
long long int start = clock64();
bufferOut[p] = 1.2;
long long int end = clock64();
shCycles[threadIdx.x] = (int)(end - start);
__syncthreads();
if (blockIdx.x == 0 && threadIdx.x == 0) {
int minCycle = (1 << 30);
int maxCycle = 0;
int aveCycle = 0;
for (int i=0;i < blockDim.x;i++) {
minCycle = min(minCycle, shCycles[i]);
maxCycle = max(maxCycle, shCycles[i]);
aveCycle += shCycles[i];
}
printf("%d %d %d\n", minCycle, maxCycle, aveCycle/blockDim.x);
}
}
// Write-pattern benchmark: each thread stores one element; lanes of a warp
// write contiguously (tid) while warps are spaced 'stride' elements apart,
// shifted by 'offset'. 'nwrite' is currently unused in the active path.
template <typename T>
__global__ void memoryWriteKernel(T* buffer, const int nwrite, const int stride, const int offset) {
// for (int t=threadIdx.x + blockIdx.x*blockDim.x;t < nwrite;t+=blockDim.x*gridDim.x) {
int t = threadIdx.x + blockIdx.x*blockDim.x;
int wid = t / 32;
int tid = t % 32;
// T a = (t*nwrite*blockDim.x) + nwrite*threadIdx.x;
// buffer[t*stride + offset] = a;
buffer[wid*stride + tid + offset] = 1;
// }
}
// One byte stored per warp: lane 0 of each warp writes at its warp's strided
// offset. 'nwrite' is accepted for interface parity but unused here.
__global__ void memoryWriteKernel2(char* buffer, const int nwrite, const int stride, const int offset) {
  const int gtid = threadIdx.x + blockIdx.x*blockDim.x;
  const int warp = gtid / 32;
  const int lane = gtid % 32;
  if (lane == 0) {
    buffer[warp*stride + offset] = 1;
  }
}
// Times a short sequence of integer ALU operations with clock64() and prints
// the cycle count per thread; a and b are printed so the arithmetic cannot
// be dead-code eliminated. Statement order is what is being measured.
__global__ void cyclesPerOperationKernel() {
// int a = threadIdx.x;
// int b = blockIdx.x;
long long int start = clock64();
int a = threadIdx.x;
int b = blockIdx.x;
b += a;
a *= 17;
b += 3;
a -= b;
long long int end = clock64();
printf("threadIdx.x %d cycles %lld a %d b %d\n", threadIdx.x, end-start, a, b);
}
// Loads one double per lane at an offset of one element (t+1) -- presumably
// to straddle a cache-line boundary -- then sums the warp's values with
// __shfl (HIP's mask-less warp shuffle) and writes the sum from lane 0 so
// the loads are observable.
// NOTE(review): assumes a single full 32-lane warp -- confirm launch config.
__global__ void cacheLineKernel(double* buffer, double* res) {
int t = threadIdx.x;
//
double a = buffer[t+1];
//
double sum = 0.0;
for (int i=0;i < 32;i++) {
sum += __shfl(a, i);
}
if (threadIdx.x == 0) res[0] = sum;
}
// Scatter-store benchmark: marks buffer[pos[i]] = 1.0 for every position in
// pos[0..n), block-strided over the entries.
__global__ void writeVolMmk(const int* pos, const int n, double* buffer) {
  for (int idx = threadIdx.x; idx < n; idx += blockDim.x) {
    buffer[pos[idx]] = 1.0;
  }
}
// Gather/scatter benchmark: gathers buffer[posr[i]] into shared memory,
// barriers, then scatters to buffer[posw[i]]. Dynamic shared memory must
// hold n doubles (sized by the caller). Barrier placement is essential to
// the read-before-write ordering; code left unchanged.
__global__ void readWriteVolMmk(const int* posr, const int* posw, const int n, double* buffer) {
extern __shared__ double shBuffer[];
__syncthreads();
for (int i=threadIdx.x;i < n;i+=blockDim.x) {
int posr_val = posr[i];
shBuffer[i] = buffer[posr_val];
}
__syncthreads();
for (int i=threadIdx.x;i < n;i+=blockDim.x) {
int posw_val = posw[i];
buffer[posw_val] = shBuffer[i];
}
}
// ############################################################################
// ############################################################################
// ############################################################################
static int SM_major = 0;
void volMmk(int nthread, char* filebase);
void copyPCL(int nthread, int niter);
void writePCL(int nthread, int niter);
template <typename T> void pChase(int nthread, int stride, int offset);
void memoryTransactions();
template <typename T> void memoryLatency(int nwarp, int nsm);
void clearCache(int* buffer, const int bufferSize);
void cyclesPerOperation();
template <typename T> void memoryWrite(int stride, int offset);
void memoryWrite2(int stride, int offset);
void cacheLine();
void printDeviceInfo();
//
// Entry point: parses "-name value" command line options, prints device
// info, and runs the pointer-chase benchmark (other micro-benchmarks are
// retained below, commented out).
// Fixes vs. original: bounded %63s read into file[64] (was unbounded %s);
// a flag with a missing value no longer reads argv[argc] (NULL); removed
// the unreachable "else if (argc > 1)" branch (argc > 1 == argc >= 2).
//
int main(int argc, char *argv[]) {
  int nthread = 1;   // 0 = sweep thread counts 1..32
  int stride = 1;    // pointer-chase stride in bytes
  int offset = 0;    // pointer-chase start offset (elements)
  int niter = 0;     // iteration count for the PCL benchmarks
  int deviceID = 0;  // device to run on
  int size = 4;      // element size in bytes: 4 -> int, else long long int
  char file[64];
  file[0] = '\0';    // stays empty unless -file is given
  bool arg_ok = true;
  // Parse "-name value" pairs; unknown flags or missing values abort parsing.
  int argi = 1;
  while (argi < argc) {
    if (argi + 1 >= argc) {
      // every recognized option requires a value argument
      arg_ok = false;
      break;
    }
    if (strcmp(argv[argi], "-stride") == 0) {
      sscanf(argv[argi+1], "%d", &stride);
    } else if (strcmp(argv[argi], "-nthread") == 0) {
      sscanf(argv[argi+1], "%d", &nthread);
    } else if (strcmp(argv[argi], "-offset") == 0) {
      sscanf(argv[argi+1], "%d", &offset);
    } else if (strcmp(argv[argi], "-size") == 0) {
      sscanf(argv[argi+1], "%d", &size);
    } else if (strcmp(argv[argi], "-niter") == 0) {
      sscanf(argv[argi+1], "%d", &niter);
    } else if (strcmp(argv[argi], "-file") == 0) {
      // %63s bounds the copy so a long argument cannot overflow file[]
      sscanf(argv[argi+1], "%63s", file);
    } else if (strcmp(argv[argi], "-device") == 0) {
      sscanf(argv[argi+1], "%d", &deviceID);
    } else {
      arg_ok = false;
      break;
    }
    argi += 2;
  }
  if (!arg_ok) {
    printf("cumb [options]\n");
    printf("Options:\n");
    printf("-nthread [nthread]\n");
    printf("-stride [stride]\n");
    printf("-offset [offset]\n");
    printf("-size [size]\n");
    printf("-niter [niter]\n");
    printf("-file [file]\n");
    printf("-device [device]\n");
    return 1;
  }
  cudaCheck(hipSetDevice(deviceID));
  printDeviceInfo();
  // Scratch buffer used to flush the caches between benchmark runs.
  int* buffer = NULL;
  int bufferSize = 1000000;
  allocate_device<int>(&buffer, bufferSize);
  // Other benchmarks, kept for reference:
  // for (int i=1;i <= 1;i++) { clearCache(buffer, bufferSize); memoryLatency<long long int>(i, 1); }
  // clearCache(buffer, bufferSize); memoryLatency<int>(1);
  // clearCache(buffer, bufferSize); memoryWrite2(stride, offset);
  // clearCache(buffer, bufferSize); memoryTransactions();
  // clearCache(buffer, bufferSize); writePCL(nthread, niter);
  // clearCache(buffer, bufferSize); copyPCL(nthread, niter);
  // volMmk(nthread, file);
#if 1
  if (nthread == 0) {
    // sweep warp occupancy from 1 to 32 threads
    for (int i = 1; i <= 32; i++) {
      clearCache(buffer, bufferSize);
      if (size == 4)
        pChase<int>(i, stride, offset);
      else
        pChase<long long int>(i, stride, offset);
    }
  } else {
    clearCache(buffer, bufferSize);
    if (size == 4)
      pChase<int>(nthread, stride, offset);
    else
      pChase<long long int>(nthread, stride, offset);
  }
#endif
  // clearCache(buffer, bufferSize); pChase<long long int>(stride);
  // clearCache(buffer, bufferSize); memoryWrite<long long int>(stride, offset);
  // clearCache(buffer, bufferSize); cacheLine();
  deallocate_device<int>(&buffer);
  // cyclesPerOperation();
  cudaCheck(hipDeviceReset());
  return 0;
}
// Loads read/write index lists from "<filebase>r.txt" and "<filebase>w.txt",
// uploads them, and runs the readWriteVolMmk gather/scatter benchmark with
// one block of 'nthread' threads. Bails out if the lists are empty or
// mismatched in length.
// NOTE(review): maxpos uses .back(), so this presumably assumes the
// position lists are sorted ascending -- confirm the file format.
void volMmk(int nthread, char* filebase) {
char filename[256];
sprintf(filename, "%sr.txt", filebase);
std::vector<int> h_posr = loadPos(filename);
sprintf(filename, "%sw.txt", filebase);
std::vector<int> h_posw = loadPos(filename);
if (h_posr.size() == 0 || h_posr.size() != h_posw.size()) return;
int* posr;
int* posw;
allocate_device<int>(&posr, h_posr.size());
allocate_device<int>(&posw, h_posw.size());
copy_HtoD_sync<int>(h_posr.data(), posr, h_posr.size());
copy_HtoD_sync<int>(h_posw.data(), posw, h_posw.size());
int maxpos = ::max(h_posr.back(), h_posw.back());
double* buffer;
allocate_device<double>(&buffer, maxpos + 1);
// writeVolMmk<<< 1, nthread >>>(posw, h_posw.size(), buffer);
// Dynamic shared memory sized to hold the whole read list as doubles.
hipLaunchKernelGGL(( readWriteVolMmk), dim3(1), dim3(nthread), h_posr.size()*sizeof(double) , 0, posr, posw, h_posw.size(), buffer);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
deallocate_device<int>(&posr);
deallocate_device<int>(&posw);
deallocate_device<double>(&buffer);
}
// Host wrapper for the copy-per-cache-line benchmark: allocates source and
// destination arrays and runs copyPCLKernel with one block of nthread threads.
void copyPCL(int nthread, int niter) {
int arraySize = 32*1024*1024;
int* arraySrc;
int* arrayDst;
allocate_device<int>(&arraySrc, arraySize);
allocate_device<int>(&arrayDst, arraySize);
hipLaunchKernelGGL(( copyPCLKernel), dim3(1), dim3(nthread) , 0, 0, arraySrc, arrayDst, niter);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
deallocate_device<int>(&arraySrc);
deallocate_device<int>(&arrayDst);
}
// Host wrapper for the write-per-cache-line benchmark: allocates the target
// array and runs writePCLKernel with one block of nthread threads.
void writePCL(int nthread, int niter) {
int arraySize = 32*1024*1024;
int* array;
allocate_device<int>(&array, arraySize);
hipLaunchKernelGGL(( writePCLKernel), dim3(1), dim3(nthread) , 0, 0, array, niter);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
deallocate_device<int>(&array);
}
//
// Count the number of global-memory transactions a single warp-level request
// generates when accessing the element positions listed in pos[0..n).
// Positions map to segments of accWidth elements; every newly-touched
// segment costs one transaction.
// NOTE: assumes pos[] is sorted ascending.
//
int glTransactions(const int* pos, const int n, const int accWidth) {
  int numTrans = 0;
  int prevSeg = -1;  // sentinel: no segment seen yet
  for (int idx = 0; idx < n; idx++) {
    const int seg = pos[idx] / accWidth;
    if (seg != prevSeg) numTrans++;
    prevSeg = seg;
  }
  return numTrans;
}
//
// Host driver for the pointer-chase benchmark.
// Stride = stride in bytes; skip = stride in elements of T.
// Builds the chase chain on the host: element i+offset holds the index of
// the next link (skip*nthread further on, wrapped modulo arraySize), all
// other elements are poisoned with -1 so a broken chain faults visibly.
// Dispatches to the Maxwell-style kernel on SM >= 5, else the Fermi/Kepler
// per-load-timed kernel. Chain construction is index-arithmetic heavy and
// left byte-identical.
//
template <typename T>
void pChase(int nthread, int stride, int offset) {
int niter = 320;
int skip = stride/sizeof(T);
int arraySize = (niter + 1)*skip*nthread*100;
// printf("arraySize %dMB\n", arraySize*sizeof(T)/(1024*1024));
T* array;
allocate_device<T>(&array, arraySize);
T* h_array = new T[arraySize];
for (int i=0;i < arraySize;i++) {
h_array[i] = (T)(-1);
}
// for (int i=0;i < arraySize;i++) {
// int iblock = i/nthread;
// int ithread = i % nthread;
// h_array[i] = (T)((iblock + stride)*nthread + ithread) % arraySize;
// }
for (int i=0;i < niter*skip*nthread;i+=skip) {
if (skip*nthread + i + offset > arraySize) {
printf("ERROR\n");
}
h_array[i + offset] = (skip*nthread + i + offset) % arraySize;
}
// int k = 0;
// for (int j=0;j < 33;j++) {
// for (int i=0;i < 32;i++) {
// int val = (int)h_array[k++];
// if (val < 0) {
// printf("X ");
// } else {
// printf("%d ", val);
// }
// }
// printf("\n");
// }
// int* pos = new int[nthread];
// for (int t=0;t < nthread;t++) {
// pos[t] = t*skip;
// }
// for (int t=0;t < nthread;t++) {
// printf("%d ", pos[t]);
// }
// printf("\n");
// int accWidth = 128/sizeof(T);
// int tran = glTransactions(pos, nthread, accWidth);
// printf("tran %d\n", tran);
// delete [] pos;
copy_HtoD_sync<T>(h_array, array, arraySize);
cudaCheck(hipDeviceSynchronize());
delete [] h_array;
if (SM_major >= 5) {
hipLaunchKernelGGL(( pChaseMaxwellKernel<T>) , dim3(1), dim3(nthread), niter*sizeof(T) , 0, array, skip, offset, niter);
// pChaseMaxwellKernel2<T> <<< 1, nthread, niter*sizeof(T)*nthread >>>(array, skip, offset, niter);
} else {
hipLaunchKernelGGL(( pChaseKernel<T, 320>) , dim3(1), dim3(nthread) , 0, 0, array, skip, offset);
// pChaseKernel2<T> <<< 1, nthread, niter*sizeof(int) + niter*sizeof(T)*nthread >>>(array, skip, offset, niter);
}
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
deallocate_device<T>(&array);
}
// Host wrapper for the cache-line probe: one warp reads offset doubles and
// reduces them; the result is written into the second half of the buffer.
void cacheLine() {
double* buffer = NULL;
allocate_device<double>(&buffer, 256);
cudaCheck(hipDeviceSynchronize());
int nthread = 32;
int nblock = 1;
hipLaunchKernelGGL(( cacheLineKernel) , dim3(nblock), dim3(nthread) , 0, 0, buffer, &buffer[128]);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
deallocate_device<double>(&buffer);
}
// Runs the single-load timing kernel three times each for float and double
// (reinterpreting the same buffer); repeats expose cold vs. warm latency.
void memoryTransactions() {
float* buffer = NULL;
allocate_device<float>(&buffer, 1024);
int nthread = 1;
int nblock =1;
hipLaunchKernelGGL(( memoryTransactionKernel<float>) , dim3(nblock), dim3(nthread) , 0, 0, buffer);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( memoryTransactionKernel<float>) , dim3(nblock), dim3(nthread) , 0, 0, buffer);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( memoryTransactionKernel<float>) , dim3(nblock), dim3(nthread) , 0, 0, buffer);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( memoryTransactionKernel<double>) , dim3(nblock), dim3(nthread) , 0, 0, (double *)buffer);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( memoryTransactionKernel<double>) , dim3(nblock), dim3(nthread) , 0, 0, (double *)buffer);
cudaCheck(hipGetLastError());
hipLaunchKernelGGL(( memoryTransactionKernel<double>) , dim3(nblock), dim3(nthread) , 0, 0, (double *)buffer);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
deallocate_device<float>(&buffer);
}
// Host wrapper for the strided-write benchmark: allocates a buffer large
// enough for the worst-case stride, reports the achievable occupancy for
// the kernel, launches it, and prints the bytes written.
// NOTE(review): the occupancy API's return status is not checked.
template <typename T>
void memoryWrite(int stride, int offset) {
int nwrite = 31249408/2;
int bufferSize = nwrite*34;
T* buffer = NULL;
allocate_device<T>(&buffer, bufferSize);
printf("bufferSize %f GB\n", bufferSize*sizeof(T)/1000000000.0f);
cudaCheck(hipDeviceSynchronize());
int nthread = 512;
int nblock = nwrite/nthread;
int numActiveBlock;
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, memoryWriteKernel<T>, nthread, 0);
printf("nthread %d nblock %d numActiveBlock %d\n", nthread, nblock, numActiveBlock);
hipLaunchKernelGGL(( memoryWriteKernel<T>) , dim3(nblock), dim3(nthread) , 0, 0, buffer, nwrite, stride, offset);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
long long int bytesWritten = nwrite*sizeof(T);
printf("wrote %lld bytes using stride %d and offset %d\n", bytesWritten, stride, offset);
deallocate_device<T>(&buffer);
}
// Byte-granular variant of memoryWrite: one byte stored per warp via
// memoryWriteKernel2, occupancy reported before launch.
void memoryWrite2(int stride, int offset) {
int nwrite = 31249408/2;
int bufferSize = nwrite*34;
char* buffer = NULL;
allocate_device<char>(&buffer, bufferSize);
printf("bufferSize %f GB\n", bufferSize*sizeof(char)/1000000000.0f);
cudaCheck(hipDeviceSynchronize());
int nthread = 512;
int nblock = nwrite/nthread;
int numActiveBlock;
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, memoryWriteKernel2, nthread, 0);
printf("nthread %d nblock %d numActiveBlock %d\n", nthread, nblock, numActiveBlock);
hipLaunchKernelGGL(( memoryWriteKernel2) , dim3(nblock), dim3(nthread) , 0, 0, buffer, nwrite, stride, offset);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
printf("wrote using stride %d and offset %d\n", stride, offset);
deallocate_device<char>(&buffer);
}
// Host wrapper for the latency benchmarks. The active path times a single
// store per thread (memoryLatencyKernel2); the load-timing path is kept
// commented out. bufferIn is allocated only for that dormant path.
template <typename T> void memoryLatency(int nwarp, int nsm) {
T* bufferIn = NULL;
T* bufferOut = NULL;
allocate_device<T>(&bufferIn, 16384*nwarp*nsm);
allocate_device<T>(&bufferOut, 16384*nwarp*nsm);
cudaCheck(hipDeviceSynchronize());
// printf("%d\n", nwarp);
// int nthread = 32*nwarp;
// int nblock = nsm;
// int shmemsize = nthread*sizeof(int);
// memoryLatencyKernel<T> <<< nblock, nthread, shmemsize >>>(bufferIn, bufferOut);
// cudaCheck(hipGetLastError());
int nthread = 32*nwarp;
int nblock = nsm;
int shmemsize = nthread*sizeof(int);
hipLaunchKernelGGL(( memoryLatencyKernel2<T>) , dim3(nblock), dim3(nthread), shmemsize , 0, bufferOut);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
// printf("\n");
deallocate_device<T>(&bufferIn);
deallocate_device<T>(&bufferOut);
}
// Rewrites the whole scratch buffer on the device to evict cached data
// between benchmark runs; synchronizes before and after.
void clearCache(int* buffer, const int bufferSize) {
cudaCheck(hipDeviceSynchronize());
int nthread = 1024;
int nblock = (bufferSize - 1)/nthread + 1;
hipLaunchKernelGGL(( clearCacheKernel) , dim3(nblock), dim3(nthread) , 0, 0, buffer, bufferSize);
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
}
// Host wrapper: runs the ALU-cycle-count probe on a single warp.
void cyclesPerOperation() {
cudaCheck(hipDeviceSynchronize());
int nthread = 32;
int nblock = 1;
hipLaunchKernelGGL(( cyclesPerOperationKernel) , dim3(nblock), dim3(nthread) , 0, 0, );
cudaCheck(hipGetLastError());
cudaCheck(hipDeviceSynchronize());
}
// Queries the current device and prints name, SM version, clock, SM count,
// ECC state, theoretical memory bandwidth (DDR: clock * 2 * bus bytes),
// shared-memory bank width, and L2 size. Also records the SM major version
// in the file-global SM_major used for kernel dispatch.
void printDeviceInfo() {
int deviceID;
cudaCheck(hipGetDevice(&deviceID));
hipDeviceProp_t prop;
cudaCheck(hipGetDeviceProperties(&prop, deviceID));
hipSharedMemConfig pConfig;
cudaCheck(hipDeviceGetSharedMemConfig(&pConfig));
int shMemBankSize = 4;
if (pConfig == hipSharedMemBankSizeEightByte) shMemBankSize = 8;
double mem_BW = (double)(prop.memoryClockRate*2*(prop.memoryBusWidth/8))/1.0e6;
SM_major = prop.major;
printf("Using %s SM version %d.%d\n", prop.name, prop.major, prop.minor);
printf("Clock %1.3lfGhz numSM %d ECC %d mem BW %1.2lfGB/s shMemBankSize %dB\n", (double)prop.clockRate/1e6,
prop.multiProcessorCount, prop.ECCEnabled, mem_BW, shMemBankSize);
printf("L2 %1.2lfMB\n", (double)prop.l2CacheSize/(double)(1024*1024));
}
| 38fee62456419f65cfcac268a1ccae1e04ff4398.cu | /******************************************************************************
MIT License
Copyright (c) 2016 Antti-Pekka Hynninen
Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*******************************************************************************/
//
// Micro-benchmark for memory operations
//
#include <cstdio>
#include "CudaUtils.h"
#include "cumbUtils.h"
// Touches every element of 'buffer' with a grid-stride loop so that prior
// cache contents are evicted before the next benchmark kernel runs.
__global__ void clearCacheKernel(int* buffer, const int bufferSize) {
  const int step = blockDim.x*gridDim.x;
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  while (idx < bufferSize) {
    buffer[idx] = idx;
    idx += step;
  }
}
// Times a single global load per thread with clock64() and prints the cycle
// count. The store to shared sh_a keeps the load from being optimized away.
// NOTE(review): all threads store to the same sh_a location -- presumably a
// deliberate don't-care since only the timing is of interest.
template<typename T>
__global__ void memoryTransactionKernel(T* buffer) {
__shared__ T sh_a;
int i = threadIdx.x;
long long int start = clock64();
T a = buffer[i];
sh_a = a;
long long int end = clock64();
printf("%d %f\n", (int)(end - start), (float)sh_a);
}
// Pointer-chase kernel: each thread follows a chain of indices stored in
// 'array' (array[j] yields the next j), timing each dependent load with
// clock(). The per-iteration store into shared dummy[] keeps the load live.
// Thread 0 reports the mean/stddev of iterations 1..niter-1 (iteration 0 is
// skipped as a warm-up). Statement order is timing-critical; code unchanged.
template <typename T, int niter>
__global__ void pChaseKernel(T* array, const int skip, const int offset) {
__shared__ int duration[niter];
__shared__ T dummy[niter];
{
T j = threadIdx.x*skip + offset;
for (int it=0;it < niter;it++) {
int start = clock();
j = array[j];
dummy[it] = j;
int end = clock();
duration[it] = end - start;
}
}
if (threadIdx.x == 0) {
#if 1
int total_duration = 0;
int total_duration2 = 0;
int total_dummy = 0;
for (int it=1;it < niter;it++) {
int d = duration[it];
total_duration += d;
total_duration2 += d*d;
total_dummy += (int)dummy[it];
}
float avg_duration = (float)total_duration/(float)(niter - 1);
float avg_duration2 = (float)total_duration2/(float)(niter - 1);
float std_duration = sqrtf(avg_duration2 - avg_duration*avg_duration);
printf("%1.2f %1.2f %d\n", avg_duration, std_duration, total_dummy);
#else
for (int it=0;it < niter;it++) {
printf("%d %d\n", duration[it], (int)dummy[it]);
}
#endif
}
}
// Runtime-niter variant of the pointer-chase kernel: timings and per-thread
// chase values live in dynamic shared memory (niter ints followed by
// niter*blockDim.x T values). Thread 0 reports mean/stddev of iterations
// 1..niter-1. Timing bracketing is order-critical; code left unchanged.
template <typename T>
__global__ void pChaseKernel2(T* array, const int skip, const int offset, const int niter) {
extern __shared__ char dummychar[];
volatile int* duration = (int*)dummychar;
volatile T* dummy = (T*)&dummychar[niter*sizeof(int)];
{
T j = threadIdx.x*skip + offset;
for (int it=0;it < niter;it++) {
// int start = clock();
duration[it] = clock();
j = array[j];
dummy[it*blockDim.x + threadIdx.x] = j;
// int end = clock();
// duration[it] = end - start;
duration[it] = clock() - duration[it];
}
}
if (threadIdx.x == 0) {
#if 1
int total_duration = 0;
int total_duration2 = 0;
int total_dummy = 0;
for (int it=1;it < niter;it++) {
int d = duration[it];
total_duration += d;
total_duration2 += d*d;
// NOTE(review): reads dummy[it], i.e. thread 0's slots only for small it
total_dummy += (int)dummy[it];
}
float avg_duration = (float)total_duration/(float)(niter - 1);
float avg_duration2 = (float)total_duration2/(float)(niter - 1);
float std_duration = sqrtf(avg_duration2 - avg_duration*avg_duration);
printf("%1.2f %1.2f %d\n", avg_duration, std_duration, total_dummy);
#else
for (int it=0;it < niter;it++) {
printf("%d %d\n", duration[it], (int)dummy[it]);
}
#endif
}
}
// Pointer-chase variant that times the whole chase loop once (instead of
// per-iteration) and prints the average cycles per load. The shared-memory
// stores keep the loads live. Timing bracket is order-critical; unchanged.
template <typename T>
__global__ void pChaseMaxwellKernel(T* array, const int skip, const int offset, const int niter) {
extern __shared__ char dummychar[];
T* dummy = (T*)dummychar;
int start = clock();
T j = threadIdx.x*skip + offset;
for (int it=0;it < niter;it++) {
j = array[j];
dummy[it] = j;
}
int end = clock();
int duration = (int)(end - start);
if (threadIdx.x == 0) {
int max_dummy = 0;
for (int it=0;it < niter;it++) max_dummy = max(max_dummy, (int)dummy[it]);
printf("%1.2f %d\n", (float)duration/(float)niter, max_dummy);
}
}
// Like pChaseMaxwellKernel but with a per-thread slot in shared memory
// (niter*blockDim.x entries), so each thread's chase values are preserved
// and thread 0 reports the maximum over all threads.
template <typename T>
__global__ void pChaseMaxwellKernel2(T* array, const int skip, const int offset, const int niter) {
extern __shared__ char dummychar[];
volatile T* dummy = (T*)dummychar;
int start = clock();
T j = threadIdx.x*skip + offset;
for (int it=0;it < niter;it++) {
T j0 = j;
j = array[j];
// printf("%d %d | %d -> %d\n", it, threadIdx.x, j0, j);
dummy[it*blockDim.x + threadIdx.x] = j;
}
int end = clock();
int duration = (int)(end - start);
if (threadIdx.x == 0) {
int max_dummy = 0;
for (int it=0;it < niter;it++) {
for (int t=0;t < blockDim.x;t++) {
max_dummy = max(max_dummy, (int)dummy[it*blockDim.x + t]);
}
}
printf("%1.2f %d\n", (float)duration/(float)niter, max_dummy);
}
}
// Store benchmark: each iteration writes one 32-int row, with thread
// threadIdx.x covering column threadIdx.x of the row.
// NOTE(review): row stride is fixed at 32, so this presumably expects
// blockDim.x <= 32 -- confirm against the launch site.
__global__ void writePCLKernel(int* array, const int niter) {
  for (int row = 0; row < niter; row++) {
    array[row*32 + threadIdx.x] = 1;
  }
}
// Copy benchmark staged through shared memory: each iteration loads one
// 32-int row into shbuf, barriers, then stores it to dst. Barrier placement
// guards the shared-memory reuse across iterations; code left unchanged.
__global__ void copyPCLKernel(int* src, int* dst, const int niter) {
__shared__ int shbuf[32];
for (int it=0;it < niter;it++) {
__syncthreads();
shbuf[threadIdx.x] = src[it*32 + threadIdx.x];
__syncthreads();
dst[it*32 + threadIdx.x] = shbuf[threadIdx.x];
}
}
// Times one global load per thread with clock64(), gathers the per-thread
// cycle counts in dynamic shared memory (blockDim.x ints), and thread 0 of
// block 0 prints min/max/average. The final store to bufferOut keeps the
// load from being optimized away.
template <typename T>
__global__ void memoryLatencyKernel(T* bufferIn, T* bufferOut) {
extern __shared__ int shCycles[];
// int p = threadIdx.x*(128/sizeof(T)) + 1 + blockIdx.x*1024;
int p = threadIdx.x;
//if (threadIdx.x % 32 > 0) p += 128;
long long int start = clock64();
T a = bufferIn[p];
long long int end = clock64();
shCycles[threadIdx.x] = (int)(end - start);
__syncthreads();
if (blockIdx.x == 0 && threadIdx.x == 0) {
int minCycle = (1 << 30);
int maxCycle = 0;
int aveCycle = 0;
for (int i=0;i < blockDim.x;i++) {
minCycle = min(minCycle, shCycles[i]);
maxCycle = max(maxCycle, shCycles[i]);
aveCycle += shCycles[i];
}
printf("%d %d %d\n", minCycle, maxCycle, aveCycle/blockDim.x);
}
bufferOut[threadIdx.x] = a;
}
// Store-latency variant: times a single global store per thread and prints
// min/max/average cycles from block 0. Dynamic shared memory must hold
// blockDim.x ints.
template <typename T>
__global__ void memoryLatencyKernel2(T* bufferOut) {
extern __shared__ int shCycles[];
int p = threadIdx.x + 1;
long long int start = clock64();
bufferOut[p] = 1.2;
long long int end = clock64();
shCycles[threadIdx.x] = (int)(end - start);
__syncthreads();
if (blockIdx.x == 0 && threadIdx.x == 0) {
int minCycle = (1 << 30);
int maxCycle = 0;
int aveCycle = 0;
for (int i=0;i < blockDim.x;i++) {
minCycle = min(minCycle, shCycles[i]);
maxCycle = max(maxCycle, shCycles[i]);
aveCycle += shCycles[i];
}
printf("%d %d %d\n", minCycle, maxCycle, aveCycle/blockDim.x);
}
}
// Write-pattern benchmark: each thread stores one element; lanes of a warp
// write contiguously (tid) while warps are spaced 'stride' elements apart,
// shifted by 'offset'. 'nwrite' is currently unused in the active path.
template <typename T>
__global__ void memoryWriteKernel(T* buffer, const int nwrite, const int stride, const int offset) {
// for (int t=threadIdx.x + blockIdx.x*blockDim.x;t < nwrite;t+=blockDim.x*gridDim.x) {
int t = threadIdx.x + blockIdx.x*blockDim.x;
int wid = t / 32;
int tid = t % 32;
// T a = (t*nwrite*blockDim.x) + nwrite*threadIdx.x;
// buffer[t*stride + offset] = a;
buffer[wid*stride + tid + offset] = 1;
// }
}
// One byte stored per warp: lane 0 of each warp writes at its warp's strided
// offset. 'nwrite' is accepted for interface parity but unused here.
__global__ void memoryWriteKernel2(char* buffer, const int nwrite, const int stride, const int offset) {
  const int gtid = threadIdx.x + blockIdx.x*blockDim.x;
  const int warp = gtid / 32;
  const int lane = gtid % 32;
  if (lane == 0) {
    buffer[warp*stride + offset] = 1;
  }
}
// Times a short sequence of integer ALU operations with clock64() and prints
// the cycle count per thread; a and b are printed so the arithmetic cannot
// be dead-code eliminated. Statement order is what is being measured.
__global__ void cyclesPerOperationKernel() {
// int a = threadIdx.x;
// int b = blockIdx.x;
long long int start = clock64();
int a = threadIdx.x;
int b = blockIdx.x;
b += a;
a *= 17;
b += 3;
a -= b;
long long int end = clock64();
printf("threadIdx.x %d cycles %lld a %d b %d\n", threadIdx.x, end-start, a, b);
}
// Loads one double per lane at an offset of one element (t+1) -- presumably
// to straddle a cache-line boundary -- then sums the warp's values with warp
// shuffles; lane 0 writes the sum so the loads are observable.
// Fix: legacy mask-less __shfl() was removed for compute capability 7.0+
// (CUDA 9); use __shfl_sync with an explicit full-warp mask.
// NOTE(review): assumes a single full 32-lane warp -- confirm launch config.
__global__ void cacheLineKernel(double* buffer, double* res) {
    int t = threadIdx.x;
    //
    double a = buffer[t+1];
    //
    double sum = 0.0;
    for (int i=0;i < 32;i++) {
        sum += __shfl_sync(0xffffffffu, a, i);
    }
    if (threadIdx.x == 0) res[0] = sum;
}
// Scatter-write: stores 1.0 into buffer at every index listed in
// pos[0..n). Single-block kernel; threads walk the position list with a
// block-stride loop.
__global__ void writeVolMmk(const int* pos, const int n, double* buffer) {
  for (int idx = threadIdx.x; idx < n; idx += blockDim.x) {
    buffer[pos[idx]] = 1.0;
  }
}
// Gather/scatter benchmark: stages n doubles from buffer at the read
// positions posr into dynamic shared memory, then scatters them back to
// buffer at the write positions posw. Single-block kernel; the launcher
// must supply n * sizeof(double) bytes of dynamic shared memory.
__global__ void readWriteVolMmk(const int* posr, const int* posw, const int n, double* buffer) {
  extern __shared__ double shBuffer[];
  // NOTE(review): a barrier before any shared-memory access looks
  // redundant — confirm whether it is intentional (e.g. for timing).
  __syncthreads();
  for (int i=threadIdx.x;i < n;i+=blockDim.x) {
    int posr_val = posr[i];
    shBuffer[i] = buffer[posr_val];
  }
  // All gathers must complete before any scatter may overwrite a source
  // element of another thread.
  __syncthreads();
  for (int i=threadIdx.x;i < n;i+=blockDim.x) {
    int posw_val = posw[i];
    buffer[posw_val] = shBuffer[i];
  }
}
// ############################################################################
// ############################################################################
// ############################################################################
static int SM_major = 0;
void volMmk(int nthread, char* filebase);
void copyPCL(int nthread, int niter);
void writePCL(int nthread, int niter);
template <typename T> void pChase(int nthread, int stride, int offset);
void memoryTransactions();
template <typename T> void memoryLatency(int nwarp, int nsm);
void clearCache(int* buffer, const int bufferSize);
void cyclesPerOperation();
template <typename T> void memoryWrite(int stride, int offset);
void memoryWrite2(int stride, int offset);
void cacheLine();
void printDeviceInfo();
// Command-line driver for the micro-benchmarks. Options come in
// "-name value" pairs (see the usage text); on any malformed argument the
// usage is printed and 1 is returned. Currently only the pointer-chase
// benchmark is enabled; the other benchmarks are kept as commented-out
// calls for manual experiments.
int main(int argc, char *argv[]) {
  int nthread = 1;
  int stride = 1;
  int offset = 0;
  int niter = 0;
  int deviceID = 0;
  int size = 4;          // element size for pChase: 4 -> int, otherwise long long int
  char file[64];
  bool arg_ok = true;
  if (argc >= 2) {
    int i = 1;
    while (i < argc) {
      // Every option consumes a value. Require the value to exist so
      // sscanf is never handed argv[argc], which is a null pointer
      // (dereferencing it is undefined behavior).
      bool hasValue = (i + 1 < argc);
      if (strcmp(argv[i], "-stride") == 0 && hasValue) {
        sscanf(argv[i+1], "%d", &stride);
        i += 2;
      } else if (strcmp(argv[i], "-nthread") == 0 && hasValue) {
        sscanf(argv[i+1], "%d", &nthread);
        i += 2;
      } else if (strcmp(argv[i], "-offset") == 0 && hasValue) {
        sscanf(argv[i+1], "%d", &offset);
        i += 2;
      } else if (strcmp(argv[i], "-size") == 0 && hasValue) {
        sscanf(argv[i+1], "%d", &size);
        i += 2;
      } else if (strcmp(argv[i], "-niter") == 0 && hasValue) {
        sscanf(argv[i+1], "%d", &niter);
        i += 2;
      } else if (strcmp(argv[i], "-file") == 0 && hasValue) {
        // %63s bounds the read so it fits file[64] with its terminator.
        sscanf(argv[i+1], "%63s", file);
        i += 2;
      } else if (strcmp(argv[i], "-device") == 0 && hasValue) {
        sscanf(argv[i+1], "%d", &deviceID);
        i += 2;
      } else {
        arg_ok = false;
        break;
      }
    }
  }
  if (!arg_ok) {
    printf("cumb [options]\n");
    printf("Options:\n");
    printf("-nthread [nthread]\n");
    printf("-stride [stride]\n");
    printf("-offset [offset]\n");
    printf("-size [size]\n");
    printf("-niter [niter]\n");
    printf("-file [file]\n");
    printf("-device [device]\n");
    return 1;
  }
  cudaCheck(cudaSetDevice(deviceID));
  printDeviceInfo();   // also records SM_major, used by pChase
  // Scratch buffer used to flush caches between benchmark runs.
  int* buffer = NULL;
  int bufferSize = 1000000;
  allocate_device<int>(&buffer, bufferSize);
  // for (int i=1;i <= 1;i++) {
  //   clearCache(buffer, bufferSize);
  //   memoryLatency<long long int>(i, 1);
  // }
  // clearCache(buffer, bufferSize);
  // memoryLatency<int>(1);
  // clearCache(buffer, bufferSize);
  // memoryWrite2(stride, offset);
  // clearCache(buffer, bufferSize);
  // memoryTransactions();
  // clearCache(buffer, bufferSize);
  // writePCL(nthread, niter);
  // clearCache(buffer, bufferSize);
  // copyPCL(nthread, niter);
  // volMmk(nthread, file);
#if 1
  // nthread == 0 sweeps 1..32 threads; otherwise run a single configuration.
  if (nthread == 0) {
    for (int i=1;i <= 32;i++) {
      clearCache(buffer, bufferSize);
      if (size == 4)
        pChase<int>(i, stride, offset);
      else
        pChase<long long int>(i, stride, offset);
    }
  } else {
    clearCache(buffer, bufferSize);
    if (size == 4)
      pChase<int>(nthread, stride, offset);
    else
      pChase<long long int>(nthread, stride, offset);
  }
#endif
  // clearCache(buffer, bufferSize);
  // pChase<long long int>(stride);
  // clearCache(buffer, bufferSize);
  // memoryWrite<long long int>(stride, offset);
  // clearCache(buffer, bufferSize);
  // cacheLine();
  deallocate_device<int>(&buffer);
  // cyclesPerOperation();
  cudaCheck(cudaDeviceReset());
  return 0;
}
// Runs the read/write volume benchmark with position lists loaded from
// "<filebase>r.txt" (read positions) and "<filebase>w.txt" (write
// positions). Returns silently if the lists are empty or differ in size.
void volMmk(int nthread, char* filebase) {
  char filename[256];
  // snprintf bounds the formatted name: filebase is caller-supplied and of
  // unknown length, so the previous sprintf could overflow filename.
  snprintf(filename, sizeof(filename), "%sr.txt", filebase);
  std::vector<int> h_posr = loadPos(filename);
  snprintf(filename, sizeof(filename), "%sw.txt", filebase);
  std::vector<int> h_posw = loadPos(filename);
  if (h_posr.size() == 0 || h_posr.size() != h_posw.size()) return;
  int* posr;
  int* posw;
  allocate_device<int>(&posr, h_posr.size());
  allocate_device<int>(&posw, h_posw.size());
  copy_HtoD_sync<int>(h_posr.data(), posr, h_posr.size());
  copy_HtoD_sync<int>(h_posw.data(), posw, h_posw.size());
  // Largest position determines the required buffer size.
  // NOTE(review): .back() assumes loadPos returns ascending-sorted lists —
  // confirm against its implementation.
  int maxpos = std::max(h_posr.back(), h_posw.back());
  double* buffer;
  allocate_device<double>(&buffer, maxpos + 1);
  // writeVolMmk<<< 1, nthread >>>(posw, h_posw.size(), buffer);
  readWriteVolMmk<<< 1, nthread, h_posr.size()*sizeof(double) >>>(posr, posw, h_posw.size(), buffer);
  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize());
  deallocate_device<int>(&posr);
  deallocate_device<int>(&posw);
  deallocate_device<double>(&buffer);
}
// Host driver for copyPCLKernel: allocates two 32M-int device arrays and
// launches a single block of nthread threads, forwarding niter to the
// kernel (defined elsewhere in this file).
void copyPCL(int nthread, int niter) {
  const int n = 32*1024*1024;
  int* src = NULL;
  int* dst = NULL;
  allocate_device<int>(&src, n);
  allocate_device<int>(&dst, n);
  copyPCLKernel<<< 1, nthread >>>(src, dst, niter);
  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize());
  deallocate_device<int>(&src);
  deallocate_device<int>(&dst);
}
// Host driver for writePCLKernel: allocates a 32M-int device array and
// launches a single block of nthread threads, forwarding niter to the
// kernel (defined elsewhere in this file).
void writePCL(int nthread, int niter) {
  int arraySize = 32*1024*1024;
  int* array;
  allocate_device<int>(&array, arraySize);
  writePCLKernel<<< 1, nthread >>>(array, niter);
  cudaCheck(cudaGetLastError());   // catch launch-configuration errors
  cudaCheck(cudaDeviceSynchronize());
  deallocate_device<int>(&array);
}
//
// Count number of global memory transactions per one request for potentially
// scattered accesses to elements listed in pos
// NOTE: Assumes pos is sorted
//
//
// Count the number of global-memory transactions needed to service one
// request over the element positions in pos[0..n): each distinct
// accWidth-element-wide segment touched costs one transaction.
// NOTE: Assumes pos is sorted.
//
int glTransactions(const int* pos, const int n, const int accWidth) {
  int nTrans = 0;
  int prevSeg = -1;   // sentinel: no segment seen yet
  for (int i = 0; i < n; ++i) {
    const int seg = pos[i] / accWidth;
    if (seg != prevSeg) ++nTrans;
    prevSeg = seg;
  }
  return nTrans;
}
//
// Stride = stride in bytes
//
//
// Pointer-chase latency benchmark.
// stride = stride in bytes between consecutive chase links; offset is
// added (in elements) to every link address. The chase chain is built on
// the host — element i holds the element index of the next link — copied
// to the device, and walked by the architecture-appropriate kernel
// (Maxwell+ path keyed off SM_major, recorded by printDeviceInfo()).
//
template <typename T>
void pChase(int nthread, int stride, int offset) {
  int niter = 320;              // chase length; must match pChaseKernel<T, 320>
  int skip = stride/sizeof(T);  // stride in elements
  // Over-allocate (x100) so the chain and its offset stay well in bounds.
  int arraySize = (niter + 1)*skip*nthread*100;
  T* array;
  allocate_device<T>(&array, arraySize);
  T* h_array = new T[arraySize];
  // Initialize everything to -1 so unwritten elements are identifiable.
  for (int i=0;i < arraySize;i++) {
    h_array[i] = (T)(-1);
  }
  for (int i=0;i < niter*skip*nthread;i+=skip) {
    // >= (not >): the stored link wraps via % arraySize exactly when it
    // reaches arraySize, so that case must be flagged too.
    if (skip*nthread + i + offset >= arraySize) {
      printf("ERROR\n");
    }
    h_array[i + offset] = (skip*nthread + i + offset) % arraySize;
  }
  copy_HtoD_sync<T>(h_array, array, arraySize);
  cudaCheck(cudaDeviceSynchronize());
  delete [] h_array;
  if (SM_major >= 5) {
    pChaseMaxwellKernel<T> <<< 1, nthread, niter*sizeof(T) >>>(array, skip, offset, niter);
    // pChaseMaxwellKernel2<T> <<< 1, nthread, niter*sizeof(T)*nthread >>>(array, skip, offset, niter);
  } else {
    pChaseKernel<T, 320> <<< 1, nthread >>>(array, skip, offset);
    // pChaseKernel2<T> <<< 1, nthread, niter*sizeof(int) + niter*sizeof(T)*nthread >>>(array, skip, offset, niter);
  }
  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize());
  deallocate_device<T>(&array);
}
// Host driver for cacheLineKernel: one warp reads 32 doubles starting at
// buffer[1] and reduces them; the result lands at buffer[128] (the second
// half of the 256-element allocation, passed as the res pointer).
void cacheLine() {
  double* buffer = NULL;
  allocate_device<double>(&buffer, 256);
  cudaCheck(cudaDeviceSynchronize());
  int nthread = 32;   // exactly one warp, as the kernel requires
  int nblock = 1;
  cacheLineKernel <<< nblock, nthread >>>(buffer, &buffer[128]);
  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize());
  deallocate_device<double>(&buffer);
}
// Launches memoryTransactionKernel three times for float and three times
// for double (same sequence as before, expressed as loops): repeated
// launches give stable counters when run under a profiler.
void memoryTransactions() {
  float* buffer = NULL;
  allocate_device<float>(&buffer, 1024);
  const int nthread = 1;
  const int nblock = 1;
  for (int rep = 0; rep < 3; ++rep) {
    memoryTransactionKernel<float> <<< nblock, nthread >>>(buffer);
    cudaCheck(cudaGetLastError());
  }
  for (int rep = 0; rep < 3; ++rep) {
    memoryTransactionKernel<double> <<< nblock, nthread >>>((double *)buffer);
    cudaCheck(cudaGetLastError());
  }
  cudaCheck(cudaDeviceSynchronize());
  deallocate_device<float>(&buffer);
}
// Host driver for memoryWriteKernel<T>: nwrite threads each store one
// element of type T using a per-warp stride (in elements) and an offset.
// Prints occupancy information and the total bytes written.
template <typename T>
void memoryWrite(int stride, int offset) {
  int nwrite = 31249408/2;
  // 34x head-room over nwrite elements so strided addresses stay in
  // bounds. NOTE(review): not validated here against stride/offset —
  // confirm for large strides.
  int bufferSize = nwrite*34;
  T* buffer = NULL;
  allocate_device<T>(&buffer, bufferSize);
  printf("bufferSize %f GB\n", bufferSize*sizeof(T)/1000000000.0f);
  cudaCheck(cudaDeviceSynchronize());
  int nthread = 512;
  int nblock = nwrite/nthread;   // nwrite is an exact multiple of 512
  int numActiveBlock;
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, memoryWriteKernel<T>, nthread, 0);
  printf("nthread %d nblock %d numActiveBlock %d\n", nthread, nblock, numActiveBlock);
  memoryWriteKernel<T> <<< nblock, nthread >>>(buffer, nwrite, stride, offset);
  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize());
  long long int bytesWritten = nwrite*sizeof(T);
  printf("wrote %lld bytes using stride %d and offset %d\n", bytesWritten, stride, offset);
  deallocate_device<T>(&buffer);
}
// Host driver for memoryWriteKernel2: one byte stored per warp at a
// stride/offset-determined address (lane 0 only). Prints occupancy info.
void memoryWrite2(int stride, int offset) {
  int nwrite = 31249408/2;
  // 34x head-room over nwrite so strided addresses stay in bounds.
  // NOTE(review): not validated here against stride/offset — confirm.
  int bufferSize = nwrite*34;
  char* buffer = NULL;
  allocate_device<char>(&buffer, bufferSize);
  printf("bufferSize %f GB\n", bufferSize*sizeof(char)/1000000000.0f);
  cudaCheck(cudaDeviceSynchronize());
  int nthread = 512;
  int nblock = nwrite/nthread;   // nwrite is an exact multiple of 512
  int numActiveBlock;
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numActiveBlock, memoryWriteKernel2, nthread, 0);
  printf("nthread %d nblock %d numActiveBlock %d\n", nthread, nblock, numActiveBlock);
  memoryWriteKernel2 <<< nblock, nthread >>>(buffer, nwrite, stride, offset);
  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize());
  printf("wrote using stride %d and offset %d\n", stride, offset);
  deallocate_device<char>(&buffer);
}
// Host driver for memoryLatencyKernel2<T>: nsm blocks of 32*nwarp threads,
// each thread timing a single store; per-block cycle statistics are
// printed by the kernel. Dynamic shared memory holds one int per thread.
template <typename T> void memoryLatency(int nwarp, int nsm) {
  T* bufferIn = NULL;
  T* bufferOut = NULL;
  // bufferIn is only used by the older (commented-out) kernel variant.
  allocate_device<T>(&bufferIn, 16384*nwarp*nsm);
  allocate_device<T>(&bufferOut, 16384*nwarp*nsm);
  cudaCheck(cudaDeviceSynchronize());
  // printf("%d\n", nwarp);
  // int nthread = 32*nwarp;
  // int nblock = nsm;
  // int shmemsize = nthread*sizeof(int);
  // memoryLatencyKernel<T> <<< nblock, nthread, shmemsize >>>(bufferIn, bufferOut);
  // cudaCheck(cudaGetLastError());
  int nthread = 32*nwarp;
  int nblock = nsm;
  int shmemsize = nthread*sizeof(int);   // one cycle count per thread
  memoryLatencyKernel2<T> <<< nblock, nthread, shmemsize >>>(bufferOut);
  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize());
  // printf("\n");
  deallocate_device<T>(&bufferIn);
  deallocate_device<T>(&bufferOut);
}
// Overwrites the scratch buffer with clearCacheKernel (defined elsewhere
// in this file); used between benchmark runs, presumably to evict prior
// data from the caches — confirm against the kernel's implementation.
void clearCache(int* buffer, const int bufferSize) {
  cudaCheck(cudaDeviceSynchronize());
  int nthread = 1024;
  int nblock = (bufferSize - 1)/nthread + 1;   // ceil(bufferSize / nthread)
  clearCacheKernel <<< nblock, nthread >>>(buffer, bufferSize);
  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize());
}
// Host driver for cyclesPerOperationKernel: one warp, each thread printing
// its own cycle count for the timed ALU sequence.
void cyclesPerOperation() {
  cudaCheck(cudaDeviceSynchronize());
  int nthread = 32;   // one warp
  int nblock = 1;
  cyclesPerOperationKernel <<< nblock, nthread >>>();
  cudaCheck(cudaGetLastError());
  cudaCheck(cudaDeviceSynchronize());
}
// Prints device name, SM version, clock, SM count, ECC state, theoretical
// memory bandwidth, shared-memory bank size and L2 size, and records the
// SM major version in the file-scope SM_major (used by pChase to select a
// kernel).
void printDeviceInfo() {
  int deviceID;
  cudaCheck(cudaGetDevice(&deviceID));
  cudaDeviceProp prop;
  cudaCheck(cudaGetDeviceProperties(&prop, deviceID));
  cudaSharedMemConfig pConfig;
  cudaCheck(cudaDeviceGetSharedMemConfig(&pConfig));
  int shMemBankSize = 4;
  if (pConfig == cudaSharedMemBankSizeEightByte) shMemBankSize = 8;
  // memoryClockRate is in kHz; x2 for DDR; memoryBusWidth bits -> bytes;
  // /1e6 converts kB/s to GB/s.
  double mem_BW = (double)(prop.memoryClockRate*2*(prop.memoryBusWidth/8))/1.0e6;
  SM_major = prop.major;
  printf("Using %s SM version %d.%d\n", prop.name, prop.major, prop.minor);
  printf("Clock %1.3lfGhz numSM %d ECC %d mem BW %1.2lfGB/s shMemBankSize %dB\n", (double)prop.clockRate/1e6,
         prop.multiProcessorCount, prop.ECCEnabled, mem_BW, shMemBankSize);
  printf("L2 %1.2lfMB\n", (double)prop.l2CacheSize/(double)(1024*1024));
}
|
0d84a61d24aeeb08221a18245fe7110c5e91ef96.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
#define TYPE int
using namespace cooperative_groups;
// Placeholder kernel: computes the flat global thread id but performs no
// work yet.
__global__ void kernel(){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;  // currently unused
}
// Fill the n*n-element (row-major) matrix a with pseudo-random values
// drawn from {-1, 0, 1} via rand().
__host__ void fillMatrix(TYPE *a, int n){
    const int count = n * n;
    for(int idx = 0; idx < count; ++idx)
        a[idx] = rand() % 3 - 1;
}
// Fill the n-element vector a with pseudo-random values from {-1, 0, 1}.
__host__ void fillVector(TYPE *a, int n){
    for(int idx = 0; idx < n; ++idx)
        a[idx] = rand() % 3 - 1;
}
// Entry point: expects two arguments, N (problem size) and seed.
// NOTE(review): the usage text mentions an OPTION argument that is never
// parsed here — confirm the intended argument list.
int main(int argc, char **argv){
    if(argc != 3){
        printf("Error!, Ejecutar ./prog <OPTION><N> <seed>\nOPTION:\n1 = VECTOR\n2 = MATRIX\n");
        exit(1);
    }
    int n = atoi(argv[1]);
    int seed = atoi(argv[2]);
    srand(seed);
    // BSIZE2D must be supplied at compile time (e.g. -DBSIZE2D=16).
    dim3 block(BSIZE2D, BSIZE2D, 1);
    // Ceiling-divide grid covering an n x n domain; computed but not yet
    // used by any launch.
    dim3 grid( (n + block.x - 1)/block.x, (n + block.y - 1)/block.y );
    return 0;
}
| 0d84a61d24aeeb08221a18245fe7110c5e91ef96.cu | #include <stdio.h>
#include <cuda.h>
#include <cooperative_groups.h>
#define TYPE int
using namespace cooperative_groups;
// Placeholder kernel: computes the flat global thread id but performs no
// work yet.
__global__ void kernel(){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;  // currently unused
}
// Fill the n*n-element (row-major) matrix a with pseudo-random values
// drawn from {-1, 0, 1} via rand().
__host__ void fillMatrix(TYPE *a, int n){
    const int count = n * n;
    for(int idx = 0; idx < count; ++idx)
        a[idx] = rand() % 3 - 1;
}
// Fill the n-element vector a with pseudo-random values from {-1, 0, 1}.
__host__ void fillVector(TYPE *a, int n){
    for(int idx = 0; idx < n; ++idx)
        a[idx] = rand() % 3 - 1;
}
// Entry point: expects two arguments, N (problem size) and seed.
// NOTE(review): the usage text mentions an OPTION argument that is never
// parsed here — confirm the intended argument list.
int main(int argc, char **argv){
    if(argc != 3){
        printf("Error!, Ejecutar ./prog <OPTION><N> <seed>\nOPTION:\n1 = VECTOR\n2 = MATRIX\n");
        exit(1);
    }
    int n = atoi(argv[1]);
    int seed = atoi(argv[2]);
    srand(seed);
    // BSIZE2D must be supplied at compile time (e.g. -DBSIZE2D=16).
    dim3 block(BSIZE2D, BSIZE2D, 1);
    // Ceiling-divide grid covering an n x n domain; computed but not yet
    // used by any launch.
    dim3 grid( (n + block.x - 1)/block.x, (n + block.y - 1)/block.y );
    return 0;
}
|
3c3eb1f7bfa0679f86ed2a01d51df55f0429e68b.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include "im2col.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "generic/SpatialConvolutionLocal.cu"
#include "THHGenerateFloatTypes.h"
| 3c3eb1f7bfa0679f86ed2a01d51df55f0429e68b.cu | #include "THCUNN.h"
#include "common.h"
#include "im2col.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "generic/SpatialConvolutionLocal.cu"
#include "THCGenerateFloatTypes.h"
|
29691a6548419587fc59b8b429cf87445f68037f.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  // Debug builds only: report the error with its message and abort.
  if (result != hipSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
    assert(result == hipSuccess);
  }
#endif
  // The status is passed through so callers can still inspect it.
  return result;
}
// Measures host<->device copy bandwidth for the n-float buffers: h_a is
// the host source, h_b the host read-back destination, d the device
// buffer. Each direction is timed with events, bandwidth is printed, and
// the round trip is verified element-by-element. desc labels the output.
void profileCopies(float *h_a,
                   float *h_b,
                   float *d,
                   unsigned int n,
                   const char *desc)   // const: callers pass string literals
{
  printf("\n%s transfers\n", desc);
  unsigned int bytes = n * sizeof(float);
  // events for timing
  hipEvent_t startEvent, stopEvent;
  checkCuda( hipEventCreate(&startEvent) );
  checkCuda( hipEventCreate(&stopEvent) );
  checkCuda( hipEventRecord(startEvent, 0) );
  checkCuda( hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice) );
  checkCuda( hipEventRecord(stopEvent, 0) );
  checkCuda( hipEventSynchronize(stopEvent) );
  float time;
  checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
  // time is in milliseconds, so bytes * 1e-6 / time yields GB/s.
  printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
  checkCuda( hipEventRecord(startEvent, 0) );
  checkCuda( hipMemcpy(h_b, d, bytes, hipMemcpyDeviceToHost) );
  checkCuda( hipEventRecord(stopEvent, 0) );
  checkCuda( hipEventSynchronize(stopEvent) );
  checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
  printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
  // Unsigned index avoids the signed/unsigned comparison with n.
  for (unsigned int i = 0; i < n; ++i) {
    if (h_a[i] != h_b[i]) {
      printf("*** %s transfers failed ***", desc);
      break;
    }
  }
  // clean up events
  checkCuda( hipEventDestroy(startEvent) );
  checkCuda( hipEventDestroy(stopEvent) );
}
// Compares host<->device transfer bandwidth for pageable (malloc) versus
// pinned (page-locked) host memory using a 16 MB float buffer.
int main()
{
  unsigned int nElements = 4*1024*1024;
  const unsigned int bytes = nElements * sizeof(float);
  // host arrays
  float *h_aPageable, *h_bPageable;   // pageable: plain malloc
  float *h_aPinned, *h_bPinned;       // pinned: page-locked, DMA-friendly
  // device array
  float *d_a;
  // allocate and initialize
  h_aPageable = (float*)malloc(bytes); // host pageable
  h_bPageable = (float*)malloc(bytes); // host pageable
  checkCuda( hipHostMalloc((void**)&h_aPinned, bytes) ); // host pinned
  checkCuda( hipHostMalloc((void**)&h_bPinned, bytes) ); // host pinned
  checkCuda( hipMalloc((void**)&d_a, bytes) ); // device
  for (int i = 0; i < nElements; ++i) h_aPageable[i] = i;
  memcpy(h_aPinned, h_aPageable, bytes);   // same data in both source buffers
  memset(h_bPageable, 0, bytes);
  memset(h_bPinned, 0, bytes);
  // output device info and transfer size
  hipDeviceProp_t prop;
  checkCuda( hipGetDeviceProperties(&prop, 0) );
  printf("\nDevice: %s\n", prop.name);
  printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
  // perform copies and report bandwidth
  profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
  profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
  printf("\n");
  // cleanup
  hipFree(d_a);
  hipHostFree(h_aPinned);
  hipHostFree(h_bPinned);
  free(h_aPageable);
  free(h_bPageable);
  return 0;
}
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <assert.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  // Debug builds only: report the error with its message and abort.
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
  }
#endif
  // The status is passed through so callers can still inspect it.
  return result;
}
// Measures host<->device copy bandwidth for the n-float buffers: h_a is
// the host source, h_b the host read-back destination, d the device
// buffer. Each direction is timed with CUDA events, bandwidth is printed,
// and the round trip is verified element-by-element. desc labels the output.
void profileCopies(float *h_a,
                   float *h_b,
                   float *d,
                   unsigned int n,
                   const char *desc)   // const: callers pass string literals
{
  printf("\n%s transfers\n", desc);
  unsigned int bytes = n * sizeof(float);
  // events for timing
  cudaEvent_t startEvent, stopEvent;
  checkCuda( cudaEventCreate(&startEvent) );
  checkCuda( cudaEventCreate(&stopEvent) );
  checkCuda( cudaEventRecord(startEvent, 0) );
  checkCuda( cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice) );
  checkCuda( cudaEventRecord(stopEvent, 0) );
  checkCuda( cudaEventSynchronize(stopEvent) );
  float time;
  checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
  // time is in milliseconds, so bytes * 1e-6 / time yields GB/s.
  printf(" Host to Device bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
  checkCuda( cudaEventRecord(startEvent, 0) );
  checkCuda( cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost) );
  checkCuda( cudaEventRecord(stopEvent, 0) );
  checkCuda( cudaEventSynchronize(stopEvent) );
  checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
  printf(" Device to Host bandwidth (GB/s): %f\n", bytes * 1e-6 / time);
  // Unsigned index avoids the signed/unsigned comparison with n.
  for (unsigned int i = 0; i < n; ++i) {
    if (h_a[i] != h_b[i]) {
      printf("*** %s transfers failed ***", desc);
      break;
    }
  }
  // clean up events
  checkCuda( cudaEventDestroy(startEvent) );
  checkCuda( cudaEventDestroy(stopEvent) );
}
// Compares host<->device transfer bandwidth for pageable (malloc) versus
// pinned (page-locked) host memory using a 16 MB float buffer.
int main()
{
  unsigned int nElements = 4*1024*1024;
  const unsigned int bytes = nElements * sizeof(float);
  // host arrays
  float *h_aPageable, *h_bPageable;   // pageable: plain malloc
  float *h_aPinned, *h_bPinned;       // pinned: page-locked, DMA-friendly
  // device array
  float *d_a;
  // allocate and initialize
  h_aPageable = (float*)malloc(bytes); // host pageable
  h_bPageable = (float*)malloc(bytes); // host pageable
  checkCuda( cudaMallocHost((void**)&h_aPinned, bytes) ); // host pinned
  checkCuda( cudaMallocHost((void**)&h_bPinned, bytes) ); // host pinned
  checkCuda( cudaMalloc((void**)&d_a, bytes) ); // device
  for (int i = 0; i < nElements; ++i) h_aPageable[i] = i;
  memcpy(h_aPinned, h_aPageable, bytes);   // same data in both source buffers
  memset(h_bPageable, 0, bytes);
  memset(h_bPinned, 0, bytes);
  // output device info and transfer size
  cudaDeviceProp prop;
  checkCuda( cudaGetDeviceProperties(&prop, 0) );
  printf("\nDevice: %s\n", prop.name);
  printf("Transfer size (MB): %d\n", bytes / (1024 * 1024));
  // perform copies and report bandwidth
  profileCopies(h_aPageable, h_bPageable, d_a, nElements, "Pageable");
  profileCopies(h_aPinned, h_bPinned, d_a, nElements, "Pinned");
  printf("\n");
  // cleanup
  cudaFree(d_a);
  cudaFreeHost(h_aPinned);
  cudaFreeHost(h_bPinned);
  free(h_aPageable);
  free(h_bPageable);
  return 0;
}
b4b1556b1fa3ed7bac660e9c35ebda62d36d4b26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <time.h>
#include "Matrix.h"
#define COUNT 10
#define HANDLE_ERROR(hipError_t) HandleError(hipError_t, __FILE__, __LINE__)
// Reports a failed runtime call with its source location and terminates;
// invoked through the HANDLE_ERROR macro defined above.
static void HandleError(const hipError_t err, const char* const in_file, const int line)
{
  if (err != hipSuccess)
  {
    printf("%s,\nin file %s,\nin line: %d.\n", hipGetErrorString(err), in_file, line);
    exit(EXIT_FAILURE);
  }
}
// [r1 x c1] * [r2 x c2] = [r1 x c2].
// c1 == r2!
// [r1 x c1] * [r2 x c2] = [r1 x c2].
// c1 == r2!
// One *block* computes one output element: the launcher uses a
// (colsB x rowsA) grid with a single thread per block, so blockIdx.x/.y
// select the output column/row.
__global__ void MatrixMultiplication(const el_t* const in_A, const el_t* const in_B, el_t* const out_Res, const uint rowsA, const uint colsA, const uint colsB)
{
  if (blockIdx.y < rowsA && blockIdx.x < colsB)
  {
    // Dot product of row blockIdx.y of A with column blockIdx.x of B.
    el_t summ = 0;
    for (uint i = 0; i < colsA; ++i)
      summ += in_A[colsA * blockIdx.y + i] * in_B[colsB * i + blockIdx.x];
    out_Res[colsB * blockIdx.y + blockIdx.x] = summ;
  }
}
// Benchmark: multiplies A[n x q] by B[q x m] COUNT times on both the CPU
// and the GPU, prints per-run and average times, and verifies the two
// results agree. Returns 0 when the results match.
int main()
{
  using namespace std;
  const int q = 8192, n = 4096, m = 6144;
  Matrix A = Init(n, q); // Initialize matrix A.
  for (el_t i = 0; i < n * q; ++i)
    A.El[i] = i + 1;
  Matrix B = Init(q, m); // Initialize matrix B.
  for (el_t i = 0; i < q * m; ++i)
    B.El[i] = i + 1;
  Matrix MatResCPU = InitZeros(A.Row, B.Col); // Initialize the matrix with zeros as the result of multiplication on the CPU.
  Matrix MatResGPU = InitZeros(A.Row, B.Col); // Initialize the matrix with zeros as the result of multiplication on the GPU.
  el_t* dev_A, *dev_B, *dev_MatRes;
  // Allocate memory on the GPU for matrices.
  HANDLE_ERROR(hipMalloc((void**)&dev_A, A.Row * A.Col * sizeof(el_t)));
  HANDLE_ERROR(hipMalloc((void**)&dev_B, B.Row * B.Col * sizeof(el_t)));
  HANDLE_ERROR(hipMalloc((void**)&dev_MatRes, MatResGPU.Row * MatResGPU.Col * sizeof(el_t)));
  // Copy matrixes to GPU.
  HANDLE_ERROR(hipMemcpy(dev_A, A.El, A.Row * A.Col * sizeof(el_t), hipMemcpyHostToDevice));
  HANDLE_ERROR(hipMemcpy(dev_B, B.El, B.Row * B.Col * sizeof(el_t), hipMemcpyHostToDevice));
  double TimesMultiplyByRow[COUNT], TimesMatrixMultiplication[COUNT]; // Arrays for storing the time spent on performing multiplications.
  for (int i = 0; i < COUNT; ++i) // Measure the execution time of both algorithms COUNT times.
  {
    FillZeros(&MatResCPU);
    clock_t Start = clock();
    MultiplyByRow(&A, &B, &MatResCPU); // Perform matrix multiplication on the CPU.
    clock_t End = clock();
    TimesMultiplyByRow[i] = ((double)End - Start) / CLOCKS_PER_SEC;
    Start = clock();
    hipLaunchKernelGGL(( MatrixMultiplication) , dim3(dim3(m, n)), dim3(1) , 0, 0, dev_A, dev_B, dev_MatRes, A.Row, A.Col, B.Col); // Perform matrix multiplication on the GPU.
    // Kernel launches are asynchronous: without this synchronization the
    // timer below only measured launch overhead, not GPU execution time.
    HANDLE_ERROR(hipDeviceSynchronize());
    End = clock();
    TimesMatrixMultiplication[i] = ((double)End - Start) / CLOCKS_PER_SEC;
  }
  HANDLE_ERROR(hipMemcpy(MatResGPU.El, dev_MatRes, MatResGPU.Row * MatResGPU.Col * sizeof(el_t), hipMemcpyDeviceToHost)); // Copy the result to RAM.
  // Release GPU memory.
  hipFree(dev_A);
  hipFree(dev_B);
  hipFree(dev_MatRes);
  free(A.El);
  free(B.El);
  double Sum = 0;
  for (int i = 0; i < COUNT; ++i)
  {
    printf("Multiplication by CPU %i:\t%f:\n", i + 1, TimesMultiplyByRow[i]);
    Sum += TimesMultiplyByRow[i];
  }
  printf("Multiplication by CPU (avr.):\t%f:\n", Sum / COUNT);
  Sum = 0;
  for (int i = 0; i < COUNT; ++i)
  {
    printf("Multiplication by GPU %i:\t%f:\n", i + 1, TimesMatrixMultiplication[i]);
    Sum += TimesMatrixMultiplication[i];
  }
  printf("Multiplication by GPU (avr.):\t%f:\n", Sum / COUNT);
  const auto exitCode = !VerifyMatrix(&MatResCPU, &MatResGPU);
  free(MatResCPU.El);
  free(MatResGPU.El);
  return exitCode;
}
| b4b1556b1fa3ed7bac660e9c35ebda62d36d4b26.cu | #include <iostream>
#include <time.h>
#include "Matrix.h"
#define COUNT 10
#define HANDLE_ERROR(cudaError) HandleError(cudaError, __FILE__, __LINE__)
// Reports a failed runtime call with its source location and terminates;
// invoked through the HANDLE_ERROR macro defined above.
static void HandleError(const cudaError_t err, const char* const in_file, const int line)
{
  if (err != cudaSuccess)
  {
    printf("%s,\nin file %s,\nin line: %d.\n", cudaGetErrorString(err), in_file, line);
    exit(EXIT_FAILURE);
  }
}
// [r1 x c1] * [r2 x c2] = [r1 x c2].
// c1 == r2!
// One *block* computes one output element: the launcher uses a
// (colsB x rowsA) grid with a single thread per block, so blockIdx.x/.y
// select the output column/row.
__global__ void MatrixMultiplication(const el_t* const in_A, const el_t* const in_B, el_t* const out_Res, const uint rowsA, const uint colsA, const uint colsB)
{
  if (blockIdx.y < rowsA && blockIdx.x < colsB)
  {
    // Dot product of row blockIdx.y of A with column blockIdx.x of B.
    el_t summ = 0;
    for (uint i = 0; i < colsA; ++i)
      summ += in_A[colsA * blockIdx.y + i] * in_B[colsB * i + blockIdx.x];
    out_Res[colsB * blockIdx.y + blockIdx.x] = summ;
  }
}
// Benchmark: multiplies A[n x q] by B[q x m] COUNT times on both the CPU
// and the GPU, prints per-run and average times, and verifies the two
// results agree. Returns 0 when the results match.
int main()
{
  using namespace std;
  const int q = 8192, n = 4096, m = 6144;
  Matrix A = Init(n, q); // Initialize matrix A.
  for (el_t i = 0; i < n * q; ++i)
    A.El[i] = i + 1;
  Matrix B = Init(q, m); // Initialize matrix B.
  for (el_t i = 0; i < q * m; ++i)
    B.El[i] = i + 1;
  Matrix MatResCPU = InitZeros(A.Row, B.Col); // Initialize the matrix with zeros as the result of multiplication on the CPU.
  Matrix MatResGPU = InitZeros(A.Row, B.Col); // Initialize the matrix with zeros as the result of multiplication on the GPU.
  el_t* dev_A, *dev_B, *dev_MatRes;
  // Allocate memory on the GPU for matrices.
  HANDLE_ERROR(cudaMalloc((void**)&dev_A, A.Row * A.Col * sizeof(el_t)));
  HANDLE_ERROR(cudaMalloc((void**)&dev_B, B.Row * B.Col * sizeof(el_t)));
  HANDLE_ERROR(cudaMalloc((void**)&dev_MatRes, MatResGPU.Row * MatResGPU.Col * sizeof(el_t)));
  // Copy matrixes to GPU.
  HANDLE_ERROR(cudaMemcpy(dev_A, A.El, A.Row * A.Col * sizeof(el_t), cudaMemcpyHostToDevice));
  HANDLE_ERROR(cudaMemcpy(dev_B, B.El, B.Row * B.Col * sizeof(el_t), cudaMemcpyHostToDevice));
  double TimesMultiplyByRow[COUNT], TimesMatrixMultiplication[COUNT]; // Arrays for storing the time spent on performing multiplications.
  for (int i = 0; i < COUNT; ++i) // Measure the execution time of both algorithms COUNT times.
  {
    FillZeros(&MatResCPU);
    clock_t Start = clock();
    MultiplyByRow(&A, &B, &MatResCPU); // Perform matrix multiplication on the CPU.
    clock_t End = clock();
    TimesMultiplyByRow[i] = ((double)End - Start) / CLOCKS_PER_SEC;
    Start = clock();
    MatrixMultiplication <<<dim3(m, n), 1 >>>(dev_A, dev_B, dev_MatRes, A.Row, A.Col, B.Col); // Perform matrix multiplication on the GPU.
    // Kernel launches are asynchronous: without this synchronization the
    // timer below only measured launch overhead, not GPU execution time.
    HANDLE_ERROR(cudaDeviceSynchronize());
    End = clock();
    TimesMatrixMultiplication[i] = ((double)End - Start) / CLOCKS_PER_SEC;
  }
  HANDLE_ERROR(cudaMemcpy(MatResGPU.El, dev_MatRes, MatResGPU.Row * MatResGPU.Col * sizeof(el_t), cudaMemcpyDeviceToHost)); // Copy the result to RAM.
  // Release GPU memory.
  cudaFree(dev_A);
  cudaFree(dev_B);
  cudaFree(dev_MatRes);
  free(A.El);
  free(B.El);
  double Sum = 0;
  for (int i = 0; i < COUNT; ++i)
  {
    printf("Multiplication by CPU %i:\t%f:\n", i + 1, TimesMultiplyByRow[i]);
    Sum += TimesMultiplyByRow[i];
  }
  printf("Multiplication by CPU (avr.):\t%f:\n", Sum / COUNT);
  Sum = 0;
  for (int i = 0; i < COUNT; ++i)
  {
    printf("Multiplication by GPU %i:\t%f:\n", i + 1, TimesMatrixMultiplication[i]);
    Sum += TimesMatrixMultiplication[i];
  }
  printf("Multiplication by GPU (avr.):\t%f:\n", Sum / COUNT);
  const auto exitCode = !VerifyMatrix(&MatResCPU, &MatResGPU);
  free(MatResCPU.El);
  free(MatResGPU.El);
  return exitCode;
}
|
4ccb5bd365b0967fc4d8ac50bba3e936d8cd029e.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace ndegtwisted {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
//#if (__COMPUTE_CAPABILITY__) >= 200 && defined(GPU_NDEG_TWISTED_MASS_DIRAC)
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_NDEG_TWISTED_MASS_DIRAC)
#include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass
#endif
#ifndef NDEGTM_SHARED_FLOATS_PER_THREAD
#define NDEGTM_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace twisted
// declare the dslash events
#include <dslash_events.cuh>
using namespace ndegtwisted;
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_NDEG_TWISTED_MASS_DIRAC)
// Kernel functor for the non-degenerate twisted-mass Dslash (HIP build).
// sFloat = spinor storage type (double2/float4/short4), gFloat = gauge type.
// The twist parameters kappa, mu, epsilon, k are carried as a, b, c, d.
template <typename sFloat, typename gFloat>
class NdegTwistedDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1; // even/odd-parity gauge field pointers bound by the caller
const QudaTwistDslashType dslashType;
double a, b, c, d; // kappa, mu, epsilon, k (see constructor)
protected:
// Shared-memory bytes required per thread: only the interior kernel uses
// shared memory on sm_20+; exterior (boundary) kernels need none.
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
// double-precision spinors store doubles in shared memory, otherwise floats
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
// Binds the spinor textures and records the twist parameters; only
// QUDA_NONDEG_DSLASH is a valid dslash type for this functor.
NdegTwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x,
const QudaTwistDslashType dslashType, const double kappa, const double mu,
const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), dslashType(dslashType)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
if (dslashType != QUDA_NONDEG_DSLASH) errorQuda("Invalid dslashType for non-degenerate twisted-mass Dslash");
// stride between the two flavors packed in the spinor field
dslashParam.fl_stride = in->VolumeCB()/2;
}
virtual ~NdegTwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
// Distinguish this kernel family in the autotuner cache.
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
strcat(key.aux,",NdegDslash");
return key;
}
// Launch the dslash kernel with autotuned launch parameters via the
// NDEG_TM_DSLASH dispatch macro.
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, c, d, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
}
// Base dslash flops plus the twist term, which is applied only in the
// interior kernel (exterior kernels add nothing extra).
long long flops() const {
int twisted_flops = 48;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
// twisted-mass flops are done in the interior kernel
flops += twisted_flops * in->VolumeCB();
break;
}
return flops;
}
};
#endif // GPU_NDEG_TWISTED_MASS_DIRAC
#include <dslash_policy.cuh>
// Host entry point for the non-degenerate twisted-mass Dslash (HIP build).
// Configures ghost-zone geometry and comm flags in dslashParam, instantiates
// the functor at the spinor's precision, and executes it through the selected
// dslash communication policy.
void ndegTwistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistDslashType type,
const double &kappa, const double &mu, const double &epsilon,
const double &k, const int *commOverride, TimeProfile &profile,
const QudaDslashPolicy &dslashPolicy)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_NDEG_TWISTED_MASS_DIRAC)
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = in->Volume() / 2;
// Per-dimension ghost-zone offsets and communication switches.
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = in->GhostFace()[i] / 2;
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
// Instantiate the functor at the matching precision.
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new NdegTwistedDslashCuda<double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new NdegTwistedDslashCuda<float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new NdegTwistedDslashCuda<short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
}
// Execute through the chosen communication/compute-overlap policy.
#ifndef GPU_COMMS
DslashPolicyImp* dslashImp = DslashFactory::create(dslashPolicy);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
#endif
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslashImp;
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
#if (__COMPUTE_CAPABILITY__ < 200)
errorQuda("Non-degenerate twisted-mass fermions not supported on pre-Fermi architecture");
#else
errorQuda("Non-degenerate twisted mass dslash has not been built");
#endif
#endif
}
}
| 4ccb5bd365b0967fc4d8ac50bba3e936d8cd029e.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace ndegtwisted {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
//#if (__COMPUTE_CAPABILITY__) >= 200 && defined(GPU_NDEG_TWISTED_MASS_DIRAC)
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_NDEG_TWISTED_MASS_DIRAC)
#include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass
#endif
#ifndef NDEGTM_SHARED_FLOATS_PER_THREAD
#define NDEGTM_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace twisted
// declare the dslash events
#include <dslash_events.cuh>
using namespace ndegtwisted;
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_NDEG_TWISTED_MASS_DIRAC)
// Kernel functor for the non-degenerate twisted-mass Dslash (CUDA build).
// sFloat = spinor storage type (double2/float4/short4), gFloat = gauge type.
// The twist parameters kappa, mu, epsilon, k are carried as a, b, c, d.
template <typename sFloat, typename gFloat>
class NdegTwistedDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1; // even/odd-parity gauge field pointers bound by the caller
const QudaTwistDslashType dslashType;
double a, b, c, d; // kappa, mu, epsilon, k (see constructor)
protected:
// Shared-memory bytes required per thread: only the interior kernel uses
// shared memory on sm_20+; exterior (boundary) kernels need none.
unsigned int sharedBytesPerThread() const
{
#if (__COMPUTE_CAPABILITY__ >= 200)
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
// double-precision spinors store doubles in shared memory, otherwise floats
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
#else
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size;
#endif
}
public:
// Binds the spinor textures and records the twist parameters; only
// QUDA_NONDEG_DSLASH is a valid dslash type for this functor.
NdegTwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x,
const QudaTwistDslashType dslashType, const double kappa, const double mu,
const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), dslashType(dslashType)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
if (dslashType != QUDA_NONDEG_DSLASH) errorQuda("Invalid dslashType for non-degenerate twisted-mass Dslash");
// stride between the two flavors packed in the spinor field
dslashParam.fl_stride = in->VolumeCB()/2;
}
virtual ~NdegTwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
// Distinguish this kernel family in the autotuner cache.
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
strcat(key.aux,",NdegDslash");
return key;
}
// Launch the dslash kernel with autotuned launch parameters via the
// NDEG_TM_DSLASH dispatch macro.
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, c, d, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
}
// Base dslash flops plus the twist term, which is applied only in the
// interior kernel (exterior kernels add nothing extra).
long long flops() const {
int twisted_flops = 48;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
// twisted-mass flops are done in the interior kernel
flops += twisted_flops * in->VolumeCB();
break;
}
return flops;
}
};
#endif // GPU_NDEG_TWISTED_MASS_DIRAC
#include <dslash_policy.cuh>
// Host entry point for the non-degenerate twisted-mass Dslash (CUDA build).
// Configures ghost-zone geometry and comm flags in dslashParam, instantiates
// the functor at the spinor's precision, and executes it through the selected
// dslash communication policy.
void ndegTwistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistDslashType type,
const double &kappa, const double &mu, const double &epsilon,
const double &k, const int *commOverride, TimeProfile &profile,
const QudaDslashPolicy &dslashPolicy)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
#if (__COMPUTE_CAPABILITY__ >= 200) && defined(GPU_NDEG_TWISTED_MASS_DIRAC)
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = in->Volume() / 2;
// Per-dimension ghost-zone offsets and communication switches.
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i] = Npad*(in->GhostOffset(i) + in->Stride());
dslashParam.ghostNormOffset[i] = in->GhostNormOffset(i) + in->Stride();
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = in->GhostFace()[i] / 2;
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
// Instantiate the functor at the matching precision.
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
#if (__COMPUTE_CAPABILITY__ >= 130)
dslash = new NdegTwistedDslashCuda<double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
#else
errorQuda("Double precision not supported on this GPU");
#endif
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new NdegTwistedDslashCuda<float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new NdegTwistedDslashCuda<short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
}
// Execute through the chosen communication/compute-overlap policy.
#ifndef GPU_COMMS
DslashPolicyImp* dslashImp = DslashFactory::create(dslashPolicy);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
#endif
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslashImp;
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
#if (__COMPUTE_CAPABILITY__ < 200)
errorQuda("Non-degenerate twisted-mass fermions not supported on pre-Fermi architecture");
#else
errorQuda("Non-degenerate twisted mass dslash has not been built");
#endif
#endif
}
}
|
03f9f471abe1d5e4ddd9493e502949da7adc9262.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "multKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver (HIP build): sweeps matrices_ x blocks_ configurations,
// timing 1000 launches of multKernel per configuration and printing
// "[usec,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
// argv[1] = number of matrices_ entries to sweep (no argc guard — TODO confirm callers always pass it).
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): hipMalloc takes a byte count; XSIZE*YSIZE here allocates
// bytes, not ints — sizeof(int) appears to be missing. Also none of these
// buffers is ever freed, so the sweep leaks device memory per iteration.
int *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
const int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
// Round the launch domain up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// One launch + sync to establish context; then 10 warm-up launches.
hipFree(0);hipLaunchKernelGGL((
multKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
multKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
}
// NOTE(review): launches are asynchronous and there is no
// hipDeviceSynchronize() before `end`, so this interval measures
// enqueue overhead rather than kernel execution time.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
multKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, c,a,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 03f9f471abe1d5e4ddd9493e502949da7adc9262.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "multKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Benchmark driver (CUDA build): sweeps matrices_ x blocks_ configurations,
 * timing 1000 launches of multKernel per configuration and printing
 * "[usec,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" for each.
 *
 * argv[1] = number of matrices_ entries to sweep.
 */
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Guard against a missing argument before strtol dereferences argv[1].
    if (argc < 2) {
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // cudaMalloc takes a byte count: allocate XSIZE*YSIZE ints, not bytes
            // (the original omitted sizeof(int)).
            size_t bytes = (size_t)XSIZE * YSIZE * sizeof(int);
            int *c = NULL;
            cudaMalloc(&c, bytes);
            const int *a = NULL;
            cudaMalloc(&a, bytes);
            const int *b = NULL;
            cudaMalloc(&b, bytes);
            // Round the launch domain up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // establish the CUDA context before timing
            multKernel<<<gridBlock, threadBlock>>>(c, a, b);
            cudaDeviceSynchronize();
            // Warm-up launches so clocks and caches settle before measurement.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                multKernel<<<gridBlock, threadBlock>>>(c, a, b);
            }
            cudaDeviceSynchronize();  // drain warm-up work out of the timed window
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                multKernel<<<gridBlock, threadBlock>>>(c, a, b);
            }
            // Kernel launches are asynchronous: wait for completion so the
            // interval covers execution, not just enqueue time (the original
            // recorded `end` without synchronizing).
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release per-configuration buffers; the original leaked all three
            // across every iteration of the sweep.
            cudaFree(c);
            cudaFree((void*)a);
            cudaFree((void*)b);
        }
    }
}
c3517491d1ba0e6ca42731ba6b530d4a35be430f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../common/CudaSafeCall.h"
#include "fourVglobalVariables.cuh"
#include "typedefSparse.h"
#include "sparsePrototypes.cuh"
#include "typedeffourV.h"
#include "fourVhostPrototypes.h"
#include "fourVdevicePrototypes.cuh"
// Model-specific initialisation hook for the 4-variable model; intentionally
// a no-op (the `res` resolution strings are unused here).
void fourV_init(char** res) {
}
/*
 * Allocate and initialise host/device state for the 4-variable model (HIP).
 *
 * memSize   - bytes per state array (totpoints * sizeof(real))
 * pitch     - out: device row pitch chosen by hipMallocPitch
 * gate_h    - host-side (pinned) state arrays
 * gate_dev  - device state arrays, current step
 * gate_devF - device state arrays, forward (next) step
 *
 * Initial conditions at every point: u = 0, v = 1, w = 1, s = 0.
 */
void fourV_gateinit(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF) {
	// Pinned host allocations for the four state variables.
	hipHostMalloc((void**)&(gate_h->u), memSize, 0);
	hipHostMalloc((void**)&(gate_h->v), memSize, 0);
	hipHostMalloc((void**)&(gate_h->w), memSize, 0);
	hipHostMalloc((void**)&(gate_h->s), memSize, 0);
	// Allocate device memory arrays (current step).
	CudaSafeCall(hipMallocPitch((void **)&gate_dev->u, pitch, memSize, 1));
	CudaSafeCall(hipMallocPitch((void **)&gate_dev->v, pitch, memSize, 1));
	CudaSafeCall(hipMallocPitch((void **)&gate_dev->w, pitch, memSize, 1));
	CudaSafeCall(hipMallocPitch((void **)&gate_dev->s, pitch, memSize, 1));
	// Allocate device forward memory arrays (next step).
	CudaSafeCall(hipMallocPitch((void **)&gate_devF->u, pitch, memSize, 1));
	CudaSafeCall(hipMallocPitch((void **)&gate_devF->v, pitch, memSize, 1));
	CudaSafeCall(hipMallocPitch((void **)&gate_devF->w, pitch, memSize, 1));
	CudaSafeCall(hipMallocPitch((void **)&gate_devF->s, pitch, memSize, 1));
	puts("\nFinished allocating device arrays\n");
	// Resting initial conditions on the host.
	int totpoints = (int)memSize / sizeof(real);
	for (int idx = 0; idx < totpoints; idx++) {
		gate_h->u[idx] = 0.0;
		gate_h->v[idx] = 1.0;
		gate_h->w[idx] = 1.0;
		gate_h->s[idx] = 0.0;
	}
	// Upload the initial state into both current and forward device arrays.
	CudaSafeCall(hipMemcpy2D((void *)gate_dev->u, *pitch, (void *)gate_h->u,
		memSize, memSize, 1, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy2D((void *)gate_dev->v, *pitch, (void *)gate_h->v,
		memSize, memSize, 1, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy2D((void *)gate_dev->w, *pitch, (void *)gate_h->w,
		memSize, memSize, 1, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy2D((void *)gate_dev->s, *pitch, (void *)gate_h->s,
		memSize, memSize, 1, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy2D((void *)gate_devF->u, *pitch, (void *)gate_h->u,
		memSize, memSize, 1, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy2D((void *)gate_devF->v, *pitch, (void *)gate_h->v,
		memSize, memSize, 1, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy2D((void *)gate_devF->w, *pitch, (void *)gate_h->w,
		memSize, memSize, 1, hipMemcpyHostToDevice));
	CudaSafeCall(hipMemcpy2D((void *)gate_devF->s, *pitch, (void *)gate_h->s,
		memSize, memSize, 1, hipMemcpyHostToDevice));
	// qp is a device-resident table of pointers to the gating variables
	// (v, w, s): stage the table on the host, then upload it to each side.
	real** qpH = (real**)malloc(sizeof(real *)*gate_h->qpl);
	int i = 0;
	qpH[i++] = gate_devF->v;
	qpH[i++] = gate_devF->w;
	qpH[i++] = gate_devF->s;
	CudaSafeCall(hipMemcpy((void *)gate_devF->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, hipMemcpyHostToDevice));
	i = 0;
	qpH[i++] = gate_dev->v;
	qpH[i++] = gate_dev->w;
	qpH[i++] = gate_dev->s;
	CudaSafeCall(hipMemcpy((void *)gate_dev->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, hipMemcpyHostToDevice));
	free(qpH);  // the original leaked this staging buffer
	// On the host, vm is simply an alias of the voltage array u. (The
	// original also malloc'd a real* here and immediately overwrote the
	// pointer with gate_h->u, leaking the allocation.)
	gate_h->vm = gate_h->u;
	// On the device, vm holds a device-side copy of the pointer to v.
	real* point;
	CudaSafeCall(hipMalloc((void **)&gate_dev->vm, sizeof(gate_dev->v)));
	point = gate_dev->v;
	// NOTE(review): the original passed `point` (a device address) as the
	// host-side copy source; the pointer *value* is what must be stored,
	// so the source is `&point` — confirm against the device-side reader.
	CudaSafeCall(hipMemcpy((void *)gate_dev->vm, (void *)&point, sizeof(real *), hipMemcpyHostToDevice));
	CudaSafeCall(hipMalloc((void **)&gate_devF->vm, sizeof(gate_devF->v)));
	point = gate_devF->v;
	CudaSafeCall(hipMemcpy((void *)gate_devF->vm, (void *)&point, sizeof(real *), hipMemcpyHostToDevice));
	CudaCheckError();
	puts("\nFinished initializing device arrays\n");
}
/*
 * Copy the four pitched device state arrays (u, v, w, s) back into the
 * flat pinned host arrays.
 */
void fourV_sync(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev) {
	real* hostArr[4] = { gate_h->u, gate_h->v, gate_h->w, gate_h->s };
	real* devArr[4]  = { gate_dev->u, gate_dev->v, gate_dev->w, gate_dev->s };
	for (int f = 0; f < 4; ++f) {
		// One row of memSize bytes per field; host side is unpitched.
		CudaSafeCall(hipMemcpy2D((void *)hostArr[f], memSize, (void *)devArr[f],
			pitch, memSize, 1, hipMemcpyDeviceToHost));
	}
}
/*
 * Release every allocation made in fourV_gateinit (HIP build).
 */
void fourV_exit(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF, sparse* MatrixINT, cudasparse* cudaMatrixINT){
	// Free pinned host arrays. gate_h->vm aliases gate_h->u (see
	// fourV_gateinit), so it must NOT be freed again — the original
	// called hipHostFree(gate_h->vm) and double-freed u.
	hipHostFree(gate_h->u); hipHostFree(gate_h->v); hipHostFree(gate_h->w); hipHostFree(gate_h->s);
	hipFree(gate_dev->u); hipFree(gate_dev->v); hipFree(gate_dev->w); hipFree(gate_dev->s);
	// NOTE(review): the original freed gate_h->qp here and again below
	// (a double free). The pointer tables populated in gateinit are
	// gate_dev->qp and gate_devF->qp, so free those instead.
	hipFree(gate_dev->vm); hipFree(gate_dev->qp);
	hipFree(gate_devF->u); hipFree(gate_devF->v); hipFree(gate_devF->w); hipFree(gate_devF->s);
	hipFree(gate_devF->vm); hipFree(gate_devF->qp);
	// Sparse-matrix device storage.
	hipFree(cudaMatrixINT->type);
	hipFree(cudaMatrixINT->rows);
	hipFree(cudaMatrixINT->maxnz);
	hipFree(cudaMatrixINT->csep);
	hipFree(cudaMatrixINT->jcoef);
	hipFree(cudaMatrixINT->coef);
}
/*========================================================================
* 2D : 4-Variable Model Time Integrator
*========================================================================
*/
void __device__ GetFDev_fourV(int i2d, int pitch, real beta, real Cm, real t, real dt, int totpoints, real rx, gateType g_dev, gateType g_devF) {
/*------------------------------------------------------------------------
* return if outside domain
*------------------------------------------------------------------------
*/
if (i2d >= totpoints) {
return;
}
/*------------------------------------------------------------------------
* setting local variables
*------------------------------------------------------------------------
*/
real u = g_dev.u[i2d];
real v = g_dev.v[i2d];
real w = g_dev.w[i2d];
real s = g_dev.s[i2d];
real fu = g_devF.u[i2d];
/*------------------------------------------------------------------------
* Additional heaviside functions
*------------------------------------------------------------------------
*/
real H_theta_v = (u > theta_v) ? 1.0 : 0.0;
real H_theta_m_v = (u > theta_m_v) ? 1.0 : 0.0;
real H_theta_w = (u > theta_w) ? 1.0 : 0.0;
real H_theta_so = (u > theta_so) ? 1.0 : 0.0;
real H_theta_si = (u > theta_si) ? 1.0 : 0.0;
real H_theta_s = (u > theta_s) ? 1.0 : 0.0;
real H_theta_o = (u > theta_o) ? 1.0 : 0.0;
real H_theta_vinf = (u > theta_vinf) ? 1.0 : 0.0;
real H_theta_winf = (u > theta_winf) ? 1.0 : 0.0;
/*------------------------------------------------------------------------
* Calculating dependant tau's
*------------------------------------------------------------------------
*/
real tau_m_v = (1.0 - H_theta_m_v)*tau_m_v1
+ H_theta_m_v * tau_m_v2;
real tau_m_w = tau_m_w1
+ (tau_m_w2 - tau_m_w1)*(1. + tanh(k_m_w*(u - u_m_w)))*0.5;
real tau_p_w = tau_p_w1
+ (tau_p_w2 - tau_p_w1)*(1. + tanh(k_p_w*(
delta_w*(w - w_p_c) + (1. - delta_w)*(u - u_p_w))))*0.5;
real tau_s = (1. - H_theta_s)*tau_s1 + H_theta_s * tau_s2;
real tau_o = (1. - H_theta_o)*tau_o1 + H_theta_o * tau_o2;
real tau_so = tau_so1
+ (tau_so2 - tau_so1)*(1. + tanh(k_so*(u - u_so)))*0.5;
real tau_si = tau_si1
+ (tau_si2 - tau_si1)*(1. + tanh(k_si*(s - s_c)))*0.5;
real tau_p_si = alpha_si * (1. + exp(k_si1*(u - theta_p_si))) /
(1. - tanh(k_si2*(u - theta_p_si)));
real v_inf = 1. - H_theta_vinf;
real w_inf = (1. - H_theta_winf)*(1. - u / tau_winf)
+ H_theta_winf * w_sinf;
/*------------------------------------------------------------------------
* v
*------------------------------------------------------------------------
*/
real dv2dt = (1. - H_theta_v)*(v_inf - v) / tau_m_v
- H_theta_v * v / tau_p_v;
v += dv2dt * dt;
g_devF.v[i2d] = v;
/*------------------------------------------------------------------------
* w
*------------------------------------------------------------------------
*/
real wx = (2. - alpha_w)*(3. - alpha_w)*(4. - alpha_w)*w / 6.0
+ (alpha_w - 1.)*(3. - alpha_w)*(4. - alpha_w)*0.5*w*w
+ (alpha_w - 1.)*(alpha_w - 2.)*(4. - alpha_w)*0.5*w*w*w
+ (alpha_w - 1.)*(alpha_w - 2.)*(alpha_w - 3.)*w*w*w*w / 6.;
real dw2dt = (1. - H_theta_w)*(w_inf - wx) / tau_m_w
- H_theta_w * w / tau_p_w;
w += dw2dt * dt;
g_devF.w[i2d] = w;
/*------------------------------------------------------------------------
* s
*------------------------------------------------------------------------
*/
real ds2dt = ((1. + tanh(k_s*(u - u_s)))*0.5 - s) / tau_s;
s += ds2dt * dt;
g_devF.s[i2d] = s;
/*------------------------------------------------------------------------
* I_sum
*------------------------------------------------------------------------
*/
real J_fi = -v * H_theta_v*(u - theta_p_v)*(u_u - u) / tau_fi;
real J_so = (u - u_o)*(1. - H_theta_so)*(1. - beta_v * v) / tau_o
+ H_theta_so / tau_so;
real J_si;
if (gamma_si > 0.5)
J_si = -H_theta_si * w*s / tau_si;
else
J_si = -(1. + tanh(k_si_c*(u - theta_si_c)))*w / tau_p_si;
real I_sum = J_fi + J_so + J_si;
/*------------------------------------------------------------------------
* Time integration
*------------------------------------------------------------------------
*/
fu -= dt * I_sum / Cm;
g_devF.u[i2d] = u;
} | c3517491d1ba0e6ca42731ba6b530d4a35be430f.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../common/CudaSafeCall.h"
#include "fourVglobalVariables.cuh"
#include "typedefSparse.h"
#include "sparsePrototypes.cuh"
#include "typedeffourV.h"
#include "fourVhostPrototypes.h"
#include "fourVdevicePrototypes.cuh"
// Model-specific initialisation hook for the 4-variable model; intentionally
// a no-op (the `res` resolution strings are unused here).
void fourV_init(char** res) {
}
// Allocate and initialise host/device state for the 4-variable model (CUDA).
// memSize = bytes per state array; *pitch receives the device row pitch.
// Initial conditions at every point: u = 0, v = 1, w = 1, s = 0.
void fourV_gateinit(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF) {
// Pinned host allocations for the four state variables.
cudaHostAlloc((void**)&(gate_h->u), memSize, 0);
cudaHostAlloc((void**)&(gate_h->v), memSize, 0);
cudaHostAlloc((void**)&(gate_h->w), memSize, 0);
cudaHostAlloc((void**)&(gate_h->s), memSize, 0);
// Allocate device memory arrays
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->u, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->v, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->w, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_dev->s, pitch,
memSize, 1));
// Allocate device forward memory arrays
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->u, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->v, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->w, pitch,
memSize, 1));
CudaSafeCall(cudaMallocPitch((void **)&gate_devF->s, pitch,
memSize, 1));
puts("\nFinished allocating device arrays\n");
// Resting initial conditions on the host.
int totpoints = (int)memSize / sizeof(real);
for (int idx = 0; idx < totpoints; idx++) {
gate_h->u[idx] = 0.0;
gate_h->v[idx] = 1.0;
gate_h->w[idx] = 1.0;
gate_h->s[idx] = 0.0;
}
// Upload the initial state into both current and forward device arrays.
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->u, *pitch, (void *)gate_h->u,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->v, *pitch, (void *)gate_h->v,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->w, *pitch, (void *)gate_h->w,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_dev->s, *pitch, (void *)gate_h->s,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->u, *pitch, (void *)gate_h->u,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->v, *pitch, (void *)gate_h->v,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->w, *pitch, (void *)gate_h->w,
memSize, memSize, 1, cudaMemcpyHostToDevice));
CudaSafeCall(cudaMemcpy2D((void *)gate_devF->s, *pitch, (void *)gate_h->s,
memSize, memSize, 1, cudaMemcpyHostToDevice));
// qp: device-resident table of pointers to the gating variables (v, w, s),
// staged on the host then uploaded.
// NOTE(review): qpH is never freed (leak).
real** qpH = (real**)malloc(sizeof(real *)*gate_h->qpl);
int i = 0;
qpH[i++] = gate_devF->v;
qpH[i++] = gate_devF->w;
qpH[i++] = gate_devF->s;
CudaSafeCall(cudaMemcpy((void *)gate_devF->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, cudaMemcpyHostToDevice));
i = 0;
qpH[i++] = gate_dev->v;
qpH[i++] = gate_dev->w;
qpH[i++] = gate_dev->s;
CudaSafeCall(cudaMemcpy((void *)gate_dev->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, cudaMemcpyHostToDevice));
// NOTE(review): this malloc is immediately overwritten by the alias
// assignment on the next line, leaking sizeof(real*) bytes.
gate_h->vm = (real*)malloc(sizeof(real*));
gate_h->vm = gate_h->u;
// Device-side vm: allocated to hold one pointer value.
// NOTE(review): the copy source is `point` (a device address) rather than
// `&point`; copying from a device address with cudaMemcpyHostToDevice looks
// wrong — the pointer value (&point) was presumably intended. Confirm.
real* point;
CudaSafeCall(cudaMalloc((void **)&gate_dev->vm, sizeof(gate_dev->v)));
point = gate_dev->v;
CudaSafeCall(cudaMemcpy((void *)gate_dev->vm, (void *)point, sizeof(real *), cudaMemcpyHostToDevice));
CudaSafeCall(cudaMalloc((void **)&gate_devF->vm, sizeof(gate_devF->v)));
point = gate_devF->v;
CudaSafeCall(cudaMemcpy((void *)gate_devF->vm, (void *)point, sizeof(real *), cudaMemcpyHostToDevice));
CudaCheckError();
puts("\nFinished initializing device arrays\n");
}
/*
 * Copy the four pitched device state arrays (u, v, w, s) back into the
 * flat pinned host arrays.
 */
void fourV_sync(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev) {
	real* hostArr[4] = { gate_h->u, gate_h->v, gate_h->w, gate_h->s };
	real* devArr[4]  = { gate_dev->u, gate_dev->v, gate_dev->w, gate_dev->s };
	for (int f = 0; f < 4; ++f) {
		// One row of memSize bytes per field; host side is unpitched.
		CudaSafeCall(cudaMemcpy2D((void *)hostArr[f], memSize, (void *)devArr[f],
			pitch, memSize, 1, cudaMemcpyDeviceToHost));
	}
}
/*
 * Release every allocation made in fourV_gateinit (CUDA build).
 */
void fourV_exit(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF, sparse* MatrixINT, cudasparse* cudaMatrixINT){
	// Free pinned host arrays. gate_h->vm aliases gate_h->u (see
	// fourV_gateinit), so it must NOT be freed again — the original
	// called cudaFreeHost(gate_h->vm) and double-freed u.
	cudaFreeHost(gate_h->u); cudaFreeHost(gate_h->v); cudaFreeHost(gate_h->w); cudaFreeHost(gate_h->s);
	cudaFree(gate_dev->u); cudaFree(gate_dev->v); cudaFree(gate_dev->w); cudaFree(gate_dev->s);
	// NOTE(review): the original freed gate_h->qp here and again below
	// (a double free). The pointer tables populated in gateinit are
	// gate_dev->qp and gate_devF->qp, so free those instead.
	cudaFree(gate_dev->vm); cudaFree(gate_dev->qp);
	cudaFree(gate_devF->u); cudaFree(gate_devF->v); cudaFree(gate_devF->w); cudaFree(gate_devF->s);
	cudaFree(gate_devF->vm); cudaFree(gate_devF->qp);
	// Sparse-matrix device storage.
	cudaFree(cudaMatrixINT->type);
	cudaFree(cudaMatrixINT->rows);
	cudaFree(cudaMatrixINT->maxnz);
	cudaFree(cudaMatrixINT->csep);
	cudaFree(cudaMatrixINT->jcoef);
	cudaFree(cudaMatrixINT->coef);
}
/*========================================================================
* 2D : 4-Variable Model Time Integrator
*========================================================================
*/
/*========================================================================
 * 2D : 4-Variable Model reaction-term integrator (one grid point per call).
 *
 * Reads the state (u, v, w, s) from g_dev, advances the gating variables
 * v, w, s one forward-Euler step, and folds the total membrane current
 * I_sum = J_fi + J_so + J_si into the forward voltage accumulator
 * g_devF.u (which is assumed to already hold any previously computed
 * contribution, e.g. diffusion — TODO confirm against the stencil kernel).
 *
 * i2d       - linear point index; returns immediately if >= totpoints
 * dt, Cm    - time step and membrane capacitance
 * beta, t, pitch, rx - unused in this routine (kept for a uniform signature)
 *========================================================================
 */
void __device__ GetFDev_fourV(int i2d, int pitch, real beta, real Cm, real t, real dt, int totpoints, real rx, gateType g_dev, gateType g_devF) {
	// Return if outside the domain.
	if (i2d >= totpoints) {
		return;
	}
	// Local copies of the state at this point.
	real u = g_dev.u[i2d];
	real v = g_dev.v[i2d];
	real w = g_dev.w[i2d];
	real s = g_dev.s[i2d];
	real fu = g_devF.u[i2d];
	/*------------------------------------------------------------------------
	 * Heaviside step functions on the membrane voltage
	 *------------------------------------------------------------------------
	 */
	real H_theta_v = (u > theta_v) ? 1.0 : 0.0;
	real H_theta_m_v = (u > theta_m_v) ? 1.0 : 0.0;
	real H_theta_w = (u > theta_w) ? 1.0 : 0.0;
	real H_theta_so = (u > theta_so) ? 1.0 : 0.0;
	real H_theta_si = (u > theta_si) ? 1.0 : 0.0;
	real H_theta_s = (u > theta_s) ? 1.0 : 0.0;
	real H_theta_o = (u > theta_o) ? 1.0 : 0.0;
	real H_theta_vinf = (u > theta_vinf) ? 1.0 : 0.0;
	real H_theta_winf = (u > theta_winf) ? 1.0 : 0.0;
	/*------------------------------------------------------------------------
	 * Voltage-dependent time constants and steady states
	 *------------------------------------------------------------------------
	 */
	real tau_m_v = (1.0 - H_theta_m_v)*tau_m_v1
		+ H_theta_m_v * tau_m_v2;
	real tau_m_w = tau_m_w1
		+ (tau_m_w2 - tau_m_w1)*(1. + tanh(k_m_w*(u - u_m_w)))*0.5;
	real tau_p_w = tau_p_w1
		+ (tau_p_w2 - tau_p_w1)*(1. + tanh(k_p_w*(
		delta_w*(w - w_p_c) + (1. - delta_w)*(u - u_p_w))))*0.5;
	real tau_s = (1. - H_theta_s)*tau_s1 + H_theta_s * tau_s2;
	real tau_o = (1. - H_theta_o)*tau_o1 + H_theta_o * tau_o2;
	real tau_so = tau_so1
		+ (tau_so2 - tau_so1)*(1. + tanh(k_so*(u - u_so)))*0.5;
	real tau_si = tau_si1
		+ (tau_si2 - tau_si1)*(1. + tanh(k_si*(s - s_c)))*0.5;
	real tau_p_si = alpha_si * (1. + exp(k_si1*(u - theta_p_si))) /
		(1. - tanh(k_si2*(u - theta_p_si)));
	real v_inf = 1. - H_theta_vinf;
	real w_inf = (1. - H_theta_winf)*(1. - u / tau_winf)
		+ H_theta_winf * w_sinf;
	/*------------------------------------------------------------------------
	 * v gate: forward-Euler step
	 *------------------------------------------------------------------------
	 */
	real dv2dt = (1. - H_theta_v)*(v_inf - v) / tau_m_v
		- H_theta_v * v / tau_p_v;
	v += dv2dt * dt;
	g_devF.v[i2d] = v;
	/*------------------------------------------------------------------------
	 * w gate: quartic polynomial wx blends w by alpha_w, then Euler step
	 *------------------------------------------------------------------------
	 */
	real wx = (2. - alpha_w)*(3. - alpha_w)*(4. - alpha_w)*w / 6.0
		+ (alpha_w - 1.)*(3. - alpha_w)*(4. - alpha_w)*0.5*w*w
		+ (alpha_w - 1.)*(alpha_w - 2.)*(4. - alpha_w)*0.5*w*w*w
		+ (alpha_w - 1.)*(alpha_w - 2.)*(alpha_w - 3.)*w*w*w*w / 6.;
	real dw2dt = (1. - H_theta_w)*(w_inf - wx) / tau_m_w
		- H_theta_w * w / tau_p_w;
	w += dw2dt * dt;
	g_devF.w[i2d] = w;
	/*------------------------------------------------------------------------
	 * s gate: forward-Euler step
	 *------------------------------------------------------------------------
	 */
	real ds2dt = ((1. + tanh(k_s*(u - u_s)))*0.5 - s) / tau_s;
	s += ds2dt * dt;
	g_devF.s[i2d] = s;
	/*------------------------------------------------------------------------
	 * Membrane currents: fast inward, slow outward, slow inward
	 *------------------------------------------------------------------------
	 */
	real J_fi = -v * H_theta_v*(u - theta_p_v)*(u_u - u) / tau_fi;
	real J_so = (u - u_o)*(1. - H_theta_so)*(1. - beta_v * v) / tau_o
		+ H_theta_so / tau_so;
	real J_si;
	if (gamma_si > 0.5)
		J_si = -H_theta_si * w*s / tau_si;
	else
		J_si = -(1. + tanh(k_si_c*(u - theta_si_c)))*w / tau_p_si;
	real I_sum = J_fi + J_so + J_si;
	/*------------------------------------------------------------------------
	 * Voltage update: accumulate the reaction term into the forward array.
	 * NOTE(review): the original wrote back `u`, leaving `fu` (and the whole
	 * current computation above) unused; storing `fu` matches the Euler
	 * pattern used for v/w/s — confirm against the host-side integrator.
	 *------------------------------------------------------------------------
	 */
	fu -= dt * I_sum / Cm;
	g_devF.u[i2d] = fu;
}
6b39546a1a7535ad21780a08925dcef041eac710.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/half_float_ops.h"
#include "caffe2/core/context_gpu.h"
#ifdef CAFFE_HAS_CUDA_FP16
namespace caffe2 {
namespace {
// Elementwise float -> fp16 cast: Y[i] = (half)X[i] for i in [0, N).
// CUDA_1D_KERNEL_LOOP is Caffe2's grid-stride loop macro, so any 1-D
// launch configuration covers all N elements.
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    Y[i] = __float2half(X[i]);
  }
}
// Elementwise fp16 -> float widening: Y[i] = (float)X[i] for i in [0, N).
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    Y[i] = __half2float(X[i]);
  }
}
}
// Casts a float input tensor to an fp16 (at::Half) output of the same
// shape on this operator's GPU stream. Always reports success.
template <>
bool FloatToHalfOp<CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* Y = Output(0, X.sizes(), at::dtype<at::Half>());
  hipLaunchKernelGGL(( FloatToHalfKernel),
      dim3(CAFFE_GET_BLOCKS(X.numel())),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      X.numel(),
      X.data<float>(),
      // at::Half and the device half type share the same 16-bit layout,
      // hence the reinterpret_cast.
      reinterpret_cast<half*>(Y->template mutable_data<at::Half>()));
  return true;
}
// Widens an fp16 (at::Half) input tensor to a float output of the same
// shape on this operator's GPU stream. Always reports success.
template <>
bool HalfToFloatOp<CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* Y = Output(0, X.sizes(), at::dtype<float>());
  hipLaunchKernelGGL(( HalfToFloatKernel),
      dim3(CAFFE_GET_BLOCKS(X.numel())),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      X.numel(),
      // at::Half and the device half type share the same 16-bit layout.
      reinterpret_cast<const half*>(X.data<at::Half>()),
      Y->template mutable_data<float>());
  return true;
}
REGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatOp<CUDAContext>);
} // namespace caffe2
#endif // CAFFE_HAS_CUDA_FP16
| 6b39546a1a7535ad21780a08925dcef041eac710.cu | #include "caffe2/operators/half_float_ops.h"
#include "caffe2/core/context_gpu.h"
#ifdef CAFFE_HAS_CUDA_FP16
namespace caffe2 {
namespace {
// Elementwise float -> fp16 cast: Y[i] = (half)X[i] for i in [0, N).
// CUDA_1D_KERNEL_LOOP is Caffe2's grid-stride loop macro, so any 1-D
// launch configuration covers all N elements.
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    Y[i] = __float2half(X[i]);
  }
}
// Elementwise fp16 -> float widening: Y[i] = (float)X[i] for i in [0, N).
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    Y[i] = __half2float(X[i]);
  }
}
}
// Casts a float input tensor to an fp16 (at::Half) output of the same
// shape on this operator's GPU stream. Always reports success.
template <>
bool FloatToHalfOp<CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* Y = Output(0, X.sizes(), at::dtype<at::Half>());
  FloatToHalfKernel<<<
      CAFFE_GET_BLOCKS(X.numel()),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      X.numel(),
      X.data<float>(),
      // at::Half and the device half type share the same 16-bit layout,
      // hence the reinterpret_cast.
      reinterpret_cast<half*>(Y->template mutable_data<at::Half>()));
  return true;
}
// Widens an fp16 (at::Half) input tensor to a float output of the same
// shape on this operator's GPU stream. Always reports success.
template <>
bool HalfToFloatOp<CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* Y = Output(0, X.sizes(), at::dtype<float>());
  HalfToFloatKernel<<<
      CAFFE_GET_BLOCKS(X.numel()),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context_.cuda_stream()>>>(
      X.numel(),
      // at::Half and the device half type share the same 16-bit layout.
      reinterpret_cast<const half*>(X.data<at::Half>()),
      Y->template mutable_data<float>());
  return true;
}
REGISTER_CUDA_OPERATOR(FloatToHalf, FloatToHalfOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(HalfToFloat, HalfToFloatOp<CUDAContext>);
} // namespace caffe2
#endif // CAFFE_HAS_CUDA_FP16
|
b37512f7f9b065529bb064920bf144363b986a1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "MappedThrustFunctor.hh"
// Piecewise PDF evaluator: a dedicated "map" function is evaluated first to
// pick which component PDF handles this event; that component is then
// evaluated and scaled by its normalisation factor.
__device__ fptype device_Mapped (fptype* evt, fptype* p, unsigned int* indices) {
  // Structure : nP mapFunctionIndex mapParamIndex functionIndex1 parameterIndex1 functionIndex2 parameterIndex2 ...
  // Find mapping between event variables and function to evaluate
  unsigned int mapFunction = indices[1];
  // This is an index into the MappedThrustFunctor's list of functions.
  // FLOOR(0.5 + x) rounds the map function's return to the nearest integer.
  int targetFunction = (int) FLOOR(0.5 + (*(reinterpret_cast<device_function_ptr>(device_function_table[mapFunction])))(evt, p, paramIndices + indices[2]));
  targetFunction *= 2; // Because there are two pieces of information about each function
  targetFunction += 3; // Because first function information begins at index 3
  fptype ret = (*(reinterpret_cast<device_function_ptr>(device_function_table[indices[targetFunction]])))(evt, p, paramIndices + indices[targetFunction + 1]);
  ret *= normalisationFactors[indices[targetFunction + 1]];
  //if (gpuDebug & 1)
  //if ((gpuDebug & 1) && (0 == blockIdx.x) && (0 == threadIdx.x))
  //printf("[%i, %i] Mapped: %i (%f %f %f %f) %f\n", blockIdx.x, threadIdx.x, targetFunction, evt[0], evt[1], evt[2], evt[3], ret);
  return ret;
}
// Device-side handle to device_Mapped; copied to the host in the constructor.
__device__ device_function_ptr ptr_to_Mapped = device_Mapped;
// Builds a mapped PDF. 'm' maps each event to an index into 't', the list
// of target PDFs; the parameter-index layout matches the one documented in
// device_Mapped.
__host__ MappedThrustFunctor::MappedThrustFunctor (std::string n, ThrustPdfFunctor* m, vector<ThrustPdfFunctor*>& t)
  : ThrustPdfFunctor(0, n)
{
  components.push_back(m);
  std::vector<unsigned int> pindices;
  pindices.push_back(m->getFunctionIndex());
  pindices.push_back(m->getParameterIndex());
  std::set<int> functionIndicesUsed;
  for (vector<ThrustPdfFunctor*>::iterator f = t.begin(); f != t.end(); ++f) {
    components.push_back(*f);
    pindices.push_back((*f)->getFunctionIndex());
    pindices.push_back((*f)->getParameterIndex());
    functionIndicesUsed.insert((*f)->getFunctionIndex());
  }
  // Heterogeneous target function types cannot be batched together on the
  // GPU, which may force sequential evaluation.
  if (functionIndicesUsed.size() > 1) {
    std::cout << "Warning: More than one function type given to MappedThrustFunctor "
	      << getName()
	      << " constructor. This may slow execution by causing sequential evaluations.\n";
  }
  getObservables(observables);
  // Fetch the device-side function pointer so it can be registered.
  hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_Mapped, sizeof(void*));
  initialise(pindices);
}
// Normalises every target PDF and returns the sum of their normalisation
// integrals. Component 0 is the mapping function and needs no
// normalisation; this PDF itself is marked as already normalised.
__host__ fptype MappedThrustFunctor::normalise () const {
  fptype total = 0;
  for (unsigned int idx = 1; idx < components.size(); ++idx)
    total += components[idx]->normalise();
  host_normalisation[parameters] = 1.0;
  return total;
}
| b37512f7f9b065529bb064920bf144363b986a1c.cu | #include "MappedThrustFunctor.hh"
// Piecewise PDF evaluator: a dedicated "map" function is evaluated first to
// pick which component PDF handles this event; that component is then
// evaluated and scaled by its normalisation factor.
__device__ fptype device_Mapped (fptype* evt, fptype* p, unsigned int* indices) {
  // Structure : nP mapFunctionIndex mapParamIndex functionIndex1 parameterIndex1 functionIndex2 parameterIndex2 ...
  // Find mapping between event variables and function to evaluate
  unsigned int mapFunction = indices[1];
  // This is an index into the MappedThrustFunctor's list of functions.
  // FLOOR(0.5 + x) rounds the map function's return to the nearest integer.
  int targetFunction = (int) FLOOR(0.5 + (*(reinterpret_cast<device_function_ptr>(device_function_table[mapFunction])))(evt, p, paramIndices + indices[2]));
  targetFunction *= 2; // Because there are two pieces of information about each function
  targetFunction += 3; // Because first function information begins at index 3
  fptype ret = (*(reinterpret_cast<device_function_ptr>(device_function_table[indices[targetFunction]])))(evt, p, paramIndices + indices[targetFunction + 1]);
  ret *= normalisationFactors[indices[targetFunction + 1]];
  //if (gpuDebug & 1)
  //if ((gpuDebug & 1) && (0 == blockIdx.x) && (0 == threadIdx.x))
  //printf("[%i, %i] Mapped: %i (%f %f %f %f) %f\n", blockIdx.x, threadIdx.x, targetFunction, evt[0], evt[1], evt[2], evt[3], ret);
  return ret;
}
// Device-side handle to device_Mapped; copied to the host in the constructor.
__device__ device_function_ptr ptr_to_Mapped = device_Mapped;
// Builds a mapped PDF. 'm' maps each event to an index into 't', the list
// of target PDFs; the parameter-index layout matches the one documented in
// device_Mapped.
__host__ MappedThrustFunctor::MappedThrustFunctor (std::string n, ThrustPdfFunctor* m, vector<ThrustPdfFunctor*>& t)
  : ThrustPdfFunctor(0, n)
{
  components.push_back(m);
  std::vector<unsigned int> pindices;
  pindices.push_back(m->getFunctionIndex());
  pindices.push_back(m->getParameterIndex());
  std::set<int> functionIndicesUsed;
  for (vector<ThrustPdfFunctor*>::iterator f = t.begin(); f != t.end(); ++f) {
    components.push_back(*f);
    pindices.push_back((*f)->getFunctionIndex());
    pindices.push_back((*f)->getParameterIndex());
    functionIndicesUsed.insert((*f)->getFunctionIndex());
  }
  // Heterogeneous target function types cannot be batched together on the
  // GPU, which may force sequential evaluation.
  if (functionIndicesUsed.size() > 1) {
    std::cout << "Warning: More than one function type given to MappedThrustFunctor "
	      << getName()
	      << " constructor. This may slow execution by causing sequential evaluations.\n";
  }
  getObservables(observables);
  // Fetch the device-side function pointer so it can be registered.
  cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_Mapped, sizeof(void*));
  initialise(pindices);
}
// Normalises every target PDF and returns the sum of their normalisation
// integrals. Component 0 is the mapping function and needs no
// normalisation; this PDF itself is marked as already normalised.
__host__ fptype MappedThrustFunctor::normalise () const {
  fptype total = 0;
  for (unsigned int idx = 1; idx < components.size(); ++idx)
    total += components[idx]->normalise();
  host_normalisation[parameters] = 1.0;
  return total;
}
|
4206f7f52cd4fb6e349e7a6793e5f6e71019a0e8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "Field.h"
//#define TIMER
// Advances each particle one step: the velocity is updated from the force
// field sampled at the particle's current pixel ('sign' selects whether the
// field is added or subtracted), the position is integrated, and particles
// bounce off the frame borders. One thread per particle; 'field' is a
// width x height grid of force vectors.
__global__ void moveParticle_kernel(int N, ushort2* position, bool* sign, float2* velocity, float2* field, int width, int height)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < N)
	{
		ushort2 pos = position[i];
		float2 vel = velocity[i];
		// Velocity update from the field sample under the particle.
		int pixel_idx = pos.y * width + pos.x;
		float2 f = field[pixel_idx];
		if (sign[i])
		{
			vel.x += f.x;
			vel.y += f.y;
		}
		else
		{
			vel.x -= f.x;
			vel.y -= f.y;
		}
		// Position update. NOTE(review): pos is unsigned short, so the float
		// velocity is truncated on addition and a negative result wraps to a
		// large value, which the right/bottom clamp below then catches --
		// confirm this edge behaviour is intended.
		pos.x += vel.x;
		pos.y += vel.y;
		// Bounce the particle off the frame borders.
		if (pos.x > width - 2)
		{
			pos.x = width - 1;
			vel.x = -vel.x;
		}
		else if (pos.x < 1)
		{
			pos.x = 0;
			vel.x = -vel.x;
		}
		if (pos.y > height - 2)
		{
			pos.y = height - 1;
			vel.y = -vel.y;
		}
		else if (pos.y < 1)
		{
			pos.y = 0;
			vel.y = -vel.y;
		}
		position[i] = pos;
		velocity[i] = vel;
	}
}
// Host-side launcher: advances all f.N particles one step using 64-thread
// blocks, then reports any launch or execution error to stderr.
void moveParticle_caller(Field& f)
{
	int threadsPerBlock = 64;
	// Ceiling division so a partial last block is launched too.
	int blocks = f.N / threadsPerBlock;
	if (f.N % threadsPerBlock != 0)
		blocks++;
#ifdef TIMER
	hipEvent_t start, stop;
	float time;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);
#endif
	hipLaunchKernelGGL(( moveParticle_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, f.N, f.d_positions, f.d_sign, f.d_velocities, f.d_fieldForce, f.width, f.height);
#ifdef TIMER
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&time, start, stop);
	hipEventDestroy(start);
	hipEventDestroy(stop);
	// hipEventElapsedTime reports milliseconds; this prints microseconds.
	printf("%f\n", time*1000);
#endif
	hipError_t cudaStatus;
	// Catches launch-configuration errors.
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "moveParticle_kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
	}
	// Catches asynchronous execution errors from the kernel itself.
	cudaStatus = hipDeviceSynchronize();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching moveParticle_kernel!\n", cudaStatus);
		fprintf(stderr, "%s\n", hipGetErrorString(cudaStatus));
	}
} | 4206f7f52cd4fb6e349e7a6793e5f6e71019a0e8.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include "Field.h"
//#define TIMER
// Advances each particle one step: the velocity is updated from the force
// field sampled at the particle's current pixel ('sign' selects whether the
// field is added or subtracted), the position is integrated, and particles
// bounce off the frame borders. One thread per particle; 'field' is a
// width x height grid of force vectors.
__global__ void moveParticle_kernel(int N, ushort2* position, bool* sign, float2* velocity, float2* field, int width, int height)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < N)
	{
		ushort2 pos = position[i];
		float2 vel = velocity[i];
		// Velocity update from the field sample under the particle.
		int pixel_idx = pos.y * width + pos.x;
		float2 f = field[pixel_idx];
		if (sign[i])
		{
			vel.x += f.x;
			vel.y += f.y;
		}
		else
		{
			vel.x -= f.x;
			vel.y -= f.y;
		}
		// Position update. NOTE(review): pos is unsigned short, so the float
		// velocity is truncated on addition and a negative result wraps to a
		// large value, which the right/bottom clamp below then catches --
		// confirm this edge behaviour is intended.
		pos.x += vel.x;
		pos.y += vel.y;
		// Bounce the particle off the frame borders.
		if (pos.x > width - 2)
		{
			pos.x = width - 1;
			vel.x = -vel.x;
		}
		else if (pos.x < 1)
		{
			pos.x = 0;
			vel.x = -vel.x;
		}
		if (pos.y > height - 2)
		{
			pos.y = height - 1;
			vel.y = -vel.y;
		}
		else if (pos.y < 1)
		{
			pos.y = 0;
			vel.y = -vel.y;
		}
		position[i] = pos;
		velocity[i] = vel;
	}
}
// Host-side launcher: advances all f.N particles one step using 64-thread
// blocks, then reports any launch or execution error to stderr.
void moveParticle_caller(Field& f)
{
	int threadsPerBlock = 64;
	// Ceiling division so a partial last block is launched too.
	int blocks = f.N / threadsPerBlock;
	if (f.N % threadsPerBlock != 0)
		blocks++;
#ifdef TIMER
	cudaEvent_t start, stop;
	float time;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
#endif
	moveParticle_kernel<<<blocks, threadsPerBlock>>>(f.N, f.d_positions, f.d_sign, f.d_velocities, f.d_fieldForce, f.width, f.height);
#ifdef TIMER
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// cudaEventElapsedTime reports milliseconds; this prints microseconds.
	printf("%f\n", time*1000);
#endif
	cudaError_t cudaStatus;
	// Catches launch-configuration errors.
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "moveParticle_kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
	}
	// Catches asynchronous execution errors from the kernel itself.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching moveParticle_kernel!\n", cudaStatus);
		fprintf(stderr, "%s\n", cudaGetErrorString(cudaStatus));
	}
}
56f9ad3d8e5bc88c06923f944aa49f54da054205.hip | // !!! This is a file automatically generated by hipify!!!
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
//#include <hip/hip_runtime.h>
//#include <stdio.h>
//#include <iostream>
//#include <time.h>
//#include "common.h"
////#include "cuda_common.cuh"
//
//#define HISTOGRAM_BUCKET_SIZE 10
//
////---------------------------------------------------KERNELS AND CPU IMPLEMENTATIONS-----------------------------//
//
////in this kernel we expect grid with only single block
//__global__ void histogram_gpu_v01(int* input, int*output, int input_size)
//{
// atomicAdd(&(output[input[threadIdx.x]]),1);
//}
//
//// in this kernel we expect 1D grid with multiple 1D blocks
//// reduce the global memory writes by introducing shared memory intermideate store
//__global__ void histogram_gpu_v02(int* input, int*output, int input_size)
//{
// __shared__ int block_output[HISTOGRAM_BUCKET_SIZE];
//
// int gid = blockIdx.x*blockDim.x + threadIdx.x;
//
// atomicAdd(&(block_output[input[gid]]), 1);
// __syncthreads();
//
// if (threadIdx.x < HISTOGRAM_BUCKET_SIZE)
// {
// atomicAdd(&(output[threadIdx.x]), block_output[threadIdx.x]);
// }
//}
//
////crating thread for to represent each element in the array may be inefficient
////so change the kernel so that single threads handles multiple elements
//__global__ void histogram_gpu_v03(int* input, int*output, int input_size)
//{
// //to be implements
//}
//
//void histogram_cpu(int * input, int* output, int input_size)
//{
// for (int i = 0; i < input_size; i++)
// {
// output[input[i]]++;
// }
//}
//
//
////---------------------------------------------------RUNING FUNCTIONS--------------------------------------------//
//
//void run_histogram_cpu(int input_size, int histogram_buckets)
//{
// int * input, *output;
// int input_byte_size = sizeof(int)*input_size;
// int histogram_bucket_byte_size = sizeof(int)*histogram_buckets;
//
// input = (int*)malloc(input_byte_size);
// output = (int*)malloc(histogram_bucket_byte_size);
// memset(output, 0, histogram_bucket_byte_size);
//
// initialize(input, input_size);
// printf("Printing input array \n");
// print_array(input,input_size);
//
// histogram_cpu(input,output,input_size);
// printf("Printing histogram array \n");
// print_array(output, histogram_buckets);
//
// free(output);
// free(input);
//}
//
//void run_histogram_gpu(int input_size, int histogram_buckets)
//{
// int * h_input, *h_ref;
// int input_byte_size = sizeof(int)*input_size;
// int histogram_bucket_byte_size = sizeof(int)*histogram_buckets;
//
// h_input = (int*)malloc(input_byte_size);
// h_ref = (int*)malloc(histogram_bucket_byte_size);;
//
// initialize(h_input, input_size);
//
// int * d_input, *d_output;
// hipMalloc((int**)&d_input, input_byte_size);
// hipMalloc((int**)&d_output,histogram_bucket_byte_size);
//
// dim3 grid(4);
// dim3 block(input_size/grid.x);
//
// hipMemset(d_output, 0, histogram_bucket_byte_size);
// hipMemcpy(d_input, h_input, input_byte_size, hipMemcpyHostToDevice);
//
// histogram_gpu_v02 << <grid,block >> > (d_input, d_output, input_size);
// hipDeviceSynchronize();
//
// hipMemcpy(h_ref, d_output, histogram_bucket_byte_size, hipMemcpyDeviceToHost);
//
// print_array(h_ref,histogram_buckets);
//
// hipFree(d_output);
// hipFree(d_input);
//
// free(h_ref);
// free(h_input);
//}
//
////int main()
////{
//// printf("--------------------RUNNING HISTOGRAM EXAMPLE------------------------- \n");
//// int input_size = 1024;
//// int histogram_buckets = 10;
//// run_histogram_gpu(input_size,histogram_buckets);
////
//// system("pause");
//// return 0;
////} | 56f9ad3d8e5bc88c06923f944aa49f54da054205.cu | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include <cuda.h>
//#include <stdio.h>
//#include <iostream>
//#include <time.h>
//#include "common.h"
////#include "cuda_common.cuh"
//
//#define HISTOGRAM_BUCKET_SIZE 10
//
////---------------------------------------------------KERNELS AND CPU IMPLEMENTATIONS-----------------------------//
//
////in this kernel we expect grid with only single block
//__global__ void histogram_gpu_v01(int* input, int*output, int input_size)
//{
// atomicAdd(&(output[input[threadIdx.x]]),1);
//}
//
//// in this kernel we expect 1D grid with multiple 1D blocks
//// reduce the global memory writes by introducing shared memory intermideate store
//__global__ void histogram_gpu_v02(int* input, int*output, int input_size)
//{
// __shared__ int block_output[HISTOGRAM_BUCKET_SIZE];
//
// int gid = blockIdx.x*blockDim.x + threadIdx.x;
//
// atomicAdd(&(block_output[input[gid]]), 1);
// __syncthreads();
//
// if (threadIdx.x < HISTOGRAM_BUCKET_SIZE)
// {
// atomicAdd(&(output[threadIdx.x]), block_output[threadIdx.x]);
// }
//}
//
////crating thread for to represent each element in the array may be inefficient
////so change the kernel so that single threads handles multiple elements
//__global__ void histogram_gpu_v03(int* input, int*output, int input_size)
//{
// //to be implements
//}
//
//void histogram_cpu(int * input, int* output, int input_size)
//{
// for (int i = 0; i < input_size; i++)
// {
// output[input[i]]++;
// }
//}
//
//
////---------------------------------------------------RUNING FUNCTIONS--------------------------------------------//
//
//void run_histogram_cpu(int input_size, int histogram_buckets)
//{
// int * input, *output;
// int input_byte_size = sizeof(int)*input_size;
// int histogram_bucket_byte_size = sizeof(int)*histogram_buckets;
//
// input = (int*)malloc(input_byte_size);
// output = (int*)malloc(histogram_bucket_byte_size);
// memset(output, 0, histogram_bucket_byte_size);
//
// initialize(input, input_size);
// printf("Printing input array \n");
// print_array(input,input_size);
//
// histogram_cpu(input,output,input_size);
// printf("Printing histogram array \n");
// print_array(output, histogram_buckets);
//
// free(output);
// free(input);
//}
//
//void run_histogram_gpu(int input_size, int histogram_buckets)
//{
// int * h_input, *h_ref;
// int input_byte_size = sizeof(int)*input_size;
// int histogram_bucket_byte_size = sizeof(int)*histogram_buckets;
//
// h_input = (int*)malloc(input_byte_size);
// h_ref = (int*)malloc(histogram_bucket_byte_size);;
//
// initialize(h_input, input_size);
//
// int * d_input, *d_output;
// cudaMalloc((int**)&d_input, input_byte_size);
// cudaMalloc((int**)&d_output,histogram_bucket_byte_size);
//
// dim3 grid(4);
// dim3 block(input_size/grid.x);
//
// cudaMemset(d_output, 0, histogram_bucket_byte_size);
// cudaMemcpy(d_input, h_input, input_byte_size, cudaMemcpyHostToDevice);
//
// histogram_gpu_v02 << <grid,block >> > (d_input, d_output, input_size);
// cudaDeviceSynchronize();
//
// cudaMemcpy(h_ref, d_output, histogram_bucket_byte_size, cudaMemcpyDeviceToHost);
//
// print_array(h_ref,histogram_buckets);
//
// cudaFree(d_output);
// cudaFree(d_input);
//
// free(h_ref);
// free(h_input);
//}
//
////int main()
////{
//// printf("--------------------RUNNING HISTOGRAM EXAMPLE------------------------- \n");
//// int input_size = 1024;
//// int histogram_buckets = 10;
//// run_histogram_gpu(input_size,histogram_buckets);
////
//// system("pause");
//// return 0;
////} |
661da2a9d65eacc77248f456f18a06647d490312.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <gsl/gsl_rng.h>
//shamelessly stolen from cuda gems nbody code
#define EPS2 0.00000001
#define NThreads 1024
// Accumulates body j's gravitational pull on body i into ai and returns it.
// NOTE(review): the softening added to distSqr is ai.w (carried in the
// accumulator's w component), not the EPS2 macro defined above -- EPS2 is
// unused; confirm the caller initialises ai.w to a positive softening.
__device__ float4
bodyBodyInteraction(float4 bi, float4 bj, float4 ai)
{
  float3 r;
  // r_ij  [3 FLOPS]
  r.x = bj.x - bi.x;
  r.y = bj.y - bi.y;
  r.z = bj.z - bi.z;
  // distSqr = dot(r_ij, r_ij) + EPS^2  [6 FLOPS]
  float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + ai.w;
  // invDistCube =1/distSqr^(3/2)  [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
  float distSixth = distSqr * distSqr * distSqr;
  float invDistCube = 1.0f/sqrtf(distSixth);
  // s = m_j * invDistCube [1 FLOP]
  float s = bj.w * invDistCube;
  // a_i =  a_i + s * r_ij [6 FLOPS]
  ai.x += r.x * s;
  ai.y += r.y * s;
  ai.z += r.z * s;
  return ai;
}
// Standard CUDA-SDK helper giving typed access to the dynamically sized
// shared-memory block: declaring 'extern __shared__' arrays of different
// types in different templated kernels would conflict, so all share one
// untyped buffer reinterpreted here.
template<class T>
struct SharedMemory
{
    __device__ inline operator       T *()
    {
        extern __shared__ int __smem[];
        return (T *)__smem;
    }

    __device__ inline operator const T *() const
    {
        extern __shared__ int __smem[];
        return (T *)__smem;
    }
};
// Accumulates interactions with all blockDim.x bodies currently staged in
// shared memory. The caller must __syncthreads() before this (so the tile
// is fully loaded) and after (before the tile is overwritten).
__device__ float4
tile_calculation(float4 myPosition, float4 accel){
  long long int i;
  float4 *shPosition = SharedMemory<float4>();
  #pragma unroll 32
  for (i = 0; i < blockDim.x; i++) {
    accel = bodyBodyInteraction(myPosition, shPosition[i], accel);
  }
  return accel;
}
// All-pairs gravity kernel. Each thread owns one sink body and accumulates
// its acceleration against every source body, staged tile-by-tile through
// dynamic shared memory (NThreads * sizeof(float4) bytes; blockDim.x must
// equal NThreads). 'numdevs' is unused, kept only for call compatibility.
// Every thread must reach the __syncthreads() barriers, so out-of-range
// tail threads are kept alive with clamped reads instead of an early
// return, and only the final store is guarded. (The original read
// globalXsink/globalA out of bounds in the last ragged block and read past
// globalXsource in the last partial tile.)
__global__ void
calculate_forces(void *devXsource, void * devXsink, void *devA, int Nsource, int Nsink, int numdevs)
{
  float4 *shPosition = SharedMemory<float4>();
  float4 *globalXsource = (float4 *)devXsource;
  float4 *globalXsink = (float4 *)devXsink;
  float4 *globalA = (float4 *)devA;
  int gtid = blockIdx.x * blockDim.x + threadIdx.x;
  // Clamp so the tail threads of the last block never index out of bounds.
  int rid = min(gtid, Nsink - 1);
  float4 myPosition = globalXsink[rid];
  float4 acc = globalA[rid];
  for (int i = 0, tile = 0; i < Nsource; i += NThreads, tile++) {
    int idx = tile * blockDim.x + threadIdx.x;
    // Pad the ragged last tile with zero-mass bodies: they contribute
    // s = m_j * invDistCube = 0 (assumes the softening acc.w > 0, as the
    // existing self-interaction term already requires).
    shPosition[threadIdx.x] = (idx < Nsource)
        ? globalXsource[idx]
        : make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    __syncthreads();
    acc = tile_calculation(myPosition, acc);
    __syncthreads();
  }
  // Save the result in global memory for the integration step.
  if (gtid < Nsink) globalA[gtid] = acc;
}
#include <cassert>
#include <stdio.h>
// Multi-GPU all-pairs gravity driver. 'pos' and 'accel' alias host arrays
// of N float4 values (positions carry mass in .w; accelerations carry the
// softening in .w, as consumed by bodyBodyInteraction). Every device
// receives the full source set plus an exclusive slice of sink bodies;
// copies and the kernel run on a per-device stream so devices overlap.
void gpugravity(float * pos, float *accel, long long int N){
  float4 *positions = (float4 *) pos;
  float4 *acc = (float4 *) accel;
  int numdevs = 0;
  hipGetDeviceCount(&numdevs);
  hipStream_t * streams = new hipStream_t[numdevs];
  hipEvent_t * events = new hipEvent_t[numdevs];
  int * devicesinks = new int[numdevs];
  size_t *offset = new size_t[numdevs];
  size_t total_offset = 0;
  long long int remainingsinks = N;
  long long int allotment = N/numdevs;
  size_t d_sourcesize = N*sizeof(float4);
  float4 ** d_pos = new float4 *[numdevs];
  float4 ** d_acc = new float4 *[numdevs];
  for(int i = 0; i < numdevs; i++){
    hipSetDevice(i);
    // Per-device stream/event so the devices can run concurrently.
    hipStreamCreate(&streams[i]);
    hipEventCreate(&events[i]);
    // Equal allotment per device; the last device also takes the
    // N % numdevs remainder. (The original assigned 'allotment' in both
    // branches, silently dropping the remainder particles.)
    if (i < numdevs - 1) devicesinks[i] = (int)allotment;
    else devicesinks[i] = (int)remainingsinks;
    remainingsinks -= devicesinks[i];
    offset[i] = total_offset;
    total_offset += devicesinks[i];
    printf("Device %d has %d sinks and an offset of %d\n There are %d particles remaining\n\n",i,devicesinks[i],(int)offset[i],(int)remainingsinks);
    size_t d_sinksize = devicesinks[i] * sizeof(float4);
    hipMalloc((void **) &d_pos[i], d_sourcesize);
    hipMalloc((void **) &d_acc[i], d_sinksize);
  }
  // Stage the full position set plus this device's acceleration slice.
  for(int i = 0; i < numdevs; i++){
    hipSetDevice(i);
    size_t d_sinksize = devicesinks[i] * sizeof(float4);
    hipMemcpyAsync(d_pos[i], positions, d_sourcesize, hipMemcpyHostToDevice, streams[i]);
    hipMemcpyAsync(d_acc[i], &(acc[offset[i]]), d_sinksize, hipMemcpyHostToDevice, streams[i]);
  }
  for(int i = 0; i < numdevs; i++){
    hipSetDevice(i);
    hipLaunchKernelGGL(( calculate_forces), dim3((devicesinks[i]+NThreads-1)/NThreads),dim3(NThreads),NThreads*sizeof(float4),streams[i], d_pos[i],&((d_pos[i])[offset[i]]), d_acc[i] ,N, devicesinks[i],numdevs);
  }
  // Copy each slice of results back and mark completion with an event.
  for(int i = 0; i < numdevs; i++){
    hipSetDevice(i);
    size_t d_sinksize = devicesinks[i] * sizeof(float4);
    hipMemcpyAsync(&(acc[offset[i]]), d_acc[i], d_sinksize, hipMemcpyDeviceToHost, streams[i]);
    hipEventRecord(events[i], streams[i]);
  }
  // Wait for all devices to complete.
  for(int i = 0; i < numdevs; i++){
    hipSetDevice(i);
    hipEventSynchronize(events[i]);
    hipDeviceSynchronize();
  }
  // Release device memory and the per-device streams/events (the original
  // leaked the streams, the events and both host-side arrays).
  for(int i = 0; i < numdevs; i++){
    hipSetDevice(i);
    hipFree(d_pos[i]); hipFree(d_acc[i]);
    hipStreamDestroy(streams[i]);
    hipEventDestroy(events[i]);
  }
  delete[] offset; delete[] devicesinks; delete[] d_pos; delete[] d_acc;
  delete[] streams; delete[] events;
}
| 661da2a9d65eacc77248f456f18a06647d490312.cu | #include <stdio.h>
#include <gsl/gsl_rng.h>
//shamelessly stolen from cuda gems nbody code
#define EPS2 0.00000001
#define NThreads 1024
// Accumulates body j's gravitational pull on body i into ai and returns it.
// NOTE(review): the softening added to distSqr is ai.w (carried in the
// accumulator's w component), not the EPS2 macro defined above -- EPS2 is
// unused; confirm the caller initialises ai.w to a positive softening.
__device__ float4
bodyBodyInteraction(float4 bi, float4 bj, float4 ai)
{
  float3 r;
  // r_ij  [3 FLOPS]
  r.x = bj.x - bi.x;
  r.y = bj.y - bi.y;
  r.z = bj.z - bi.z;
  // distSqr = dot(r_ij, r_ij) + EPS^2  [6 FLOPS]
  float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + ai.w;
  // invDistCube =1/distSqr^(3/2)  [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
  float distSixth = distSqr * distSqr * distSqr;
  float invDistCube = 1.0f/sqrtf(distSixth);
  // s = m_j * invDistCube [1 FLOP]
  float s = bj.w * invDistCube;
  // a_i =  a_i + s * r_ij [6 FLOPS]
  ai.x += r.x * s;
  ai.y += r.y * s;
  ai.z += r.z * s;
  return ai;
}
// Standard CUDA-SDK helper giving typed access to the dynamically sized
// shared-memory block: declaring 'extern __shared__' arrays of different
// types in different templated kernels would conflict, so all share one
// untyped buffer reinterpreted here.
template<class T>
struct SharedMemory
{
    __device__ inline operator       T *()
    {
        extern __shared__ int __smem[];
        return (T *)__smem;
    }

    __device__ inline operator const T *() const
    {
        extern __shared__ int __smem[];
        return (T *)__smem;
    }
};
// Accumulates interactions with all blockDim.x bodies currently staged in
// shared memory. The caller must __syncthreads() before this (so the tile
// is fully loaded) and after (before the tile is overwritten).
__device__ float4
tile_calculation(float4 myPosition, float4 accel){
  long long int i;
  float4 *shPosition = SharedMemory<float4>();
  #pragma unroll 32
  for (i = 0; i < blockDim.x; i++) {
    accel = bodyBodyInteraction(myPosition, shPosition[i], accel);
  }
  return accel;
}
// All-pairs gravity kernel. Each thread owns one sink body and accumulates
// its acceleration against every source body, staged tile-by-tile through
// dynamic shared memory (NThreads * sizeof(float4) bytes; blockDim.x must
// equal NThreads). 'numdevs' is unused, kept only for call compatibility.
// Every thread must reach the __syncthreads() barriers, so out-of-range
// tail threads are kept alive with clamped reads instead of an early
// return, and only the final store is guarded. (The original read
// globalXsink/globalA out of bounds in the last ragged block and read past
// globalXsource in the last partial tile.)
__global__ void
calculate_forces(void *devXsource, void * devXsink, void *devA, int Nsource, int Nsink, int numdevs)
{
  float4 *shPosition = SharedMemory<float4>();
  float4 *globalXsource = (float4 *)devXsource;
  float4 *globalXsink = (float4 *)devXsink;
  float4 *globalA = (float4 *)devA;
  int gtid = blockIdx.x * blockDim.x + threadIdx.x;
  // Clamp so the tail threads of the last block never index out of bounds.
  int rid = min(gtid, Nsink - 1);
  float4 myPosition = globalXsink[rid];
  float4 acc = globalA[rid];
  for (int i = 0, tile = 0; i < Nsource; i += NThreads, tile++) {
    int idx = tile * blockDim.x + threadIdx.x;
    // Pad the ragged last tile with zero-mass bodies: they contribute
    // s = m_j * invDistCube = 0 (assumes the softening acc.w > 0, as the
    // existing self-interaction term already requires).
    shPosition[threadIdx.x] = (idx < Nsource)
        ? globalXsource[idx]
        : make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    __syncthreads();
    acc = tile_calculation(myPosition, acc);
    __syncthreads();
  }
  // Save the result in global memory for the integration step.
  if (gtid < Nsink) globalA[gtid] = acc;
}
#include <cassert>
#include <stdio.h>
// Multi-GPU all-pairs gravity driver. 'pos' and 'accel' alias host arrays
// of N float4 values (positions carry mass in .w; accelerations carry the
// softening in .w, as consumed by bodyBodyInteraction). Every device
// receives the full source set plus an exclusive slice of sink bodies;
// copies and the kernel run on a per-device stream so devices overlap.
void gpugravity(float * pos, float *accel, long long int N){
  float4 *positions = (float4 *) pos;
  float4 *acc = (float4 *) accel;
  int numdevs = 0;
  cudaGetDeviceCount(&numdevs);
  cudaStream_t * streams = new cudaStream_t[numdevs];
  cudaEvent_t * events = new cudaEvent_t[numdevs];
  int * devicesinks = new int[numdevs];
  size_t *offset = new size_t[numdevs];
  size_t total_offset = 0;
  long long int remainingsinks = N;
  long long int allotment = N/numdevs;
  size_t d_sourcesize = N*sizeof(float4);
  float4 ** d_pos = new float4 *[numdevs];
  float4 ** d_acc = new float4 *[numdevs];
  for(int i = 0; i < numdevs; i++){
    cudaSetDevice(i);
    // Per-device stream/event so the devices can run concurrently.
    cudaStreamCreate(&streams[i]);
    cudaEventCreate(&events[i]);
    // Equal allotment per device; the last device also takes the
    // N % numdevs remainder. (The original assigned 'allotment' in both
    // branches, silently dropping the remainder particles.)
    if (i < numdevs - 1) devicesinks[i] = (int)allotment;
    else devicesinks[i] = (int)remainingsinks;
    remainingsinks -= devicesinks[i];
    offset[i] = total_offset;
    total_offset += devicesinks[i];
    printf("Device %d has %d sinks and an offset of %d\n There are %d particles remaining\n\n",i,devicesinks[i],(int)offset[i],(int)remainingsinks);
    size_t d_sinksize = devicesinks[i] * sizeof(float4);
    cudaMalloc((void **) &d_pos[i], d_sourcesize);
    cudaMalloc((void **) &d_acc[i], d_sinksize);
  }
  // Stage the full position set plus this device's acceleration slice.
  for(int i = 0; i < numdevs; i++){
    cudaSetDevice(i);
    size_t d_sinksize = devicesinks[i] * sizeof(float4);
    cudaMemcpyAsync(d_pos[i], positions, d_sourcesize, cudaMemcpyHostToDevice, streams[i]);
    cudaMemcpyAsync(d_acc[i], &(acc[offset[i]]), d_sinksize, cudaMemcpyHostToDevice, streams[i]);
  }
  for(int i = 0; i < numdevs; i++){
    cudaSetDevice(i);
    calculate_forces<<<(devicesinks[i]+NThreads-1)/NThreads,NThreads,NThreads*sizeof(float4),streams[i]>>>(d_pos[i],&((d_pos[i])[offset[i]]), d_acc[i] ,N, devicesinks[i],numdevs);
  }
  // Copy each slice of results back and mark completion with an event.
  for(int i = 0; i < numdevs; i++){
    cudaSetDevice(i);
    size_t d_sinksize = devicesinks[i] * sizeof(float4);
    cudaMemcpyAsync(&(acc[offset[i]]), d_acc[i], d_sinksize, cudaMemcpyDeviceToHost, streams[i]);
    cudaEventRecord(events[i], streams[i]);
  }
  // Wait for all devices to complete.
  for(int i = 0; i < numdevs; i++){
    cudaSetDevice(i);
    cudaEventSynchronize(events[i]);
    cudaDeviceSynchronize();
  }
  // Release device memory and the per-device streams/events (the original
  // leaked the streams, the events and both host-side arrays).
  for(int i = 0; i < numdevs; i++){
    cudaSetDevice(i);
    cudaFree(d_pos[i]); cudaFree(d_acc[i]);
    cudaStreamDestroy(streams[i]);
    cudaEventDestroy(events[i]);
  }
  delete[] offset; delete[] devicesinks; delete[] d_pos; delete[] d_acc;
  delete[] streams; delete[] events;
}
|
ac1bbdeeab2fefe8867e6aa01859ffe320f7fe8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// Trivial demo kernel: every launched thread stores the constant 2 into
// *var_gpu (all threads write the same value, so the race is benign) and
// prints "a" once.
__global__ void funcao1(int *var_gpu)
{
	*var_gpu = 2;
	printf("a");
}
// Demo driver: copies 0 to the device, runs funcao1 over 10x10 threads
// (each stores 2), copies the value back and prints it. Adds the error
// checks the original lacked.
int main()
{
	int var = 0, *var_gpu = NULL;
	hipError_t err;
	// Device storage for a single int, initialised from the host value.
	err = hipMalloc((void**)&var_gpu, sizeof(int));
	if (err != hipSuccess) {
		fprintf(stderr, "hipMalloc: %s\n", hipGetErrorString(err));
		return 1;
	}
	hipMemcpy(var_gpu, &var, sizeof(int), hipMemcpyHostToDevice);
	hipLaunchKernelGGL(( funcao1), dim3(10),dim3(10), 0, 0, var_gpu);
	// Catch launch-configuration errors immediately.
	err = hipGetLastError();
	if (err != hipSuccess)
		fprintf(stderr, "kernel launch: %s\n", hipGetErrorString(err));
	// The blocking copy also synchronises with the kernel on stream 0.
	hipMemcpy(&var, var_gpu, sizeof(int), hipMemcpyDeviceToHost);
	printf("var2 = %d\n",var);
	hipFree(var_gpu);
	return 0;
} | ac1bbdeeab2fefe8867e6aa01859ffe320f7fe8b.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Trivial demo kernel: every launched thread stores the constant 2 into
// *var_gpu (all threads write the same value, so the race is benign) and
// prints "a" once.
__global__ void funcao1(int *var_gpu)
{
	*var_gpu = 2;
	printf("a");
}
// Demo driver: copies 0 to the device, runs funcao1 over 10x10 threads
// (each stores 2), copies the value back and prints it. Adds the error
// checks the original lacked.
int main()
{
	int var = 0, *var_gpu = NULL;
	cudaError_t err;
	// Device storage for a single int, initialised from the host value.
	err = cudaMalloc((void**)&var_gpu, sizeof(int));
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMalloc: %s\n", cudaGetErrorString(err));
		return 1;
	}
	cudaMemcpy(var_gpu, &var, sizeof(int), cudaMemcpyHostToDevice);
	funcao1<<<10,10>>>(var_gpu);
	// Catch launch-configuration errors immediately.
	err = cudaGetLastError();
	if (err != cudaSuccess)
		fprintf(stderr, "kernel launch: %s\n", cudaGetErrorString(err));
	// The blocking copy also synchronises with the kernel on stream 0.
	cudaMemcpy(&var, var_gpu, sizeof(int), cudaMemcpyDeviceToHost);
	printf("var2 = %d\n",var);
	cudaFree(var_gpu);
	return 0;
}
3bdd0131c0a53d6121694c04165f981c3c1096e5.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2018 XIAOLIN WANG (xiaolin.wang@nict.go.jp; arthur.xlw@google.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "LstmLayer.h"
namespace cytonLib
{
// Builds a (bi)directional multi-layer cuDNN RNN (LSTM by default) over
// input x_ whose dimensions are read as (maxSeqLen=n, batchSize=c,
// inputSize=h). Allocates the output y (hidden size * directions), the
// initial/final hidden and cell state tensors, the dropout descriptor, the
// workspace and training reserve buffers, and the weight blob w. Returns a
// pointer to y.
// NOTE(review): rnnMode_ is typed miopenRNNMode_t but is handed to
// cudnnSetRNNDescriptor -- a hipify translation artifact; the enum values
// are assumed to be compatible.
Variable* LstmLayer::init(string tag_, Variable* x_, bool bidirection, int hiddenSize_, int numLayers_,
		Precision dropout, miopenRNNMode_t rnnMode_)
{
	tag=tag_;
	x=x_;
	// Input dimensions are carried in the Variable's (n, c, h) fields.
	maxSeqLen=x->n;
	batchSize=x->c;
	inputSize=x->h;
	hiddenSize=hiddenSize_;
	numLayers=numLayers_;
	// A bidirectional RNN doubles the output width and the state layer count.
	bidirectFactor=bidirection?2:1;
	xDescs.init(maxSeqLen, batchSize, inputSize);
	int tHiddenSize=hiddenSize*bidirectFactor;
	y.resize(maxSeqLen, batchSize, tHiddenSize, 1);
	yDescs.init(maxSeqLen, batchSize, tHiddenSize);
	int tNumLayers = numLayers*bidirectFactor ;
	// Initial hidden/cell states start at zero; hy/cy receive final states.
	hx.resize(tNumLayers, batchSize, hiddenSize, 1);
	hx.setZero();
	cx.resize(tNumLayers, batchSize, hiddenSize, 1);
	cx.setZero();
	hy.resize(tNumLayers, batchSize, hiddenSize, 1);
	cy.resize(tNumLayers, batchSize, hiddenSize, 1);
	y.enlarge=false;
	checkError(cudnnCreateDropoutDescriptor(&dropoutDesc));
	// How much memory does dropout need for states?
	// These states are used to generate random numbers internally
	// and should not be freed until the RNN descriptor is no longer used
	// (hence the allocation below is intentionally never released here).
	size_t stateSize;
	void *states;
	checkError(cudnnDropoutGetStatesSize(global.cudnnHandle, &stateSize));
	checkError(hipMalloc(&states, stateSize));
	Precision tDropOut=dropout;
	checkError(cudnnSetDropoutDescriptor(dropoutDesc,
			global.cudnnHandle,
			tDropOut,
			states,
			stateSize,
			global.rnnDropoutSeed++));
	// -------------------------
	// Set up the RNN descriptor
	// -------------------------
	checkError(cudnnCreateRNNDescriptor(&rnnDesc));
	rnnMode = rnnMode_;
	checkError(cudnnSetRNNDescriptor(global.cudnnHandle, rnnDesc,
			hiddenSize,
			numLayers,
			dropoutDesc,
			CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation
			bidirection?CUDNN_BIDIRECTIONAL:CUDNN_UNIDIRECTIONAL,
			rnnMode,
			// CUDNN_RNN_ALGO_PERSIST_STATIC,
			CUDNN_RNN_ALGO_STANDARD,
			cudnnDataType));
	// -------------------------
	// Set up work space and reserved memory
	// -------------------------
	size_t workSize;
	size_t reserveSize;
	// Needed for every pass.
	checkError(cudnnGetRNNWorkspaceSize(global.cudnnHandle, rnnDesc, maxSeqLen, xDescs.descs, &workSize));
	// Only needed in training, shouldn't be touched between passes.
	checkError(cudnnGetRNNTrainingReserveSize(global.cudnnHandle, rnnDesc, maxSeqLen, xDescs.descs, &reserveSize));
	workspace.resize(workSize, 1);
	reserveSpace.resize(reserveSize,1);
	w.init(tag, rnnDesc, xDescs.descs[0], numLayers, bidirection, hiddenSize, rnnMode);
	checkError(hipDeviceSynchronize());
	return &y;
}
// Forward pass over the current mini-batch.
// Resizes y to the actual sequence length x->n (bounded by maxSeqLen), then
// runs the RNN forward. The training variant additionally fills reserveSpace,
// which backward()/calculateGradient() later consume; in testMode the cheaper
// inference path is used instead.
void LstmLayer::forward()
{
assert(x->n <= maxSeqLen);
assert(y.c==x->c);
y.resize(x->n, y.c, y.h, y.w);
if(!testMode)
{
checkError(cudnnRNNForwardTraining(global.cudnnHandle, rnnDesc,
x->n, xDescs.descs, x->data, hx.desc, hx.data, cx.desc, cx.data,
w.desc, w.data,
yDescs.descs, y.data, hy.desc, hy.data, cy.desc, cy.data,
workspace.data, workspace.ni, reserveSpace.data, reserveSpace.ni));
}
else
{
checkError(cudnnRNNForwardInference(global.cudnnHandle, rnnDesc,
x->n, xDescs.descs, x->data, hx.desc, hx.data, cx.desc, cx.data,
w.desc, w.data,
yDescs.descs, y.data, hy.desc, hy.data, cy.desc, cy.data,
workspace.data, workspace.ni));
}
}
// Backward-data pass: propagates y.grad back to x->grad plus the hidden/cell
// state gradients, using the reserveSpace filled by the training forward pass.
// Only valid after forward() ran with testMode == false.
void LstmLayer::backward()
{
assert(!testMode);
checkError(cudnnRNNBackwardData(global.cudnnHandle, rnnDesc,
x->n, yDescs.descs, y.data, yDescs.descs, y.grad.data, hy.desc, hy.grad.data,
cy.desc, cy.grad.data, w.desc, w.data,
hx.desc, hx.data, cx.desc, cx.data,
xDescs.descs, x->grad.data, hx.desc, hx.grad.data, cx.desc, cx.grad.data,
workspace.data, workspace.ni, reserveSpace.data, reserveSpace.ni ));
}
// Weight-gradient pass: computes dL/dW into w.grad from the inputs, initial
// hidden state and outputs saved by the last training forward pass.
void LstmLayer::calculateGradient()
{
assert(!testMode);
checkError(cudnnRNNBackwardWeights( global.cudnnHandle, rnnDesc,
x->n, xDescs.descs, x->data, hx.desc, hx.data, yDescs.descs, y.data,
workspace.data, workspace.ni, w.desc, w.grad.data, reserveSpace.data, reserveSpace.ni ));
}
} /* namespace cytonLib */
| 3bdd0131c0a53d6121694c04165f981c3c1096e5.cu | /*
Copyright 2018 XIAOLIN WANG (xiaolin.wang@nict.go.jp; arthur.xlw@google.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "LstmLayer.h"
namespace cytonLib
{
// Builds the cuDNN-backed RNN for this layer: input/output descriptors,
// dropout state, the RNN descriptor, workspace/reserve buffers and the packed
// weight tensor. Returns the output Variable y, resized to
// (maxSeqLen, batch, hiddenSize*bidirectFactor, 1).
// NOTE(review): despite the class name, any rnnMode_ (LSTM/GRU/RNN) is accepted.
Variable* LstmLayer::init(string tag_, Variable* x_, bool bidirection, int hiddenSize_, int numLayers_,
Precision dropout, cudnnRNNMode_t rnnMode_)
{
tag=tag_;
x=x_;
// Input layout used here: n = time steps, c = batch, h = feature size.
maxSeqLen=x->n;
batchSize=x->c;
inputSize=x->h;
hiddenSize=hiddenSize_;
numLayers=numLayers_;
bidirectFactor=bidirection?2:1;
xDescs.init(maxSeqLen, batchSize, inputSize);
int tHiddenSize=hiddenSize*bidirectFactor;
y.resize(maxSeqLen, batchSize, tHiddenSize, 1);
yDescs.init(maxSeqLen, batchSize, tHiddenSize);
// Hidden/cell state tensors cover every (layer, direction) pair.
int tNumLayers = numLayers*bidirectFactor ;
hx.resize(tNumLayers, batchSize, hiddenSize, 1);
hx.setZero();
cx.resize(tNumLayers, batchSize, hiddenSize, 1);
cx.setZero();
hy.resize(tNumLayers, batchSize, hiddenSize, 1);
cy.resize(tNumLayers, batchSize, hiddenSize, 1);
y.enlarge=false;
checkError(cudnnCreateDropoutDescriptor(&dropoutDesc));
// How much memory does dropout need for states?
// These states are used to generate random numbers internally
// and should not be freed until the RNN descriptor is no longer used
size_t stateSize;
void *states;
checkError(cudnnDropoutGetStatesSize(global.cudnnHandle, &stateSize));
// NOTE(review): 'states' is never freed; its lifetime is tied to rnnDesc, so
// this allocation leaks if the layer is ever destroyed.
checkError(cudaMalloc(&states, stateSize));
Precision tDropOut=dropout;
checkError(cudnnSetDropoutDescriptor(dropoutDesc,
global.cudnnHandle,
tDropOut,
states,
stateSize,
global.rnnDropoutSeed++));
// -------------------------
// Set up the RNN descriptor
// -------------------------
checkError(cudnnCreateRNNDescriptor(&rnnDesc));
rnnMode = rnnMode_;
checkError(cudnnSetRNNDescriptor(global.cudnnHandle, rnnDesc,
hiddenSize,
numLayers,
dropoutDesc,
CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation
bidirection?CUDNN_BIDIRECTIONAL:CUDNN_UNIDIRECTIONAL,
rnnMode,
// CUDNN_RNN_ALGO_PERSIST_STATIC,
CUDNN_RNN_ALGO_STANDARD,
cudnnDataType));
// -------------------------
// Set up work space and reserved memory
// -------------------------
// void *workspace;
size_t workSize;
size_t reserveSize;
// Need for every pass
checkError(cudnnGetRNNWorkspaceSize(global.cudnnHandle, rnnDesc, maxSeqLen, xDescs.descs, &workSize));
// Only needed in training, shouldn't be touched between passes.
checkError(cudnnGetRNNTrainingReserveSize(global.cudnnHandle, rnnDesc, maxSeqLen, xDescs.descs, &reserveSize));
workspace.resize(workSize, 1);
reserveSpace.resize(reserveSize,1);
w.init(tag, rnnDesc, xDescs.descs[0], numLayers, bidirection, hiddenSize, rnnMode);
checkError(cudaDeviceSynchronize());
return &y;
}
// Forward pass over the current mini-batch.
// Resizes y to the actual sequence length x->n (bounded by maxSeqLen), then
// runs the cuDNN RNN forward. The training variant additionally fills
// reserveSpace, which backward()/calculateGradient() later consume; in
// testMode the cheaper inference path is used instead.
void LstmLayer::forward()
{
assert(x->n <= maxSeqLen);
assert(y.c==x->c);
y.resize(x->n, y.c, y.h, y.w);
if(!testMode)
{
checkError(cudnnRNNForwardTraining(global.cudnnHandle, rnnDesc,
x->n, xDescs.descs, x->data, hx.desc, hx.data, cx.desc, cx.data,
w.desc, w.data,
yDescs.descs, y.data, hy.desc, hy.data, cy.desc, cy.data,
workspace.data, workspace.ni, reserveSpace.data, reserveSpace.ni));
}
else
{
checkError(cudnnRNNForwardInference(global.cudnnHandle, rnnDesc,
x->n, xDescs.descs, x->data, hx.desc, hx.data, cx.desc, cx.data,
w.desc, w.data,
yDescs.descs, y.data, hy.desc, hy.data, cy.desc, cy.data,
workspace.data, workspace.ni));
}
}
// Backward-data pass: propagates y.grad back to x->grad plus the hidden/cell
// state gradients, using the reserveSpace filled by the training forward pass.
// Only valid after forward() ran with testMode == false.
void LstmLayer::backward()
{
assert(!testMode);
checkError(cudnnRNNBackwardData(global.cudnnHandle, rnnDesc,
x->n, yDescs.descs, y.data, yDescs.descs, y.grad.data, hy.desc, hy.grad.data,
cy.desc, cy.grad.data, w.desc, w.data,
hx.desc, hx.data, cx.desc, cx.data,
xDescs.descs, x->grad.data, hx.desc, hx.grad.data, cx.desc, cx.grad.data,
workspace.data, workspace.ni, reserveSpace.data, reserveSpace.ni ));
}
// Weight-gradient pass: computes dL/dW into w.grad from the inputs, initial
// hidden state and outputs saved by the last training forward pass.
void LstmLayer::calculateGradient()
{
assert(!testMode);
checkError(cudnnRNNBackwardWeights( global.cudnnHandle, rnnDesc,
x->n, xDescs.descs, x->data, hx.desc, hx.data, yDescs.descs, y.data,
workspace.data, workspace.ni, w.desc, w.grad.data, reserveSpace.data, reserveSpace.ni ));
}
} /* namespace cytonLib */
|
1a2b4f920edaf3331d71ff9176e8174d21e26f3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudakernel/gemm/gemm.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include <hip/hip_fp16.h>
#include <float.h>
#include "kernel_type.h"
#include "conv_common.h"
#define TIMES 4
static std::vector<kernel_info_t> g_kvec;
static bool is_g_kvec_set = false;
#define FAKE_CONV_PARAM \
const int in_hw = 1; const int out_hw = 1; \
const int flt_hw = 1; const int splitk = 1; \
const int in_height = 1; const int in_width = 1; \
const int batch = M; const int num_grp = 1; \
const int num_chl_per_grp = 0; const int num_chl_per_grp_pad = K_pad; \
const int flt_height = 1; const int flt_width = 1; \
const int num_flt_per_grp = 0; const int num_flt_per_grp_pad = N_pad; \
const int out_height = 1; const int out_width = 1; \
const int stride_height = 1; const int stride_width = 1; \
const int pad_height = 0; const int pad_width = 0; \
const int hole_height = 1; const int hole_width = 1;
#define GEMM_FUNC_PARAM \
input0_tmp, \
(int4*)weight, \
final_out, \
kLoopNum, \
in_lut, 0, \
flt_lut, 0, \
in_hw, out_hw, \
flt_hw, splitk, \
in_height, in_width, \
batch, num_grp, \
num_chl_per_grp, num_chl_per_grp_pad, \
flt_height, flt_width, \
num_flt_per_grp, num_flt_per_grp_pad, \
out_height, out_width, \
stride_height, stride_width, \
pad_height, pad_width, \
hole_height, hole_width, \
has_bias, (int4*)bias, \
fuse_param.has_activation, clip_min, \
fuse_param.has_clip, clip_max, \
fuse_param.has_prelu, (const void *) fuse_param.prelu, \
fuse_param.has_elt, (const int4 *) fuse_param.pre_data, \
fuse_param.has_elt_activation, elt_clip_min, \
fuse_param.has_elt_clip, elt_clip_max, \
fuse_param.has_elt_prelu, (const void *) fuse_param.elt_prelu, \
(__half)fuse_param.leaky, (__half)fuse_param.elt_leaky, \
fuse_param.has_concat, concat_offset_v8, \
concat_stride_v8
// One-time population of the global F1 GEMM kernel list; only fp16 kernels
// exist. is_g_kvec_set is set unconditionally, so the unsupported-type printf
// fires at most once per process.
void init_f1_kvec(std::vector<kernel_info_t> &g_kvec, ppl::common::datatype_t type)
{
if ( type == ppl::common::DATATYPE_FLOAT32 )
{
printf("fp32 unsupported in %s\n", __FUNCTION__);
}
else if ( type == ppl::common::DATATYPE_FLOAT16 )
{
Initialize2spkConvF1KernelContainer(g_kvec);
}
else
{ printf("type unsupported\n"); }
is_g_kvec_set = true;
}
/*
 * Byte size of the temporary buffer the GEMM needs.
 *
 * Only the transA case stages anything: a transposed copy of the input
 * matrix A (stored K x M), with K padded up to the 128-byte load
 * granularity for this data type. Without transA no buffer is required.
 */
uint64_t PPLGemmCUDAGetBufSize(
    const ppl::nn::TensorShape* input_shape,
    int transA)
{
    if (!transA) {
        return 0;
    }
    auto type = input_shape->GetDataType();
    int elem_bytes = ppl::common::GetSizeOfDataType(type);
    int align = GetPadSize(type); // ldg 128 bytes
    int k_rows = input_shape->GetDim(0);
    int m_cols = input_shape->GetDim(1);
    return m_cols * Align(k_rows, align) * elem_bytes;
}
/*
 * Bytes needed to broadcast a scalar bias into a padded length-N vector.
 * A non-scalar bias is already a full vector, so no extra storage is needed.
 */
unsigned int PPLCUDAGemmGetBiasSize(
    const ppl::common::datatype_t type,
    const int N,
    const bool is_scalar)
{
    if (is_scalar) {
        const int aligned_n = Align(N, GetPadSize(type)); // ldg 128 bytes
        return aligned_n * ppl::common::GetSizeOfDataType(type);
    }
    return 0;
}
// Transposes an in_row x in_col matrix into output (in_col x in_row) while
// scaling every element by `scale` (computed in fp32).
// Block size must be (32,32,1); grid covers ceil(in_col/32) x ceil(in_row/32).
// The 33-column shared tile avoids shared-memory bank conflicts.
// Fixes vs. original: bounds checks used `<=`, reading/writing one element
// past each dimension, and the store always narrowed through __half even for
// T=float, silently losing precision on the fp32 path.
template<typename T>
__global__ void matrix_transpose(
    T *output,
    T *input,
    float scale,
    const int in_row,
    const int in_col)
{
    unsigned int in_x = blockIdx.x*32 + threadIdx.x;
    unsigned int in_y = blockIdx.y*32 + threadIdx.y;
    unsigned int out_x = blockIdx.y*32 + threadIdx.x;
    unsigned int out_y = blockIdx.x*32 + threadIdx.y;
    bool in_range = (in_x < in_col) && (in_y < in_row);   // valid indices are 0..dim-1
    bool out_range = (out_x < in_row) && (out_y < in_col);
    __shared__ T smem[32][33];  // +1 column pad: conflict-free transposed reads
    T value = in_range ? input[in_y*in_col + in_x] : (T)0;
    smem[threadIdx.x][threadIdx.y] = value;
    __syncthreads();
    value = smem[threadIdx.y][threadIdx.x];
    float fp_value = (float)value * scale;
    if(out_range) output[out_y*in_row + out_x] = (T)fp_value;
}
// Multiplies `size` elements of `input` in place by `scale` (math in fp32).
// Launch: ceil(size/512) blocks of 512 threads.
// Fix vs. original: the guard used `off <= size`, touching one element past
// the end of the buffer.
template<typename T>
__global__ void scale(T *input, float scale, unsigned int size){
    unsigned int off = blockIdx.x*512 + threadIdx.x;
    bool in_range = off < size;
    T value = in_range ? input[off] : (T)0;
    float fp_value = (float)value;
    fp_value = scale * fp_value;
    if (in_range) input[off] = (T)fp_value;
}
// Prepares the GEMM weight matrix on device:
//  - transB == 0: transposes weight (dim0 x dim1 -> dim1 x dim0) into
//    tmp_weight while multiplying by alpha, copies it back over weight, and
//    swaps the dims recorded in weight_shape;
//  - transB != 0: scales weight in place by alpha (skipped when alpha == 1).
// Only fp32/fp16 are supported. All work is queued on `stream`.
ppl::common::RetCode PPLCUDAGemmModifyWeights(
const hipStream_t &stream,
ppl::nn::TensorShape* weight_shape,
void* weight,
void* tmp_weight, //if need transpose
const ppl::nn::common::GemmParam *param)
{
int transB = param->transB;
float alpha = param->alpha;
auto type = weight_shape->GetDataType();
int pad_size = GetPadSize(type);
const int dim0 = weight_shape->GetDim(0);//assume padded
const int dim1 = weight_shape->GetDim(1);
if (!transB) {
// Transpose+scale into tmp_weight, then copy the result back over weight.
#define TRANSWEIGHT(Type) \
hipLaunchKernelGGL(( matrix_transpose<Type>), dim3(grid), dim3(block), 0, stream, \
(Type*)tmp_weight, (Type*)weight, alpha, dim0, dim1); \
hipMemcpyAsync((Type*)weight, (Type*)tmp_weight, dim0*dim1*sizeof(Type), \
hipMemcpyDeviceToDevice, stream);
dim3 grid(DivUp(dim1, 32), DivUp(dim0, 32), 1);
dim3 block(32, 32, 1);
// Record the transposed layout so downstream code sees dim1 x dim0.
weight_shape->SetDim(0, dim1);
weight_shape->SetDim(1, dim0);
switch(type){
case ppl::common::DATATYPE_FLOAT32 : {
TRANSWEIGHT(float)
break;
}
case ppl::common::DATATYPE_FLOAT16 : {
TRANSWEIGHT(__half)
break;
}
default:
return ppl::common::RC_UNSUPPORTED;
}
#undef TRANSWEIGHT
} else if (alpha != 1.f){
int grid_size = DivUp(dim0*dim1, 512);
switch(type){
case ppl::common::DATATYPE_FLOAT32 : {
hipLaunchKernelGGL(( scale<float>), dim3(grid_size), dim3(512), 0, stream, (float*)weight, alpha, dim0*dim1);
break;
}
case ppl::common::DATATYPE_FLOAT16 : {
hipLaunchKernelGGL(( scale<__half>), dim3(grid_size), dim3(512), 0, stream, (__half*)weight, alpha, dim0*dim1);
break;
}
default:
return ppl::common::RC_UNSUPPORTED;
}
}
return ppl::common::RC_SUCCESS;
}
// Scales the (padded, length-N) bias vector in place by param->beta when a
// bias term is present. beta of exactly 0 or 1 skips the kernel launch (no
// scaling needed). Scalar biases are rejected as unsupported.
// NOTE(review): this contradicts PPLCUDAGemmGetBiasSize, which sizes a buffer
// for scalar broadcast — confirm scalar bias is really unsupported here.
ppl::common::RetCode PPLCUDAGemmModifyBias(
const hipStream_t &stream,
const ppl::nn::TensorShape* bias_shape,
void* bias,
const ppl::nn::common::GemmParam *param)
{
if (param->bias_term) {
auto type = bias_shape->GetDataType();
int pad_size = GetPadSize(type);
float beta = param->beta;
int N = bias_shape->GetDim(0);
int N_pad = Align(N, pad_size);
if (type == ppl::common::DATATYPE_FLOAT32) {
if (bias_shape->IsScalar()) return ppl::common::RC_UNSUPPORTED;
if (beta != 0.f && beta != 1.f){
int grid_size = DivUp(N_pad, 512);
hipLaunchKernelGGL(( scale<float>), dim3(grid_size), dim3(512), 0, stream, (float*)bias, beta, N_pad);
}
} else if (type == ppl::common::DATATYPE_FLOAT16) {
if (bias_shape->IsScalar()) return ppl::common::RC_UNSUPPORTED;
if (beta != 0.f && beta != 1.f){
int grid_size = DivUp(N_pad, 512);
hipLaunchKernelGGL(( scale<__half>), dim3(grid_size), dim3(512), 0, stream, (__half*)bias, beta, N_pad);
}
} else{
return ppl::common::RC_UNSUPPORTED;
}
}
return ppl::common::RC_SUCCESS;
}
// Benchmarks every registered F1 GEMM kernel (TIMES launches each, timed with
// events) and returns the index of the fastest one, or -1 if none ran.
// If transA is set, A is first transposed into temp_buffer so K becomes the
// innermost dimension.
// NOTE(review): in_lut/flt_lut are passed uninitialized — presumably ignored
// by the F1 kernels; confirm. Also note the int return is overloaded: the
// unsupported-type branch returns RC_UNSUPPORTED as an int.
// Fix vs. original: the kernel launch line was mangled by hipify
// ("(g_kvec[kid]hipLaunchKernelGGL((.lut_kptr)), ..."), which is not valid
// C++; restored to a proper hipLaunchKernelGGL invocation.
int PPLCUDAGemmSelectKernel(
const hipStream_t &stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* weight_shape,
const void* weight,
const void* bias,
const ppl::nn::TensorShape* output_shape,
void* output,
const ppl::nn::common::GemmParam &param,
void* temp_buffer,
const fuse_param_t &fuse_param)
{
auto type = weight_shape->GetDataType();
if (!is_g_kvec_set) init_f1_kvec(g_kvec, type);
int pad_size = GetPadSize(type);
int transA = param.transA;
int transB = param.transB;
int N_pad = transB ? weight_shape->GetDim(0) : weight_shape->GetDim(1);
int K_pad = transB ? weight_shape->GetDim(1) : weight_shape->GetDim(0);
int M = transA ? input_shape->GetDim(1) : input_shape->GetDim(0);
int concat_offset_v8 = fuse_param.concat_offset / pad_size;
int concat_stride_v8 = fuse_param.concat_stride / pad_size;
int4 *final_out = fuse_param.has_concat ? (int4*)fuse_param.post_concat : (int4*)output;
// fuse configs
__half2 clip_min = __float2half2_rn(fuse_param.clip_min);
__half2 clip_max = __float2half2_rn(fuse_param.clip_max);
__half2 elt_clip_min = __float2half2_rn(fuse_param.elt_clip_min);
__half2 elt_clip_max = __float2half2_rn(fuse_param.elt_clip_max);
bool has_bias = param.bias_term;//beta != 0.f;
float minTime = FLT_MAX;
int best_kid = -1;
float elapsed;
hipEvent_t begin, end;
hipEventCreate(&begin);
hipEventCreate(&end);
//transpose
int4 *input0_tmp = (int4*)input;
if (transA == 1) { // input is shape of (K, M), we need K as the 1st inner dim
dim3 grid(DivUp(K_pad, 32), DivUp(M, 32), 1);
dim3 block(32, 32, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( matrix_transpose<float>), dim3(grid), dim3(block), 0, stream,
(float*)temp_buffer, (float*)input, 1.f, K_pad, M);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( matrix_transpose<__half>), dim3(grid), dim3(block), 0, stream,
(__half*)temp_buffer, (__half*)input, 1.f, K_pad, M);
} else {
return ppl::common::RC_UNSUPPORTED;
}
input0_tmp = (int4*)temp_buffer;
}
for (unsigned int kid = 0; kid < g_kvec.size(); kid++) {
int tile_m_per_cta = g_kvec[kid].tile_m_per_cta;
int tile_n_per_cta = g_kvec[kid].tile_n_per_cta;
int tile_k_per_cta = g_kvec[kid].tile_k_per_cta;
int cta_size_in_thd = g_kvec[kid].cta_size_in_thd;
dim3 block_size, grid_size;
block_size.x = cta_size_in_thd;
block_size.y = 1;
block_size.z = 1;
grid_size.x = DivUp(N_pad, tile_n_per_cta);
grid_size.y = DivUp(M, tile_m_per_cta);
grid_size.z = 1;//num_grp * splitk;
hipEventRecord(begin, stream);
for (int i = 0; i < TIMES; i++) {
if (g_kvec[kid].ktype == CONV_2SPK_F1) {
FAKE_CONV_PARAM
int kLoopNum = DivUp(K_pad, tile_k_per_cta);
lut_t in_lut, flt_lut;
hipLaunchKernelGGL((g_kvec[kid].lut_kptr), dim3(grid_size), dim3(block_size), 0, stream, GEMM_FUNC_PARAM);
}
else {
printf("Error: kernel type error in %s\n", __FUNCTION__);
}
}
hipEventRecord(end, stream);
hipEventSynchronize(end);
hipEventElapsedTime(&elapsed, begin, end);
if (elapsed < minTime){
best_kid = kid;
minTime = elapsed;
}
}
hipEventDestroy(begin);
hipEventDestroy(end);
return best_kid;
}
// Forward declaration of the M==1 fast path: a templated GEMV over an
// N x K weight matrix. Defined after PPLCUDAGemmForwardImp, which calls it.
template<typename T>
ppl::common::RetCode PPLCUDAGemvForwardImp(
const hipStream_t &stream,
const int M,
const int N,
const int K,
const void* input,
const void* weight,
const void* bias,
void* output,
const ppl::nn::common::GemmParam &param,
void* temp_buffer,
const fuse_param_t &fuse_param);
// Runs the GEMM: output = fuse(input x weight^T (+ bias)).
// Requires transB (weight stored N x K). M == 1 dispatches to the GEMV fast
// path; otherwise the pre-selected kernel g_kvec[kid] is launched with the
// fake 1x1-conv parameterization (FAKE_CONV_PARAM). When transA is set, the
// input is first transposed into temp_buffer.
// Fix vs. original: the final kernel launch line was mangled by hipify
// ("(g_kvec[kid]hipLaunchKernelGGL((.lut_kptr)), ..."), which is not valid
// C++; restored to a proper hipLaunchKernelGGL invocation.
ppl::common::RetCode PPLCUDAGemmForwardImp(
const hipStream_t &stream,
const ppl::nn::TensorShape* input_shape,
const void* input,
const ppl::nn::TensorShape* weight_shape,
const void* weight,
const void* bias,
const ppl::nn::TensorShape* output_shape,
void* output,
const ppl::nn::common::GemmParam &param,
void* temp_buffer,
const fuse_param_t &fuse_param,
const int kid)
{
auto type = weight_shape->GetDataType();
if ( !is_g_kvec_set ) init_f1_kvec(g_kvec, type);
int pad_size = GetPadSize(type);
int transA = param.transA;
int transB = param.transB;
if(!param.transB) return ppl::common::RC_UNSUPPORTED;
int N = transB ? weight_shape->GetDim(0) : weight_shape->GetDim(1);
int K = transB ? weight_shape->GetDim(1) : weight_shape->GetDim(0);
int N_pad = Align(N, pad_size);
int K_pad = Align(K, pad_size);
int M = transA ? input_shape->GetDim(1) : input_shape->GetDim(0);
int concat_offset_v8 = fuse_param.concat_offset / pad_size;
int concat_stride_v8 = fuse_param.concat_stride / pad_size;
int4 *final_out = fuse_param.has_concat ? (int4*)fuse_param.post_concat : (int4*)output;
// fuse configs
__half2 clip_min = __float2half2_rn(fuse_param.clip_min);
__half2 clip_max = __float2half2_rn(fuse_param.clip_max);
__half2 elt_clip_min = __float2half2_rn(fuse_param.elt_clip_min);
__half2 elt_clip_max = __float2half2_rn(fuse_param.elt_clip_max);
ppl::common::RetCode status = ppl::common::RC_SUCCESS;
if(M == 1){
// Single-row GEMM degenerates to a matrix-vector product.
status = PPLCUDAGemvForwardImp<__half>(stream,
M, N, K,
input, weight, bias,
(void*)final_out,
param, temp_buffer, fuse_param);
return status;
}
// kernel configs
int tile_m_per_cta = g_kvec[kid].tile_m_per_cta;
int tile_n_per_cta = g_kvec[kid].tile_n_per_cta;
int tile_k_per_cta = g_kvec[kid].tile_k_per_cta;
int cta_size_in_thd = g_kvec[kid].cta_size_in_thd;
dim3 block_size, grid_size;
block_size.x = cta_size_in_thd;
block_size.y = 1;
block_size.z = 1;
grid_size.x = DivUp(N_pad, tile_n_per_cta);
grid_size.y = DivUp(M, tile_m_per_cta);
grid_size.z = 1;//num_grp * splitk;
int kLoopNum = DivUp(K_pad, tile_k_per_cta);
lut_t in_lut, flt_lut;
bool has_bias = param.bias_term;//beta != 0.f;
int4 *input0_tmp = (int4*)input;
if (transA == 1) {
dim3 grid(DivUp(K_pad, 32), DivUp(M, 32), 1);
dim3 block(32, 32, 1);
if (type == ppl::common::DATATYPE_FLOAT32) {
hipLaunchKernelGGL(( matrix_transpose<float>), dim3(grid), dim3(block), 0, stream,
(float*)temp_buffer, (float*)input, 1.f, K_pad, M);
} else if (type == ppl::common::DATATYPE_FLOAT16) {
hipLaunchKernelGGL(( matrix_transpose<__half>), dim3(grid), dim3(block), 0, stream,
(__half*)temp_buffer, (__half*)input, 1.f, K_pad, M);
} else {
return ppl::common::RC_UNSUPPORTED;
}
input0_tmp = (int4*)temp_buffer;
}
FAKE_CONV_PARAM
hipLaunchKernelGGL((g_kvec[kid].lut_kptr), dim3(grid_size), dim3(block_size), 0, stream, GEMM_FUNC_PARAM);
return status;
}
// Packed fused multiply-add on int4 registers reinterpreted as lanes of T:
// c += a * b, lane-wise (8 halves or 4 floats per int4).
template <typename T>
__device__ __inline__ void fma_v4(const int4 a, const int4 b, int4 &c);
// __half specialization: four __hfma2 over the 8 packed halves.
// NOTE(review): the #else branch is empty, so on __CUDA_ARCH__ < 600 this
// silently does nothing.
template <>
__device__ __inline__ void fma_v4<__half>(const int4 a, const int4 b, int4 &c){
#if __CUDA_ARCH__ >= 600
((__half2*)&c)[0] = __hfma2(((__half2*)&a)[0], ((__half2*)&b)[0], ((__half2*)&c)[0]);
((__half2*)&c)[1] = __hfma2(((__half2*)&a)[1], ((__half2*)&b)[1], ((__half2*)&c)[1]);
((__half2*)&c)[2] = __hfma2(((__half2*)&a)[2], ((__half2*)&b)[2], ((__half2*)&c)[2]);
((__half2*)&c)[3] = __hfma2(((__half2*)&a)[3], ((__half2*)&b)[3], ((__half2*)&c)[3]);
#else
#endif
}
// float specialization: plain per-lane multiply-add on the 4 packed floats.
template <>
__device__ __inline__ void fma_v4<float>(const int4 a, const int4 b, int4 &c){
((float*)&c)[0] = ((float*)&a)[0] * ((float*)&b)[0] + ((float*)&c)[0];
((float*)&c)[1] = ((float*)&a)[1] * ((float*)&b)[1] + ((float*)&c)[1];
((float*)&c)[2] = ((float*)&a)[2] * ((float*)&b)[2] + ((float*)&c)[2];
((float*)&c)[3] = ((float*)&a)[3] * ((float*)&b)[3] + ((float*)&c)[3];
}
// Packed lane-wise addition of two int4 registers reinterpreted as lanes of T.
template <typename T>
__device__ __inline__ int4 add_v4(const int4 a, const int4 b);
// __half specialization: four __hadd2 over the 8 packed halves.
// NOTE(review): the #else branch is empty, so on __CUDA_ARCH__ < 600 this
// returns all zeros.
template <>
__device__ __inline__ int4 add_v4<__half>(const int4 a, const int4 b){
int4 res = {0,0,0,0};
#if __CUDA_ARCH__ >= 600
((__half2*)&res)[0] = __hadd2(((__half2*)&a)[0], ((__half2*)&b)[0]);
((__half2*)&res)[1] = __hadd2(((__half2*)&a)[1], ((__half2*)&b)[1]);
((__half2*)&res)[2] = __hadd2(((__half2*)&a)[2], ((__half2*)&b)[2]);
((__half2*)&res)[3] = __hadd2(((__half2*)&a)[3], ((__half2*)&b)[3]);
#else
#endif
return res;
}
// float specialization: per-lane addition of the 4 packed floats.
template <>
__device__ __inline__ int4 add_v4<float>(const int4 a, const int4 b){
int4 res = {0,0,0,0};
((float*)&res)[0] = ((float*)&a)[0] + ((float*)&b)[0];
((float*)&res)[1] = ((float*)&a)[1] + ((float*)&b)[1];
((float*)&res)[2] = ((float*)&a)[2] + ((float*)&b)[2];
((float*)&res)[3] = ((float*)&a)[3] + ((float*)&b)[3];
return res;
}
// Sums the sizeof(int4)/sizeof(T) packed T lanes of `data` into one scalar.
// Fix vs. original: the accumulated value was never returned — falling off
// the end of a non-void function is undefined behavior.
template <typename T>
__inline__ __device__ T reduce_v4(int4 data){
    T res = (T)0;
    for(int i = 0; i < sizeof(int4)/sizeof(T); i++){
        res = Math<T,T,T>::add(res, ((T*)&data)[i]);
    }
    return res;
}
// Lane-wise activation on a packed int4: activation == 1 applies ReLU, any
// other non-zero value applies sigmoid (exp(x) / (1 + exp(x))).
// NOTE(review): the generic path uses expf/__frcp_rn (fp32 intrinsics) on T
// values — presumably only instantiated for float; confirm.
template <typename T>
__device__ __inline__ void activation(const int activation, int4 &v){
T *t_v = (T*)&v;
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
if(activation ==1){
for(int i = 0; i < T_NUMS_PER_INT4; i++)
t_v[i] = Math<T,T,T>::ge(t_v[i], (T)0)?
t_v[i] : (T)0;
} else{
for(int i = 0; i < T_NUMS_PER_INT4; i++){
T tmp = expf(t_v[i]);
t_v[i] = tmp * __frcp_rn(tmp + (T)1);
}
}
}
// __half specialization: ReLU via __vmaxs2 on the raw 16-bit lanes, sigmoid
// via h2exp/h2rcp on __half2 pairs.
// NOTE(review): the #else branch is empty, so on __CUDA_ARCH__ < 600 this
// silently does nothing.
template <>
__device__ __inline__ void activation<__half>(const int activation, int4 &v){
#if __CUDA_ARCH__ >= 600
__half2 *h2_v = (__half2*)&v;
int *int_v = (int*)&v;
if(activation ==1){
for(int i = 0; i < 4; i++)
int_v[i] = __vmaxs2(int_v[i], 0);
} else{
__half2 one = {(__half)1.f, (__half)1.f};
for(int i = 0; i < 4; i++){
__half2 tmp = h2exp(h2_v[i]);
h2_v[i] = __hmul2(tmp, h2rcp(__hadd2(one, tmp)));// __h2div(tmp, __hadd2(one, tmp));
}
}
#else
#endif
}
// Lane-wise clamp of the packed T values in v to [clip_min, clip_max].
template<typename T>
__device__ __inline__ void clip(int4 &v, float clip_min, float clip_max){
T *t_v = (T*)&v;
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
for(int i = 0; i < T_NUMS_PER_INT4; i++){
t_v[i] = Math<T,T,T>::ge(t_v[i], (T)clip_min)?
t_v[i] : (T)clip_min;
t_v[i] = Math<T,T,T>::le(t_v[i], (T)clip_max)?
t_v[i] : (T)clip_max;
}
}
//matrix: NxK
// N: pad int4
// K: pad int4
// layout and fuse pattern consistent with gemm
//BLK_TILE_N: min:8
// GEMV kernel: out[n] = fuse(dot(vec, matrix[n,:]) + bias[n]).
// Each block computes BLK_TILE_N outputs: threads accumulate int4-packed
// partial dot products along K, reduce them through shared memory and warp
// shuffles, then lane 0 of each row group writes the fused result.
// NOTE(review): this uses __activemask() as the shuffle mask; an explicit
// full mask (threads here are uniformly active in the x<32 branch) would be
// safer under independent thread scheduling — confirm intent.
template<typename T, int BLK_TILE_N, int THD_TILE_N_V4, int BLK_SIZE>
__global__ void gemv(void *output,
const void *vec,
const void *matrix,
const void *bias,
const int padK,
const int padN,
const fuse_param_t fuse_param)
{
// blk conofig
// one int4 per thd along K
constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
constexpr int BLK_TILE_N_V4 = BLK_TILE_N / T_NUMS_PER_INT4;
constexpr int THD_TILE_N = THD_TILE_N_V4 * T_NUMS_PER_INT4;
constexpr int BLK_SIZE_Y = BLK_TILE_N_V4 / THD_TILE_N_V4;
constexpr int BLK_SIZE_X = BLK_SIZE / BLK_SIZE_Y;
constexpr int BLK_TILE_K = BLK_SIZE_X;
int pad_k_v4 = padK / T_NUMS_PER_INT4;
int pad_n_v4 = padN / T_NUMS_PER_INT4;
int n_id = blockIdx.x*BLK_TILE_N + threadIdx.y*T_NUMS_PER_INT4;
int64_t b_base_v4 = (int64_t)n_id*pad_k_v4;
int4 *matrix_base_v4 = (int4*)matrix + b_base_v4;
int4 reg_c[THD_TILE_N];
int4 reg_b[THD_TILE_N];
bool in_n_range[THD_TILE_N_V4];
int4 reg_a;
int4 zero = {0,0,0,0};
T c[THD_TILE_N] = { T(0) };
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++) c[i] = (T)0;
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
reg_c[i] = zero;
}
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
in_n_range[i] = blockIdx.x*BLK_TILE_N_V4 + threadIdx.y + i*BLK_SIZE_Y < pad_n_v4;
}
// ld global VxM
// Main loop: each x-lane consumes one int4 of vec per iteration and
// multiplies it against the matching int4 of every assigned matrix row.
#pragma unroll
for(int k = 0; k < DivUp(pad_k_v4,BLK_TILE_K); k++){
int64_t off = k*BLK_TILE_K + threadIdx.x;
bool in_range = off < pad_k_v4;
reg_a = in_range? ((int4*)vec)[off] : zero;
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
#pragma unroll
for(int j = 0; j < T_NUMS_PER_INT4; j++){
reg_b[i*T_NUMS_PER_INT4 + j] = in_n_range[i] && in_range ?
matrix_base_v4[(i*T_NUMS_PER_INT4*BLK_SIZE_Y+j)*pad_k_v4 + off]
: zero;
fma_v4<T>(reg_a, reg_b[i*T_NUMS_PER_INT4 + j],
reg_c[i*T_NUMS_PER_INT4 + j]);
}
}
}
// int4 reduce to half
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
#pragma unroll
for(int n = 0; n < T_NUMS_PER_INT4; n++){
c[i] = Math<T,T,T>::add( ((T*)reg_c)[i*T_NUMS_PER_INT4 + n],
c[i]);
}
}
__shared__ T smem[BLK_SIZE_X*BLK_TILE_N];
int reduce_off = (threadIdx.y*THD_TILE_N)*BLK_SIZE_X + threadIdx.x;
constexpr int REDUCE_SIZE = BLK_SIZE_X;
// Cross-lane reduction along x: tree reduce through shared memory down to
// 32 lanes, then finish with warp shuffles.
if(REDUCE_SIZE >= 64){
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++){
smem[reduce_off + i*BLK_SIZE_X] = c[i];
}
__syncthreads();
}
//reduce
if(REDUCE_SIZE >= 1024){
if(threadIdx.x < 512)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[512 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 512){
if(threadIdx.x < 256)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[256 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 256){
if(threadIdx.x < 128)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[128 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
if(REDUCE_SIZE >= 128){
if(threadIdx.x < 64)
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[64 + reduce_off + i*BLK_SIZE_X]);
__syncthreads();
}
unsigned FULL_MASK = __activemask();
if (REDUCE_SIZE >= 64) {
if(threadIdx.x < 32){
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
smem[reduce_off + i*BLK_SIZE_X + 32]);
}
}
if(threadIdx.x < 32){
if (REDUCE_SIZE >= 32) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 16));
}
if (REDUCE_SIZE >= 16) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 8));
}
if (REDUCE_SIZE >= 8) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 4));
}
if (REDUCE_SIZE >= 4) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 2));
}
if (REDUCE_SIZE >= 2) {
#pragma unroll
for(int i = 0; i < THD_TILE_N; i++)
c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 1));
}
}
// shared shuffle
// Lane 0 of each row group publishes its finished outputs, then all
// threads cooperatively apply bias/fuse and store to global memory.
int4 *smem_v4 = (int4*)smem;
if (threadIdx.x == 0) {
#pragma unroll
for(int i = 0; i < THD_TILE_N_V4; i++){
smem_v4[i*BLK_SIZE_Y + threadIdx.y] = ((int4*)c)[i];
}
}
__syncthreads();
int tid = threadIdx.y*BLK_SIZE_X + threadIdx.x;
for(int thd_off = tid; thd_off < BLK_TILE_N_V4; thd_off += BLK_SIZE){
int out_off = blockIdx.x*BLK_TILE_N_V4 + thd_off;
bool in_output_range = out_off < pad_n_v4;
if(in_output_range){
int4 bias_data = bias!=NULL? ((int4*)bias)[out_off] : zero;
//TODO add bias
int4 out = add_v4<T>(smem_v4[thd_off], bias_data);
// fuse
if(fuse_param.has_activation) activation<T>(fuse_param.has_activation, out);
if(fuse_param.has_clip) clip<T>(out, fuse_param.clip_min, fuse_param.clip_max);
int concatV4_off = 0;
if(fuse_param.has_concat){
int concat_offset_v4 = fuse_param.concat_offset / T_NUMS_PER_INT4;
int concat_stride_v4 = fuse_param.concat_stride / T_NUMS_PER_INT4;
concatV4_off = concat_offset_v4 + blockIdx.y*concat_stride_v4;
out_off += concatV4_off;
}
((int4*)output)[out_off] = out;
}
}
}
// Host-side GEMV dispatch (M == 1 GEMM fast path). Picks a block shape from
// N (aiming for ~expect_blocks blocks) and K (64 vs 32 lanes along K), then
// launches the gemv kernel via the CONFIG_KERNEL/LAUNCH_KERNEL macros.
// Requires transB (weight stored N x K); M and temp_buffer are unused here.
template<typename T>
ppl::common::RetCode PPLCUDAGemvForwardImp(
const hipStream_t &stream,
const int M,
const int N,
const int K,
const void* input,
const void* weight,
const void* bias,
void* output,
const ppl::nn::common::GemmParam &param,
void* temp_buffer,
const fuse_param_t &fuse_param)
{
if(!param.transB) return ppl::common::RC_UNSUPPORTED;
constexpr int ELEM_NUM_PR_LD = sizeof(int4)/sizeof(T);
constexpr int expect_blocks = 64;
//constexpr int MAX_BLK_SIZE = 256;
//constexpr int MAX_THD_TILE_N_V4 = 4;
int n_v4 = N / ELEM_NUM_PR_LD;
int blk_tile_n_v4 = DivUp(n_v4, expect_blocks/M);
// Instantiates gemv for the chosen tile constants and launches it.
#define LAUNCH_KERNEL(){ \
constexpr int BLK_TILE_N = BLK_SIZE_Y * THD_TILE_N_V4 * ELEM_NUM_PR_LD; \
constexpr int BLK_SIZE = BLK_SIZE_Y * BLK_SIZE_X; \
dim3 grid; \
grid.x = DivUp(N, BLK_TILE_N); \
grid.y = 1; grid.z = 1; \
dim3 threads = dim3(BLK_SIZE_X, BLK_SIZE_Y,1); \
hipLaunchKernelGGL(( gemv<T, BLK_TILE_N, THD_TILE_N_V4, BLK_SIZE>), dim3(grid), dim3(threads), 0, stream, \
output, input, weight, bias, K, N, fuse_param); \
}
// Maps the per-block N tile size to (threads-per-output, outputs-per-thread).
#define CONFIG_KERNEL(_blk_tile_n_v4){ \
if(BLK_SIZE_X <= 64 && blk_tile_n_v4 >= 16){ \
constexpr int THD_TILE_N_V4 = 4; \
constexpr int BLK_SIZE_Y = 4; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 8){ \
constexpr int THD_TILE_N_V4 = 4; \
constexpr int BLK_SIZE_Y = 2; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 4){ \
constexpr int THD_TILE_N_V4 = 2; \
constexpr int BLK_SIZE_Y = 2; \
LAUNCH_KERNEL(); \
} else if(blk_tile_n_v4 >= 2){ \
constexpr int THD_TILE_N_V4 = 2; \
constexpr int BLK_SIZE_Y = 1; \
LAUNCH_KERNEL(); \
} else{ \
constexpr int THD_TILE_N_V4 = 1; \
constexpr int BLK_SIZE_Y = 1; \
LAUNCH_KERNEL(); \
} \
}
// Longer K gets more lanes along the reduction dimension.
if (K >= 512){
constexpr int BLK_SIZE_X = 64;
CONFIG_KERNEL(blk_tile_n_v4);
}
else{
constexpr int BLK_SIZE_X = 32;
CONFIG_KERNEL(blk_tile_n_v4);
}
return ppl::common::RC_SUCCESS;
}
| 1a2b4f920edaf3331d71ff9176e8174d21e26f3f.cu | #include "cudakernel/gemm/gemm.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include <cuda_fp16.h>
#include <float.h>
#include "kernel_type.h"
#include "conv_common.h"
#define TIMES 4
static std::vector<kernel_info_t> g_kvec;
static bool is_g_kvec_set = false;
#define FAKE_CONV_PARAM \
const int in_hw = 1; const int out_hw = 1; \
const int flt_hw = 1; const int splitk = 1; \
const int in_height = 1; const int in_width = 1; \
const int batch = M; const int num_grp = 1; \
const int num_chl_per_grp = 0; const int num_chl_per_grp_pad = K_pad; \
const int flt_height = 1; const int flt_width = 1; \
const int num_flt_per_grp = 0; const int num_flt_per_grp_pad = N_pad; \
const int out_height = 1; const int out_width = 1; \
const int stride_height = 1; const int stride_width = 1; \
const int pad_height = 0; const int pad_width = 0; \
const int hole_height = 1; const int hole_width = 1;
#define GEMM_FUNC_PARAM \
input0_tmp, \
(int4*)weight, \
final_out, \
kLoopNum, \
in_lut, 0, \
flt_lut, 0, \
in_hw, out_hw, \
flt_hw, splitk, \
in_height, in_width, \
batch, num_grp, \
num_chl_per_grp, num_chl_per_grp_pad, \
flt_height, flt_width, \
num_flt_per_grp, num_flt_per_grp_pad, \
out_height, out_width, \
stride_height, stride_width, \
pad_height, pad_width, \
hole_height, hole_width, \
has_bias, (int4*)bias, \
fuse_param.has_activation, clip_min, \
fuse_param.has_clip, clip_max, \
fuse_param.has_prelu, (const void *) fuse_param.prelu, \
fuse_param.has_elt, (const int4 *) fuse_param.pre_data, \
fuse_param.has_elt_activation, elt_clip_min, \
fuse_param.has_elt_clip, elt_clip_max, \
fuse_param.has_elt_prelu, (const void *) fuse_param.elt_prelu, \
(__half)fuse_param.leaky, (__half)fuse_param.elt_leaky, \
fuse_param.has_concat, concat_offset_v8, \
concat_stride_v8
// One-time population of the global F1 GEMM kernel list; only fp16 kernels
// exist. is_g_kvec_set is set unconditionally, so the unsupported-type printf
// fires at most once per process.
void init_f1_kvec(std::vector<kernel_info_t> &g_kvec, ppl::common::datatype_t type)
{
if ( type == ppl::common::DATATYPE_FLOAT32 )
{
printf("fp32 unsupported in %s\n", __FUNCTION__);
}
else if ( type == ppl::common::DATATYPE_FLOAT16 )
{
Initialize2spkConvF1KernelContainer(g_kvec);
}
else
{ printf("type unsupported\n"); }
is_g_kvec_set = true;
}
// Byte size of the temporary buffer the GEMM needs: a K-padded transposed
// copy of A (stored K x M) is staged only when transA is set; otherwise no
// buffer is required.
uint64_t PPLGemmCUDAGetBufSize(
const ppl::nn::TensorShape* input_shape,
int transA)
{
auto type = input_shape->GetDataType();
int type_size = ppl::common::GetSizeOfDataType(type);
if(transA){
int pad_size = GetPadSize(type); // ldg 128 bytes
int K = input_shape->GetDim(0);
int M = input_shape->GetDim(1);
int K_pad = Align(K, pad_size);
return M * K_pad * type_size;
}
return 0;
}
// Device scratch bytes needed for the bias: a scalar bias is broadcast into
// a padded vector of N elements (rounded up so ldg can issue 128-byte
// loads); a full vector bias needs no extra space.
unsigned int PPLCUDAGemmGetBiasSize(
    const ppl::common::datatype_t type,
    const int N,
    const bool is_scalar)
{
    if (!is_scalar) {
        return 0;
    }
    const int padded_n = Align(N, GetPadSize(type)); // ldg 128 bytes
    return padded_n * ppl::common::GetSizeOfDataType(type);
}
// Tiled matrix transpose with scaling: output[c][r] = (T)(input[r][c] * scale).
// Launch with block (32,32,1) and grid (ceil(in_col/32), ceil(in_row/32), 1).
// Shared-memory staging turns the strided global accesses into coalesced
// loads and stores; the +1 column of padding avoids bank conflicts on the
// transposed read.
template<typename T>
__global__ void matrix_transpose(
    T *output,
    T *input,
    float scale,
    const int in_row,
    const int in_col)
{
    unsigned int in_x  = blockIdx.x*32 + threadIdx.x;
    unsigned int in_y  = blockIdx.y*32 + threadIdx.y;
    unsigned int out_x = blockIdx.y*32 + threadIdx.x;
    unsigned int out_y = blockIdx.x*32 + threadIdx.y;
    // Fix: valid indices are [0, dim); the original `<=` comparisons read and
    // wrote one element past the end of each dimension.
    bool in_range  = (in_x < in_col) && (in_y < in_row);
    bool out_range = (out_x < in_row) && (out_y < in_col);
    __shared__ T smem[32][33];
    T value = in_range ? input[in_y*in_col + in_x] : (T)0;
    smem[threadIdx.x][threadIdx.y] = value;
    __syncthreads();
    value = smem[threadIdx.y][threadIdx.x];
    float fp_value = (float)value * scale;
    // Fix: cast through T, not __half; the unconditional __half cast silently
    // truncated precision for the float instantiation.
    if(out_range) output[out_y*in_row + out_x] = (T)fp_value;
}
// In-place elementwise scaling: input[i] = (T)((float)input[i] * scale).
// Launch with 512 threads per block and ceil(size/512) blocks.
template<typename T>
__global__ void scale(T *input, float scale, unsigned int size){
    unsigned int off = blockIdx.x*512 + threadIdx.x;
    // Fix: valid offsets are [0, size); the original `off <= size` read and
    // wrote input[size], one element out of bounds.
    bool in_range = off < size;
    T value = in_range ? input[off] : (T)0;
    float fp_value = (float)value;
    fp_value = scale * fp_value;
    if (in_range) input[off] = (T)fp_value;
}
// Prepares the weight matrix on device before GEMM:
//  - transB == 0: transpose the (dim0, dim1) weights into (dim1, dim0)
//    layout via tmp_weight, folding alpha into the values (matrix_transpose
//    multiplies by its `scale` argument), then copy back in place and update
//    weight_shape accordingly.
//  - transB != 0: layout is already correct; only scale by alpha when
//    alpha != 1.
// Both paths run asynchronously on `stream`; errors from the launches are
// not checked here.
ppl::common::RetCode PPLCUDAGemmModifyWeights(
    const cudaStream_t &stream,
    ppl::nn::TensorShape* weight_shape,
    void* weight,
    void* tmp_weight, //if need transpose
    const ppl::nn::common::GemmParam *param)
{
    int transB = param->transB;
    float alpha = param->alpha;
    auto type = weight_shape->GetDataType();
    int pad_size = GetPadSize(type);
    const int dim0 = weight_shape->GetDim(0);//assume padded
    const int dim1 = weight_shape->GetDim(1);
    if (!transB) {
// NOTE: TRANSWEIGHT expands against the local `grid`/`block` declared just
// below, so it must stay textually inside this scope.
#define TRANSWEIGHT(Type) \
    matrix_transpose<Type><<<grid, block, 0, stream>>> \
        ((Type*)tmp_weight, (Type*)weight, alpha, dim0, dim1); \
    cudaMemcpyAsync((Type*)weight, (Type*)tmp_weight, dim0*dim1*sizeof(Type), \
            cudaMemcpyDeviceToDevice, stream);
        dim3 grid(DivUp(dim1, 32), DivUp(dim0, 32), 1);
        dim3 block(32, 32, 1);
        // Record the transposed shape so downstream consumers see (dim1, dim0).
        weight_shape->SetDim(0, dim1);
        weight_shape->SetDim(1, dim0);
        switch(type){
            case ppl::common::DATATYPE_FLOAT32 : {
                TRANSWEIGHT(float)
                break;
            }
            case ppl::common::DATATYPE_FLOAT16 : {
                TRANSWEIGHT(__half)
                break;
            }
            default:
                return ppl::common::RC_UNSUPPORTED;
        }
#undef TRANSWEIGHT
    } else if (alpha != 1.f){
        int grid_size = DivUp(dim0*dim1, 512);
        switch(type){
            case ppl::common::DATATYPE_FLOAT32 : {
                scale<float><<<grid_size, 512, 0, stream>>>((float*)weight, alpha, dim0*dim1);
                break;
            }
            case ppl::common::DATATYPE_FLOAT16 : {
                scale<__half><<<grid_size, 512, 0, stream>>>((__half*)weight, alpha, dim0*dim1);
                break;
            }
            default:
                return ppl::common::RC_UNSUPPORTED;
        }
    }
    return ppl::common::RC_SUCCESS;
}
// Pre-scales the bias vector by beta on device (asynchronously on `stream`)
// so the GEMM kernels can add it unscaled. No-op when there is no bias or
// when beta is 0 or 1; scalar biases and non-fp16/fp32 types are rejected.
ppl::common::RetCode PPLCUDAGemmModifyBias(
    const cudaStream_t &stream,
    const ppl::nn::TensorShape* bias_shape,
    void* bias,
    const ppl::nn::common::GemmParam *param)
{
    if (!param->bias_term) {
        return ppl::common::RC_SUCCESS;
    }
    auto type = bias_shape->GetDataType();
    const bool is_fp32 = (type == ppl::common::DATATYPE_FLOAT32);
    const bool is_fp16 = (type == ppl::common::DATATYPE_FLOAT16);
    if (!is_fp32 && !is_fp16) {
        return ppl::common::RC_UNSUPPORTED;
    }
    if (bias_shape->IsScalar()) {
        return ppl::common::RC_UNSUPPORTED;
    }
    const float beta = param->beta;
    if (beta != 0.f && beta != 1.f) {
        const int N_pad = Align(bias_shape->GetDim(0), GetPadSize(type));
        const int grid_size = DivUp(N_pad, 512);
        if (is_fp32) {
            scale<float><<<grid_size, 512, 0, stream>>>((float*)bias, beta, N_pad);
        } else {
            scale<__half><<<grid_size, 512, 0, stream>>>((__half*)bias, beta, N_pad);
        }
    }
    return ppl::common::RC_SUCCESS;
}
// Autotuner: times every registered F1 kernel (TIMES launches each, measured
// with CUDA events on `stream`) against the given problem shape and returns
// the index of the fastest one. The FAKE_CONV_PARAM / GEMM_FUNC_PARAM macros
// expand against the local variables declared below, so names here must not
// change.
// NOTE(review): the function returns an int kernel id, but the transpose
// path returns ppl::common::RC_UNSUPPORTED — callers cannot distinguish that
// error code from a (large) kernel index; confirm intended.
int PPLCUDAGemmSelectKernel(
    const cudaStream_t &stream,
    const ppl::nn::TensorShape* input_shape,
    const void* input,
    const ppl::nn::TensorShape* weight_shape,
    const void* weight,
    const void* bias,
    const ppl::nn::TensorShape* output_shape,
    void* output,
    const ppl::nn::common::GemmParam &param,
    void* temp_buffer,
    const fuse_param_t &fuse_param)
{
    auto type = weight_shape->GetDataType();
    if (!is_g_kvec_set) init_f1_kvec(g_kvec, type);
    int pad_size = GetPadSize(type);
    int transA = param.transA;
    int transB = param.transB;
    // Weight dims are assumed pre-padded here (contrast with ForwardImp,
    // which re-aligns them).
    int N_pad = transB ? weight_shape->GetDim(0) : weight_shape->GetDim(1);
    int K_pad = transB ? weight_shape->GetDim(1) : weight_shape->GetDim(0);
    int M = transA ? input_shape->GetDim(1) : input_shape->GetDim(0);
    int concat_offset_v8 = fuse_param.concat_offset / pad_size;
    int concat_stride_v8 = fuse_param.concat_stride / pad_size;
    int4 *final_out = fuse_param.has_concat ? (int4*)fuse_param.post_concat : (int4*)output;
    // fuse configs
    __half2 clip_min = __float2half2_rn(fuse_param.clip_min);
    __half2 clip_max = __float2half2_rn(fuse_param.clip_max);
    __half2 elt_clip_min = __float2half2_rn(fuse_param.elt_clip_min);
    __half2 elt_clip_max = __float2half2_rn(fuse_param.elt_clip_max);
    bool has_bias = param.bias_term;//beta != 0.f;
    float minTime = FLT_MAX;
    int best_kid = -1;
    float elapsed;
    cudaEvent_t begin, end;
    cudaEventCreate(&begin);
    cudaEventCreate(&end);
    //transpose
    int4 *input0_tmp = (int4*)input;
    if (transA == 1) { // input is shape of (K, M), we need K as the 1st inner dim
        dim3 grid(DivUp(K_pad, 32), DivUp(M, 32), 1);
        dim3 block(32, 32, 1);
        if (type == ppl::common::DATATYPE_FLOAT32) {
            matrix_transpose<float><<<grid, block, 0, stream>>>
                ((float*)temp_buffer, (float*)input, 1.f, K_pad, M);
        } else if (type == ppl::common::DATATYPE_FLOAT16) {
            matrix_transpose<__half><<<grid, block, 0, stream>>>
                ((__half*)temp_buffer, (__half*)input, 1.f, K_pad, M);
        } else {
            return ppl::common::RC_UNSUPPORTED;
        }
        input0_tmp = (int4*)temp_buffer;
    }
    for (unsigned int kid = 0; kid < g_kvec.size(); kid++) {
        int tile_m_per_cta = g_kvec[kid].tile_m_per_cta;
        int tile_n_per_cta = g_kvec[kid].tile_n_per_cta;
        int tile_k_per_cta = g_kvec[kid].tile_k_per_cta;
        int cta_size_in_thd = g_kvec[kid].cta_size_in_thd;
        dim3 block_size, grid_size;
        block_size.x = cta_size_in_thd;
        block_size.y = 1;
        block_size.z = 1;
        grid_size.x = DivUp(N_pad, tile_n_per_cta);
        grid_size.y = DivUp(M, tile_m_per_cta);
        grid_size.z = 1;//num_grp * splitk;
        cudaEventRecord(begin, stream);
        for (int i = 0; i < TIMES; i++) {
            if (g_kvec[kid].ktype == CONV_2SPK_F1) {
                FAKE_CONV_PARAM
                int kLoopNum = DivUp(K_pad, tile_k_per_cta);
                lut_t in_lut, flt_lut;
                (g_kvec[kid].lut_kptr)<<<grid_size, block_size, 0, stream>>>(GEMM_FUNC_PARAM);
            }
            else {
                printf("Error: kernel type error in %s\n", __FUNCTION__);
            }
        }
        cudaEventRecord(end, stream);
        cudaEventSynchronize(end);
        cudaEventElapsedTime(&elapsed, begin, end);
        if (elapsed < minTime){
            best_kid = kid;
            minTime = elapsed;
        }
    }
    cudaEventDestroy(begin);
    cudaEventDestroy(end);
    return best_kid;
}
// Forward declaration of the GEMV path taken by PPLCUDAGemmForwardImp when
// M == 1 (single-row GEMM); the definition follows the gemv kernel below.
template<typename T>
ppl::common::RetCode PPLCUDAGemvForwardImp(
    const cudaStream_t &stream,
    const int M,
    const int N,
    const int K,
    const void* input,
    const void* weight,
    const void* bias,
    void* output,
    const ppl::nn::common::GemmParam &param,
    void* temp_buffer,
    const fuse_param_t &fuse_param);
// GEMM forward entry point. Dispatches to the GEMV path when M == 1,
// otherwise launches the pre-selected 2spk conv kernel `kid` (chosen by
// PPLCUDAGemmSelectKernel). Requires transB; a transA input is transposed
// into temp_buffer first. FAKE_CONV_PARAM / GEMM_FUNC_PARAM expand against
// the local variable names below, so they must not be renamed.
ppl::common::RetCode PPLCUDAGemmForwardImp(
    const cudaStream_t &stream,
    const ppl::nn::TensorShape* input_shape,
    const void* input,
    const ppl::nn::TensorShape* weight_shape,
    const void* weight,
    const void* bias,
    const ppl::nn::TensorShape* output_shape,
    void* output,
    const ppl::nn::common::GemmParam &param,
    void* temp_buffer,
    const fuse_param_t &fuse_param,
    const int kid)
{
    auto type = weight_shape->GetDataType();
    if ( !is_g_kvec_set ) init_f1_kvec(g_kvec, type);
    int pad_size = GetPadSize(type);
    int transA = param.transA;
    int transB = param.transB;
    if(!param.transB) return ppl::common::RC_UNSUPPORTED;
    int N = transB ? weight_shape->GetDim(0) : weight_shape->GetDim(1);
    int K = transB ? weight_shape->GetDim(1) : weight_shape->GetDim(0);
    int N_pad = Align(N, pad_size);
    int K_pad = Align(K, pad_size);
    int M = transA ? input_shape->GetDim(1) : input_shape->GetDim(0);
    int concat_offset_v8 = fuse_param.concat_offset / pad_size;
    int concat_stride_v8 = fuse_param.concat_stride / pad_size;
    int4 *final_out = fuse_param.has_concat ? (int4*)fuse_param.post_concat : (int4*)output;
    // fuse configs
    __half2 clip_min = __float2half2_rn(fuse_param.clip_min);
    __half2 clip_max = __float2half2_rn(fuse_param.clip_max);
    __half2 elt_clip_min = __float2half2_rn(fuse_param.elt_clip_min);
    __half2 elt_clip_max = __float2half2_rn(fuse_param.elt_clip_max);
    ppl::common::RetCode status = ppl::common::RC_SUCCESS;
    if(M == 1){
        // Single-row GEMM degenerates to a matrix-vector product; the fp16
        // GEMV kernel handles bias and fusion itself.
        status = PPLCUDAGemvForwardImp<__half>(stream,
                    M, N, K,
                    input, weight, bias,
                    (void*)final_out,
                    param, temp_buffer, fuse_param);
        return status;
    }
    // kernel configs
    int tile_m_per_cta = g_kvec[kid].tile_m_per_cta;
    int tile_n_per_cta = g_kvec[kid].tile_n_per_cta;
    int tile_k_per_cta = g_kvec[kid].tile_k_per_cta;
    int cta_size_in_thd = g_kvec[kid].cta_size_in_thd;
    dim3 block_size, grid_size;
    block_size.x = cta_size_in_thd;
    block_size.y = 1;
    block_size.z = 1;
    grid_size.x = DivUp(N_pad, tile_n_per_cta);
    grid_size.y = DivUp(M, tile_m_per_cta);
    grid_size.z = 1;//num_grp * splitk;
    int kLoopNum = DivUp(K_pad, tile_k_per_cta);
    lut_t in_lut, flt_lut;
    bool has_bias = param.bias_term;//beta != 0.f;
    int4 *input0_tmp = (int4*)input;
    if (transA == 1) {
        dim3 grid(DivUp(K_pad, 32), DivUp(M, 32), 1);
        dim3 block(32, 32, 1);
        if (type == ppl::common::DATATYPE_FLOAT32) {
            matrix_transpose<float><<<grid, block, 0, stream>>>
                ((float*)temp_buffer, (float*)input, 1.f, K_pad, M);
        } else if (type == ppl::common::DATATYPE_FLOAT16) {
            matrix_transpose<__half><<<grid, block, 0, stream>>>
                ((__half*)temp_buffer, (__half*)input, 1.f, K_pad, M);
        } else {
            return ppl::common::RC_UNSUPPORTED;
        }
        input0_tmp = (int4*)temp_buffer;
    }
    FAKE_CONV_PARAM
    (g_kvec[kid].lut_kptr)<<<grid_size, block_size, 0, stream>>>(GEMM_FUNC_PARAM);
    return status;
}
// fma_v4<T>: lane-wise fused multiply-add c += a * b on values packed into
// int4 registers (eight __half lanes or four float lanes). Only the
// specializations are defined; the primary template has no body.
template <typename T>
__device__ __inline__ void fma_v4(const int4 a, const int4 b, int4 &c);
template <>
__device__ __inline__ void fma_v4<__half>(const int4 a, const int4 b, int4 &c){
#if __CUDA_ARCH__ >= 600
    // Eight halves per int4, processed as four __half2 fma ops.
    ((__half2*)&c)[0] = __hfma2(((__half2*)&a)[0], ((__half2*)&b)[0], ((__half2*)&c)[0]);
    ((__half2*)&c)[1] = __hfma2(((__half2*)&a)[1], ((__half2*)&b)[1], ((__half2*)&c)[1]);
    ((__half2*)&c)[2] = __hfma2(((__half2*)&a)[2], ((__half2*)&b)[2], ((__half2*)&c)[2]);
    ((__half2*)&c)[3] = __hfma2(((__half2*)&a)[3], ((__half2*)&b)[3], ((__half2*)&c)[3]);
#else
    // NOTE(review): silently a no-op below SM60 -- c is left unchanged.
#endif
}
template <>
__device__ __inline__ void fma_v4<float>(const int4 a, const int4 b, int4 &c){
    // Elementwise multiply-accumulate over the four packed floats: c += a * b.
    const float *fa = (const float*)&a;
    const float *fb = (const float*)&b;
    float *fc = (float*)&c;
#pragma unroll
    for (int i = 0; i < 4; ++i) {
        fc[i] = fa[i] * fb[i] + fc[i];
    }
}
// add_v4<T>: lane-wise sum of two int4-packed vectors; returns the result.
// Only the specializations are defined.
template <typename T>
__device__ __inline__ int4 add_v4(const int4 a, const int4 b);
template <>
__device__ __inline__ int4 add_v4<__half>(const int4 a, const int4 b){
    int4 res = {0,0,0,0};
#if __CUDA_ARCH__ >= 600
    // Eight halves per int4, processed as four __half2 adds.
    ((__half2*)&res)[0] = __hadd2(((__half2*)&a)[0], ((__half2*)&b)[0]);
    ((__half2*)&res)[1] = __hadd2(((__half2*)&a)[1], ((__half2*)&b)[1]);
    ((__half2*)&res)[2] = __hadd2(((__half2*)&a)[2], ((__half2*)&b)[2]);
    ((__half2*)&res)[3] = __hadd2(((__half2*)&a)[3], ((__half2*)&b)[3]);
#else
    // NOTE(review): below SM60 this returns all zeros, not a + b.
#endif
    return res;
}
template <>
__device__ __inline__ int4 add_v4<float>(const int4 a, const int4 b){
    // Elementwise sum of the four packed floats.
    int4 res = {0,0,0,0};
    const float *fa = (const float*)&a;
    const float *fb = (const float*)&b;
    float *fr = (float*)&res;
#pragma unroll
    for (int i = 0; i < 4; ++i) {
        fr[i] = fa[i] + fb[i];
    }
    return res;
}
// Horizontal reduction: sums every T lane packed inside `data`.
template <typename T>
__inline__ __device__ T reduce_v4(int4 data){
    T res = (T)0;
    constexpr int LANES = (int)(sizeof(int4)/sizeof(T));
    for(int i = 0; i < LANES; i++){
        res = Math<T,T,T>::add(res, ((T*)&data)[i]);
    }
    // Fix: the original fell off the end of this non-void function without
    // returning `res` (undefined behavior; callers got garbage).
    return res;
}
// Lane-wise activation on an int4 of packed T values:
// activation == 1 applies ReLU, any other non-zero value applies sigmoid.
// NOTE(review): expf/__frcp_rn are float intrinsics, so this primary
// template is effectively float-only; __half uses the specialization below.
template <typename T>
__device__ __inline__ void activation(const int activation, int4 &v){
    T *t_v = (T*)&v;
    constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
    if(activation ==1){
        for(int i = 0; i < T_NUMS_PER_INT4; i++)
            t_v[i] = Math<T,T,T>::ge(t_v[i], (T)0)?
                     t_v[i] : (T)0;
    } else{
        for(int i = 0; i < T_NUMS_PER_INT4; i++){
            // sigmoid(x) = e^x / (1 + e^x), using a fast reciprocal.
            T tmp = expf(t_v[i]);
            t_v[i] = tmp * __frcp_rn(tmp + (T)1);
        }
    }
}
// __half specialization: eight half lanes per int4.
// ReLU is done with __vmaxs2 on the raw 16-bit lane pairs (zero stays zero,
// negative halves have the sign bit set, so a signed 16-bit max against 0
// clamps them — NOTE(review): relies on half bit patterns ordering like
// signed ints for the values involved; confirm for -0.0/NaN inputs).
template <>
__device__ __inline__ void activation<__half>(const int activation, int4 &v){
#if __CUDA_ARCH__ >= 600
    __half2 *h2_v = (__half2*)&v;
    int *int_v = (int*)&v;
    if(activation ==1){
        for(int i = 0; i < 4; i++)
            int_v[i] = __vmaxs2(int_v[i], 0);
    } else{
        __half2 one = {(__half)1.f, (__half)1.f};
        for(int i = 0; i < 4; i++){
            // sigmoid via half2 exp and fast reciprocal.
            __half2 tmp = h2exp(h2_v[i]);
            h2_v[i] = __hmul2(tmp, h2rcp(__hadd2(one, tmp)));// __h2div(tmp, __hadd2(one, tmp));
        }
    }
#else
    // NOTE(review): silently a no-op below SM60 -- v is left unchanged.
#endif
}
// Clamp every packed T lane of v into [clip_min, clip_max].
template<typename T>
__device__ __inline__ void clip(int4 &v, float clip_min, float clip_max){
    constexpr int LANES = sizeof(int4) / sizeof(T);
    const T lo = (T)clip_min;
    const T hi = (T)clip_max;
    T *lane = (T*)&v;
    for(int i = 0; i < LANES; i++){
        T x = lane[i];
        if (!Math<T,T,T>::ge(x, lo)) x = lo;
        if (!Math<T,T,T>::le(x, hi)) x = hi;
        lane[i] = x;
    }
}
// GEMV kernel: output = vec (1 x padK) * matrix^T (padN x padK) + bias.
// matrix: NxK, with N and K both padded to int4 granularity; memory layout
// and fuse pattern are consistent with the GEMM kernels above.
// Launch: block (BLK_SIZE_X, BLK_SIZE_Y), grid.x = ceil(padN / BLK_TILE_N).
// BLK_TILE_N: min 8. Each thread accumulates THD_TILE_N output lanes over a
// strided slice of K, then results are reduced across threadIdx.x via shared
// memory and warp shuffles.
template<typename T, int BLK_TILE_N, int THD_TILE_N_V4, int BLK_SIZE>
__global__ void gemv(void *output,
                     const void *vec,
                     const void *matrix,
                     const void *bias,
                     const int padK,
                     const int padN,
                     const fuse_param_t fuse_param)
{
    // Block config: one int4 (vectorized load) per thread along K.
    constexpr int T_NUMS_PER_INT4 = sizeof(int4) / sizeof(T);
    constexpr int BLK_TILE_N_V4 = BLK_TILE_N / T_NUMS_PER_INT4;
    constexpr int THD_TILE_N = THD_TILE_N_V4 * T_NUMS_PER_INT4;
    constexpr int BLK_SIZE_Y = BLK_TILE_N_V4 / THD_TILE_N_V4;
    constexpr int BLK_SIZE_X = BLK_SIZE / BLK_SIZE_Y;
    constexpr int BLK_TILE_K = BLK_SIZE_X;
    int pad_k_v4 = padK / T_NUMS_PER_INT4;
    int pad_n_v4 = padN / T_NUMS_PER_INT4;
    int n_id = blockIdx.x*BLK_TILE_N + threadIdx.y*T_NUMS_PER_INT4;
    int64_t b_base_v4 = (int64_t)n_id*pad_k_v4;
    int4 *matrix_base_v4 = (int4*)matrix + b_base_v4;
    int4 reg_c[THD_TILE_N];
    int4 reg_b[THD_TILE_N];
    bool in_n_range[THD_TILE_N_V4];
    int4 reg_a;
    int4 zero = {0,0,0,0};
    T c[THD_TILE_N] = { T(0) };
#pragma unroll
    for(int i = 0; i < THD_TILE_N; i++) c[i] = (T)0;
#pragma unroll
    for(int i = 0; i < THD_TILE_N; i++){
        reg_c[i] = zero;
    }
#pragma unroll
    for(int i = 0; i < THD_TILE_N_V4; i++){
        in_n_range[i] = blockIdx.x*BLK_TILE_N_V4 + threadIdx.y + i*BLK_SIZE_Y < pad_n_v4;
    }
    // Main loop: each iteration loads one int4 of the vector and the matching
    // int4 from every owned matrix row, accumulating into reg_c.
#pragma unroll
    for(int k = 0; k < DivUp(pad_k_v4,BLK_TILE_K); k++){
        int64_t off = k*BLK_TILE_K + threadIdx.x;
        bool in_range = off < pad_k_v4;
        reg_a = in_range? ((int4*)vec)[off] : zero;
#pragma unroll
        for(int i = 0; i < THD_TILE_N_V4; i++){
#pragma unroll
            for(int j = 0; j < T_NUMS_PER_INT4; j++){
                reg_b[i*T_NUMS_PER_INT4 + j] = in_n_range[i] && in_range ?
                                               matrix_base_v4[(i*T_NUMS_PER_INT4*BLK_SIZE_Y+j)*pad_k_v4 + off]
                                               : zero;
                fma_v4<T>(reg_a, reg_b[i*T_NUMS_PER_INT4 + j],
                          reg_c[i*T_NUMS_PER_INT4 + j]);
            }
        }
    }
    // Horizontal reduce: collapse each int4 accumulator to a single T.
#pragma unroll
    for(int i = 0; i < THD_TILE_N; i++){
#pragma unroll
        for(int n = 0; n < T_NUMS_PER_INT4; n++){
            c[i] = Math<T,T,T>::add( ((T*)reg_c)[i*T_NUMS_PER_INT4 + n],
                                     c[i]);
        }
    }
    // Cross-thread reduction along threadIdx.x: shared-memory tree down to a
    // warp, then warp shuffles.
    __shared__ T smem[BLK_SIZE_X*BLK_TILE_N];
    int reduce_off = (threadIdx.y*THD_TILE_N)*BLK_SIZE_X + threadIdx.x;
    constexpr int REDUCE_SIZE = BLK_SIZE_X;
    if(REDUCE_SIZE >= 64){
#pragma unroll
        for(int i = 0; i < THD_TILE_N; i++){
            smem[reduce_off + i*BLK_SIZE_X] = c[i];
        }
        __syncthreads();
    }
    //reduce
    if(REDUCE_SIZE >= 1024){
        if(threadIdx.x < 512)
#pragma unroll
            for(int i = 0; i < THD_TILE_N; i++)
                smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
                                                                   smem[512 + reduce_off + i*BLK_SIZE_X]);
        __syncthreads();
    }
    if(REDUCE_SIZE >= 512){
        if(threadIdx.x < 256)
#pragma unroll
            for(int i = 0; i < THD_TILE_N; i++)
                smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
                                                                   smem[256 + reduce_off + i*BLK_SIZE_X]);
        __syncthreads();
    }
    if(REDUCE_SIZE >= 256){
        if(threadIdx.x < 128)
#pragma unroll
            for(int i = 0; i < THD_TILE_N; i++)
                smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
                                                                   smem[128 + reduce_off + i*BLK_SIZE_X]);
        __syncthreads();
    }
    if(REDUCE_SIZE >= 128){
        if(threadIdx.x < 64)
#pragma unroll
            for(int i = 0; i < THD_TILE_N; i++)
                smem[reduce_off + i*BLK_SIZE_X] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
                                                                   smem[64 + reduce_off + i*BLK_SIZE_X]);
        __syncthreads();
    }
    // NOTE(review): __activemask() returns whichever lanes happen to be
    // converged here, not necessarily the intended shuffle participants;
    // an explicit 0xffffffff (all shuffling lanes are < 32 and active) would
    // be the safer mask -- confirm.
    unsigned FULL_MASK = __activemask();
    if (REDUCE_SIZE >= 64) {
        if(threadIdx.x < 32){
#pragma unroll
            for(int i = 0; i < THD_TILE_N; i++)
                c[i] = Math<T,T,T>::add(smem[reduce_off + i*BLK_SIZE_X],
                                        smem[reduce_off + i*BLK_SIZE_X + 32]);
        }
    }
    if(threadIdx.x < 32){
        if (REDUCE_SIZE >= 32) {
#pragma unroll
            for(int i = 0; i < THD_TILE_N; i++)
                c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 16));
        }
        if (REDUCE_SIZE >= 16) {
#pragma unroll
            for(int i = 0; i < THD_TILE_N; i++)
                c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 8));
        }
        if (REDUCE_SIZE >= 8) {
#pragma unroll
            for(int i = 0; i < THD_TILE_N; i++)
                c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 4));
        }
        if (REDUCE_SIZE >= 4) {
#pragma unroll
            for(int i = 0; i < THD_TILE_N; i++)
                c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 2));
        }
        if (REDUCE_SIZE >= 2) {
#pragma unroll
            for(int i = 0; i < THD_TILE_N; i++)
                c[i] = Math<T,T,T>::add(c[i], __shfl_down_sync(FULL_MASK, c[i], 1));
        }
    }
    // Lane 0 of each row publishes its finished outputs through shared memory
    // so the whole block can cooperatively write (and fuse) them.
    int4 *smem_v4 = (int4*)smem;
    if (threadIdx.x == 0) {
#pragma unroll
        for(int i = 0; i < THD_TILE_N_V4; i++){
            smem_v4[i*BLK_SIZE_Y + threadIdx.y] = ((int4*)c)[i];
        }
    }
    __syncthreads();
    int tid = threadIdx.y*BLK_SIZE_X + threadIdx.x;
    for(int thd_off = tid; thd_off < BLK_TILE_N_V4; thd_off += BLK_SIZE){
        int out_off = blockIdx.x*BLK_TILE_N_V4 + thd_off;
        bool in_output_range = out_off < pad_n_v4;
        if(in_output_range){
            int4 bias_data = bias!=NULL? ((int4*)bias)[out_off] : zero;
            //TODO add bias
            int4 out = add_v4<T>(smem_v4[thd_off], bias_data);
            // fuse
            if(fuse_param.has_activation) activation<T>(fuse_param.has_activation, out);
            if(fuse_param.has_clip) clip<T>(out, fuse_param.clip_min, fuse_param.clip_max);
            int concatV4_off = 0;
            if(fuse_param.has_concat){
                int concat_offset_v4 = fuse_param.concat_offset / T_NUMS_PER_INT4;
                int concat_stride_v4 = fuse_param.concat_stride / T_NUMS_PER_INT4;
                concatV4_off = concat_offset_v4 + blockIdx.y*concat_stride_v4;
                out_off += concatV4_off;
            }
            ((int4*)output)[out_off] = out;
        }
    }
}
// Host launcher for the gemv kernel (the M == 1 GEMM path). Picks a tile /
// block configuration from N and K so roughly `expect_blocks` blocks are
// launched, then instantiates the matching gemv template. Requires transB.
// The LAUNCH_KERNEL / CONFIG_KERNEL macros expand against the local
// constexpr names (BLK_SIZE_X/Y, THD_TILE_N_V4, ...), so names here must
// not change.
template<typename T>
ppl::common::RetCode PPLCUDAGemvForwardImp(
    const cudaStream_t &stream,
    const int M,
    const int N,
    const int K,
    const void* input,
    const void* weight,
    const void* bias,
    void* output,
    const ppl::nn::common::GemmParam &param,
    void* temp_buffer,
    const fuse_param_t &fuse_param)
{
    if(!param.transB) return ppl::common::RC_UNSUPPORTED;
    constexpr int ELEM_NUM_PR_LD = sizeof(int4)/sizeof(T);
    constexpr int expect_blocks = 64;
    //constexpr int MAX_BLK_SIZE = 256;
    //constexpr int MAX_THD_TILE_N_V4 = 4;
    int n_v4 = N / ELEM_NUM_PR_LD;
    int blk_tile_n_v4 = DivUp(n_v4, expect_blocks/M);
#define LAUNCH_KERNEL(){ \
    constexpr int BLK_TILE_N = BLK_SIZE_Y * THD_TILE_N_V4 * ELEM_NUM_PR_LD; \
    constexpr int BLK_SIZE = BLK_SIZE_Y * BLK_SIZE_X; \
    dim3 grid; \
    grid.x = DivUp(N, BLK_TILE_N); \
    grid.y = 1; grid.z = 1; \
    dim3 threads = dim3(BLK_SIZE_X, BLK_SIZE_Y,1); \
    gemv<T, BLK_TILE_N, THD_TILE_N_V4, BLK_SIZE><<<grid, threads, 0, stream>>>\
            (output, input, weight, bias, K, N, fuse_param); \
}
#define CONFIG_KERNEL(_blk_tile_n_v4){ \
    if(BLK_SIZE_X <= 64 && blk_tile_n_v4 >= 16){ \
        constexpr int THD_TILE_N_V4 = 4; \
        constexpr int BLK_SIZE_Y = 4; \
        LAUNCH_KERNEL(); \
    } else if(blk_tile_n_v4 >= 8){ \
        constexpr int THD_TILE_N_V4 = 4; \
        constexpr int BLK_SIZE_Y = 2; \
        LAUNCH_KERNEL(); \
    } else if(blk_tile_n_v4 >= 4){ \
        constexpr int THD_TILE_N_V4 = 2; \
        constexpr int BLK_SIZE_Y = 2; \
        LAUNCH_KERNEL(); \
    } else if(blk_tile_n_v4 >= 2){ \
        constexpr int THD_TILE_N_V4 = 2; \
        constexpr int BLK_SIZE_Y = 1; \
        LAUNCH_KERNEL(); \
    } else{ \
        constexpr int THD_TILE_N_V4 = 1; \
        constexpr int BLK_SIZE_Y = 1; \
        LAUNCH_KERNEL(); \
    } \
}
    // Deeper K gets a wider reduction dimension (more threads along K).
    if (K >= 512){
        constexpr int BLK_SIZE_X = 64;
        CONFIG_KERNEL(blk_tile_n_v4);
    }
    else{
        constexpr int BLK_SIZE_X = 32;
        CONFIG_KERNEL(blk_tile_n_v4);
    }
    return ppl::common::RC_SUCCESS;
}
|
518be42836793d0a058e890ed7bdcce98dc09151.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/hip/AccumulateType.cuh"
#include "ATen/hip/HIPTensorMethods.cuh"
#include "ATen/hip/HIPTypeConversion.cuh"
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHNumerics.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
static const int WARP_SIZE = 32;
// Returns true in every lane when any two lanes of the warp hold the same
// `val`. Used below to decide between serialized and concurrent gradient
// accumulation.
__device__ __forceinline__ bool warp_has_collision(int val) {
  // Compare our value to the values stored in the next 16 lanes,
  // wrapping around at 32. If any pair of values is the same than
  // there is a collision in the warp.
  // (16 offsets suffice: every unordered lane pair is checked exactly once.)
  bool dup = 0;
  const int laneId = threadIdx.x % 32;
#pragma unroll
  for (int i = 1; i <= 16; i++) {
    dup |= (WARP_SHFL(val, (laneId + i) % 32) == val);
  }
  return __any(dup) != 0;
}
// parallelizes over features
// parallelizes over features
// One warp per feature dimension (4 warps per block); grad_weight must be
// pre-zeroed. Deterministic alternative to atomicAdd for small index counts.
template <typename scalar_t>
__global__ void embedding_backward_feature_kernel(
  int64_t* indices, scalar_t* grad, scalar_t* grad_weight,
  int64_t num_indices, int64_t stride, int padding_idx) {
  const int feature_dim = blockIdx.x * 4 + threadIdx.x / 32;
  if (feature_dim >= stride) {
    return;
  }
  // The strategy here is that each warp handles a single feature
  // dimension.
  // Within that feature dimension, points in the [batch][element]
  // dimension can overlap, and we need to determine if threads want
  // to add to the gradient in a colliding manner.
  // Typically one would use floating-point atomicAdd() to resolve
  // these collisions, but that is non-deterministic if there are
  // collisions. Non-determinism for this code is really bad,
  // especially in RNNs, and is prone to snowballing error.
  // In order to get a deterministic order of execution, we handle
  // non-colliding updates separately from colliding ones. Colliding
  // updates are serialized in their order of execution by using the
  // warp-wide collision detector `warp_has_collision`.
  const int laneId = threadIdx.x % 32;
  for (int64_t i = laneId; i < num_indices; i += WARP_SIZE) {
    const int weight_index = (int)indices[i];
    if (weight_index == padding_idx) {
      continue;
    }
    auto value = grad[i * stride + feature_dim];
    // FIXME: should we accumulate as accreal?
    // Check for collision
    if (warp_has_collision(weight_index)) {
      // Run all lanes sequentially; warp divergence
      // NOTE(review): this inner `i` shadows the outer int64_t loop
      // variable; it is a lane counter, not an index position.
      for (int i = 0; i < WARP_SIZE; ++i) {
        if (laneId == i) {
          grad_weight[weight_index * stride + feature_dim] += value;
        }
      }
    } else {
      // No collision; warp coherence
      grad_weight[weight_index * stride + feature_dim] += value;
    }
  }
}
// Scatter-accumulate kernel for the sorted-index path: `input` is the sorted
// index list, `indices` the original positions, `count` the optional
// per-index frequency (for scale_grad_by_freq). Launch: block (32, 4),
// grid (ceil(numel/4), ceil(stride/128)).
template <typename scalar_t>
__global__ void embedding_backward_kernel(
  int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
  int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
  using accscalar_t = cuda::acc_type<scalar_t>;
  int idx = blockIdx.x * 4 + threadIdx.y;
  // Each warp is responsible for an input into the LookupTable.
  // If the preceding input has the same as this input, then the warp
  // exits immediately. The warp also processes subsequent inputs with the
  // same value.
  //
  // Input Warp
  // 1 <warp 1>
  // 1 <warp 1> (<warp 2> exits without doing any work)
  // 5 <warp 3>
  // 8 <warp 4>
  // Number of values proceessed by each thread (grain size)
  const int SZ = 4;
  if (idx < numel
      && (idx == 0 || input[idx] != input[idx - 1])
      && input[idx] != padding_idx) {
    do {
      const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
      const int weight_row = ((int) input[idx]) * stride;
      const int grad_row = ((int) indices[idx]) * stride;
      // Divide by the index frequency when scale_grad_by_freq is on.
      const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
      accscalar_t gradient[SZ];
      accscalar_t weight[SZ];
#pragma unroll
      for (int ii = 0; ii < SZ; ii++) {
        int feature_dim = start_feature + ii * WARP_SIZE;
        if (feature_dim < stride) {
          gradient[ii] = scalar_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
          weight[ii] = scalar_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
        }
      }
#pragma unroll
      for (int ii = 0; ii < SZ; ii++) {
        weight[ii] += gradient[ii] * scale;
      }
#pragma unroll
      for (int ii = 0; ii < SZ; ii++) {
        int feature_dim = start_feature + ii * WARP_SIZE;
        if (feature_dim < stride) {
          grad_weight[weight_row + feature_dim] = scalar_cast<scalar_t>(weight[ii]);
        }
      }
      idx++;
    } while (idx < numel && input[idx] == input[idx - 1]);
  }
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and renormalize
   rows whose norm exceeds max_norm. One block per (unique) index; dynamic
   shared memory must hold blockDim.x accscalar_t values for the reduction. */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
    scalar_t* weights, int64_t* indices, accscalar_t max_norm,
    accscalar_t norm_type, int dim) {
  // Some casting hacks since dynamic shared memory and templates don't work together:
  extern __shared__ unsigned char smem[];
  auto sdata = reinterpret_cast<accscalar_t*>(smem);
  int tid = threadIdx.x;
  int base_index = indices[blockIdx.x] * dim;
  // Per-thread partial sum of |x|^p over a strided slice of the row.
  accscalar_t v = 0;
  for (int i = tid; i < dim; i += blockDim.x) {
    auto x = scalar_cast<accscalar_t>(weights[base_index + i]);
    if (norm_type == 1) {
      v += std::abs(x);
    } else if (norm_type == 2) {
      v += x * x;
    } else {
      v += ::pow(x, norm_type);
    }
  }
  using Op = ReduceAdd<accscalar_t, accscalar_t>;
  v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
  if (tid == 0) {
    // Final p-norm: (sum |x|^p)^(1/p), broadcast via shared memory.
    sdata[0] = ::pow(v, scalar_cast<accscalar_t>(1.0 / norm_type));
  }
  __syncthreads();
  // now we renormalize the blocks that need it
  if (sdata[0] > max_norm) {
    // Epsilon guards against division by ~zero norms.
    auto factor = scalar_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
    for (int i = tid; i < dim; i += blockDim.x) {
      weights[base_index + i] *= factor;
    }
  }
}
} // anonymous namespace
// Host entry point for the embedding backward pass.
// Small workloads (<= 768 indices, no frequency scaling) take the
// deterministic per-feature kernel; otherwise indices are sorted with Thrust
// and the sorted-scatter kernel accumulates, optionally dividing each update
// by the index frequency (scale_grad_by_freq).
Tensor embedding_backward_cuda(const Tensor & grad_, const Tensor & indices,
                               int64_t num_weights, int64_t padding_idx,
                               bool scale_grad_by_freq) {
  auto grad_arg = TensorArg(grad_, "grad", 1);
  auto indices_arg = TensorArg(indices, "indices", 1);
  checkScalarType("embedding_backward", indices_arg, kLong);
  checkContiguous("embedding_backward", indices_arg);
  checkSameGPU("embedding_backward", grad_arg, indices_arg);
  auto num_indices = indices.numel();
  auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
  auto grad_weight = grad_.type().zeros({num_weights, grad_.size(-1)});
  int64_t stride = grad_weight.stride(0);
  hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
  if (num_indices <= 768 && !scale_grad_by_freq) {
    // Fast deterministic path: one warp per feature dimension.
    dim3 grid(THCCeilDiv(stride, (int64_t) 4));
    dim3 block(128);
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
      using cuda_scalar_t = cuda::type<scalar_t>;
      hipLaunchKernelGGL((embedding_backward_feature_kernel), dim3(grid), dim3(block), 0, stream,
        indices.data<int64_t>(),
        grad.data<cuda_scalar_t>(),
        grad_weight.data<cuda_scalar_t>(),
        num_indices,
        stride,
        padding_idx);
    });
    THCudaCheck(hipGetLastError());
    return grad_weight;
  }
  auto sorted_indices = indices.type().tensor(indices.sizes());
  auto orig_indices = indices.type().tensor(indices.sizes());
  using device_ptr = thrust::device_ptr<int64_t>;
  // Sort the inputs into sorted with the corresponding indices; we
  // don't need a stable or multidimensional sort, so just use Thrust
  // directly
  {
    sorted_indices.copy_(indices);
    auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
    auto policy = thrust::hip::par(allocator).on(stream);
    // Fill sortedOrigIndices with sequential indices
    auto count_iter = thrust::counting_iterator<int64_t>(0);
    auto orig_data = device_ptr(orig_indices.data<int64_t>());
    thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
    // Sort; a stable sort is not required
    auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
    thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
                        ThrustLTOp<int64_t>());
  }
  Tensor count;
  if (scale_grad_by_freq) {
    count = indices.type().tensor(indices.sizes());
    auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
    auto policy = thrust::hip::par(allocator).on(stream);
    // Compute an increasing sequence per unique item in sortedIndices:
    // sorted: 2 5 5 5 7 7 8 9 9
    //  count: 1 1 2 3 1 2 1 1 2
    auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
    auto count_data = device_ptr(count.data<int64_t>());
    thrust::inclusive_scan_by_key(
      policy,
      sorted_data,
      sorted_data + num_indices,
      thrust::make_constant_iterator(1),
      count_data
    );
    // Take the maximum of each count per unique key in reverse:
    // sorted: 2 5 5 5 7 7 8 9 9
    //  count: 1 3 3 3 2 2 1 2 2
    thrust::inclusive_scan_by_key(
      policy,
      thrust::make_reverse_iterator(sorted_data + num_indices),
      thrust::make_reverse_iterator(sorted_data),
      thrust::make_reverse_iterator(count_data + num_indices),
      thrust::make_reverse_iterator(count_data + num_indices),
      thrust::equal_to<int64_t>(),
      thrust::maximum<int64_t>()
    );
  }
  dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
  dim3 block(32, 4);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
    using cuda_scalar_t = cuda::type<scalar_t>;
    hipLaunchKernelGGL((embedding_backward_kernel), dim3(grid), dim3(block), 0, stream,
      sorted_indices.data<int64_t>(),
      orig_indices.data<int64_t>(),
      grad.data<cuda_scalar_t>(),
      grad_weight.data<cuda_scalar_t>(),
      count.defined() ? count.data<int64_t>() : nullptr,
      num_indices,
      stride,
      padding_idx);
  });
  THCudaCheck(hipGetLastError());
  return grad_weight;
}
// In-place max-norm renormalization of the embedding rows referenced by
// `indices` (the rows touched in the forward pass). Returns `self`.
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
                                double max_norm, double norm_type) {
  auto self_arg = TensorArg(self, "self", 1);
  auto indices_arg = TensorArg(indices, "indices", 1);
  checkContiguous("embedding_renorm_", self_arg);
  checkContiguous("embedding_renorm", indices_arg);
  checkDim("embedding_renorm_", self_arg, 2);
  checkSameGPU("embedding_renorm", self_arg, indices_arg);
  hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA();
  auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
  auto policy = thrust::hip::par(allocator).on(stream);
  using device_ptr = thrust::device_ptr<int64_t>;
  auto num_indices = indices.numel();
  auto indices_data = device_ptr(indices.data<int64_t>());
  // FIXME: thrust::unique only removes consecutive elements that are equal.
  // We have race conditions when indices contains duplicates which are not
  // adjacent
  auto unique_indices = indices.type().tensor(indices.numel());
  auto unique_data = device_ptr(unique_indices.data<int64_t>());
  auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
  auto num_unique_indices = static_cast<int>(end - unique_data);
  // One block per unique row; dynamic smem sized for the block reduction.
  dim3 grid(num_unique_indices);
  dim3 block(128);
  int dim = self.stride(0);
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
    using cuda_scalar_t = cuda::type<scalar_t>;
    using accscalar_t = cuda::acc_type<cuda_scalar_t>;
    hipLaunchKernelGGL((renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream,
      self.data<cuda_scalar_t>(),
      unique_indices.data<int64_t>(),
      scalar_cast<accscalar_t>(max_norm),
      scalar_cast<accscalar_t>(norm_type),
      dim);
  });
  THCudaCheck(hipGetLastError());
  return self;
}
}} // namespace at::native
| 518be42836793d0a058e890ed7bdcce98dc09151.cu | #include "ATen/ATen.h"
#include "ATen/TensorUtils.h"
#include "ATen/NativeFunctions.h"
#include "ATen/cuda/AccumulateType.cuh"
#include "ATen/cuda/CUDATensorMethods.cuh"
#include "ATen/cuda/CUDATypeConversion.cuh"
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCNumerics.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
namespace at { namespace native {
namespace {
static const int WARP_SIZE = 32;
__device__ __forceinline__ bool warp_has_collision(int val) {
// Compare our value to the values stored in the next 16 lanes,
// wrapping around at 32. If any pair of values is the same than
// there is a collision in the warp.
bool dup = 0;
const int laneId = threadIdx.x % 32;
#pragma unroll
for (int i = 1; i <= 16; i++) {
dup |= (WARP_SHFL(val, (laneId + i) % 32) == val);
}
return __any(dup) != 0;
}
// parallelizes over features
template <typename scalar_t>
__global__ void embedding_backward_feature_kernel(
int64_t* indices, scalar_t* grad, scalar_t* grad_weight,
int64_t num_indices, int64_t stride, int padding_idx) {
const int feature_dim = blockIdx.x * 4 + threadIdx.x / 32;
if (feature_dim >= stride) {
return;
}
// The strategy here is that each warp handles a single feature
// dimension.
// Within that feature dimension, points in the [batch][element]
// dimension can overlap, and we need to determine if threads want
// to add to the gradient in a colliding manner.
// Typically one would use floating-point atomicAdd() to resolve
// these collisions, but that is non-deterministic if there are
// collisions. Non-determinism for this code is really bad,
// especially in RNNs, and is prone to snowballing error.
// In order to get a deterministic order of execution, we handle
// non-colliding updates separately from colliding ones. Colliding
// updates are serialized in their order of execution by using the
// warp-wide collision detector `warp_has_collision`.
const int laneId = threadIdx.x % 32;
for (int64_t i = laneId; i < num_indices; i += WARP_SIZE) {
const int weight_index = (int)indices[i];
if (weight_index == padding_idx) {
continue;
}
auto value = grad[i * stride + feature_dim];
// FIXME: should we accumulate as accreal?
// Check for collision
if (warp_has_collision(weight_index)) {
// Run all lanes sequentially; warp divergence
for (int i = 0; i < WARP_SIZE; ++i) {
if (laneId == i) {
grad_weight[weight_index * stride + feature_dim] += value;
}
}
} else {
// No collision; warp coherence
grad_weight[weight_index * stride + feature_dim] += value;
}
}
}
template <typename scalar_t>
__global__ void embedding_backward_kernel(
int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
int64_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = cuda::acc_type<scalar_t>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values proceessed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = scalar_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = scalar_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = scalar_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t>
__global__ void renorm_kernel(
scalar_t* weights, int64_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int dim) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * dim;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = scalar_cast<accscalar_t>(weights[base_index + i]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += std::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t, accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = std::pow(v, scalar_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = scalar_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i] *= factor;
}
}
}
} // anonymous namespace
Tensor embedding_backward_cuda(const Tensor & grad_, const Tensor & indices,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkScalarType("embedding_backward", indices_arg, kLong);
checkContiguous("embedding_backward", indices_arg);
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
auto grad_weight = grad_.type().zeros({num_weights, grad_.size(-1)});
int64_t stride = grad_weight.stride(0);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
if (num_indices <= 768 && !scale_grad_by_freq) {
dim3 grid(THCCeilDiv(stride, (int64_t) 4));
dim3 block(128);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
embedding_backward_feature_kernel<<<grid, block, 0, stream>>>(
indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
auto sorted_indices = indices.type().tensor(indices.sizes());
auto orig_indices = indices.type().tensor(indices.sizes());
using device_ptr = thrust::device_ptr<int64_t>;
// Sort the inputs into sorted with the corresponding indices; we
// don't need a stable or multidimensional sort, so just use Thrust
// directly
{
sorted_indices.copy_(indices);
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Fill sortedOrigIndices with sequential indices
auto count_iter = thrust::counting_iterator<int64_t>(0);
auto orig_data = device_ptr(orig_indices.data<int64_t>());
thrust::copy(policy, count_iter, count_iter + num_indices, orig_data);
// Sort; a stable sort is not required
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data,
ThrustLTOp<int64_t>());
}
Tensor count;
if (scale_grad_by_freq) {
count = indices.type().tensor(indices.sizes());
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
// Compute an increasing sequence per unique item in sortedIndices:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 1 2 3 1 2 1 1 2
auto sorted_data = device_ptr(sorted_indices.data<int64_t>());
auto count_data = device_ptr(count.data<int64_t>());
thrust::inclusive_scan_by_key(
policy,
sorted_data,
sorted_data + num_indices,
thrust::make_constant_iterator(1),
count_data
);
// Take the maximum of each count per unique key in reverse:
// sorted: 2 5 5 5 7 7 8 9 9
// count: 1 3 3 3 2 2 1 2 2
thrust::inclusive_scan_by_key(
policy,
thrust::make_reverse_iterator(sorted_data + num_indices),
thrust::make_reverse_iterator(sorted_data),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::make_reverse_iterator(count_data + num_indices),
thrust::equal_to<int64_t>(),
thrust::maximum<int64_t>()
);
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
dim3 block(32, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
embedding_backward_kernel<<<grid, block, 0, stream>>>(
sorted_indices.data<int64_t>(),
orig_indices.data<int64_t>(),
grad.data<cuda_scalar_t>(),
grad_weight.data<cuda_scalar_t>(),
count.defined() ? count.data<int64_t>() : nullptr,
num_indices,
stride,
padding_idx);
});
THCudaCheck(cudaGetLastError());
return grad_weight;
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkContiguous("embedding_renorm_", self_arg);
checkContiguous("embedding_renorm", indices_arg);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
cudaStream_t stream = globalContext().getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);
using device_ptr = thrust::device_ptr<int64_t>;
auto num_indices = indices.numel();
auto indices_data = device_ptr(indices.data<int64_t>());
// FIXME: thrust::unique only removes consecutive elements that are equal.
// We have race conditions when indices contains duplicates which are not
// adjacent
auto unique_indices = indices.type().tensor(indices.numel());
auto unique_data = device_ptr(unique_indices.data<int64_t>());
auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data);
auto num_unique_indices = static_cast<int>(end - unique_data);
dim3 grid(num_unique_indices);
dim3 block(128);
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.type(), "embedding_backward", [&] {
using cuda_scalar_t = cuda::type<scalar_t>;
using accscalar_t = cuda::acc_type<cuda_scalar_t>;
renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>(
self.data<cuda_scalar_t>(),
unique_indices.data<int64_t>(),
scalar_cast<accscalar_t>(max_norm),
scalar_cast<accscalar_t>(norm_type),
dim);
});
THCudaCheck(cudaGetLastError());
return self;
}
}} // namespace at::native
|
ce1bcb687bdff616f59c5af9e5753df2a1a4112f.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "GeneralizedPatchMatch.cuh"
#include "Combo.cuh"
#define ENABLE_VIS 0
struct Parameters
{
std::vector<std::string> layers; //which layers used as content
std::vector<std::string> layernames; //which layers used as content
std::vector<std::string> datanames; //which layers used as content
int patch_size0;
int iter;
};
Combo::Combo()
{
}
Combo::~Combo()
{
}
void Combo::SetGPU(int no)
{
int devCount;
hipGetDeviceCount(&devCount);
wcout << "CUDA Devices: " << endl << endl;
for (int i = 0; i < devCount; ++i)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, i);
size_t totalMem = 0;
size_t freeMem = 0;
hipSetDevice(i);
hipMemGetInfo(&freeMem, &totalMem);
wcout << "GPU " << i << ", Name = " << props.name << ", free = " << freeMem << ", total = " << totalMem << endl;
}
hipSetDevice(no);
int num = -1;
size_t totalMem = 0;
size_t freeMem = 0;
hipGetDevice(&num);
hipMemGetInfo(&freeMem, &totalMem);
wcout << "Current GPU = " << num << ", free = " << freeMem << ", total = " << totalMem << endl;
}
bool Combo::LoadA(const char* file_A)
{
img_AL_col = imread(file_A);
if (img_AL_col.empty())
{
cout << "Error: Source image cannot read!" << endl;
waitKey();
return false;
}
img_AL = Mat::zeros(img_AL_col.size(), CV_8UC3);
// convert to grayscale image
{
Mat gray(img_AL_col.size(), CV_8UC3);
cvtColor(img_AL_col, gray, cv::COLOR_BGR2Lab);
#pragma omp parallel for
for (int r = 0; r < img_AL.rows; ++r)
{
for (int c = 0; c < img_AL.cols; ++c)
{
uchar g = gray.at<Vec3b>(r, c)[0];
img_AL.at<Vec3b>(r, c) = Vec3b(g, g, g);
}
}
}
return true;
}
bool Combo::LoadBP(const char* file_BP)
{
img_BPL_col = imread(file_BP);
if (img_BPL_col.empty())
{
cout << "Error: Reference image cannot read!" << endl;
waitKey();
return false;
}
img_BPL = Mat::zeros(img_BPL_col.size(), CV_8UC3);
// convert to grayscale image
{
Mat gray(img_BPL_col.size(), CV_8UC3);
cvtColor(img_BPL_col, gray, cv::COLOR_BGR2Lab);
#pragma omp parallel for
for (int r = 0; r < img_BPL.rows; ++r)
{
for (int c = 0; c < img_BPL.cols; ++c)
{
uchar g = gray.at<Vec3b>(r, c)[0];
img_BPL.at<Vec3b>(r, c) = Vec3b(g, g, g);
}
}
}
return true;
}
void Combo::GetASize(int& width, int& height)
{
width = img_AL.cols;
height = img_AL.rows;
}
void Combo::GetBPSize(int& width, int& height)
{
width = img_BPL.cols;
height = img_BPL.rows;
}
void Combo::ComputeDist(Classifier& classifier_A, Classifier& classifier_B,
FILE* fp_a, FILE* fp_b, const char* ff_a, const char* ff_b)
{
if (img_BPL.empty())
{
printf("Error: Image2 is empty!\n");
return;
}
if(img_AL.empty())
{
printf("Error: Image1 is empty!\n");
return;
}
const int param_size = 9;
int aw = img_AL.cols;
int ah = img_AL.rows;
int bw = img_BPL.cols;
int bh = img_BPL.rows;
int *params_host, *params_device_AB, *params_device_BA;
unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA;
unsigned int *rann_device_AB, *rann_device_BA;
float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA;
float *rannd_device_AB, *rannd_device_BA;
unsigned char* bgr_device_AB, *bgr_device_BA, *bgr_host_AB, *bgr_host_BA;
//set parameters
Parameters params;
params.layers.push_back("conv5_1/bn");
params.layers.push_back("conv4_1/bn");
params.layers.push_back("conv3_1/bn");
params.layers.push_back("conv2_1/bn");
params.layers.push_back("conv1_1/bn");
std::vector<int> sizes;
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
//scale and enhance
Mat img_BP = img_BPL.clone();
Mat img_A = img_AL.clone();
std::vector<float *> data_A;
data_A.resize(params.layers.size());
std::vector<Dim> data_A_size;
data_A_size.resize(params.layers.size());
classifier_A.Predict(img_A, params.layers, data_A, data_A_size);
std::vector<float *> data_B;
data_B.resize(params.layers.size());
std::vector<Dim> data_B_size;
data_B_size.resize(params.layers.size());
classifier_B.Predict(img_BP, params.layers, data_B, data_B_size);
int full_ann_size_AB = aw * ah;
int full_ann_size_BA = bw * bh;
params_host = (int *)malloc(param_size * sizeof(int));
ann_host_AB = (unsigned int *)malloc(full_ann_size_AB * sizeof(unsigned int));
annd_host_AB = (float *)malloc(full_ann_size_AB * sizeof(float));
bgr_host_AB = (unsigned char*)malloc(full_ann_size_AB * sizeof(unsigned char)* 3);
ann_host_BA = (unsigned int *)malloc(full_ann_size_BA * sizeof(unsigned int));
annd_host_BA = (float *)malloc(full_ann_size_BA * sizeof(float));
bgr_host_BA = (unsigned char*)malloc(full_ann_size_BA * sizeof(unsigned int)* 3);
hipMalloc(¶ms_device_AB, param_size * sizeof(int));
hipMalloc(&ann_device_AB, full_ann_size_AB * sizeof(unsigned int));
hipMalloc(&rann_device_AB, full_ann_size_AB * sizeof(unsigned int));
hipMalloc(&annd_device_AB, full_ann_size_AB * sizeof(float));
hipMalloc(&rannd_device_AB, full_ann_size_AB * sizeof(float));
hipMalloc(&bgr_device_AB, full_ann_size_AB * sizeof(unsigned int));
hipMalloc(¶ms_device_BA, param_size * sizeof(int));
hipMalloc(&ann_device_BA, full_ann_size_BA * sizeof(unsigned int));
hipMalloc(&rann_device_BA, full_ann_size_BA * sizeof(unsigned int));
hipMalloc(&annd_device_BA, full_ann_size_BA * sizeof(float));
hipMalloc(&rannd_device_BA, full_ann_size_BA * sizeof(float));
hipMalloc(&bgr_device_BA, full_ann_size_BA * sizeof(unsigned char));
int numlayer = params.layers.size();
ifstream aflow_input;
aflow_input.open(ff_a);
for (int y = 0; y < ah; y++)
{
for (int x = 0; x < aw; x++)
{
int dx = 0, dy = 0;
aflow_input >> dx;
aflow_input >> dy;
int xbest = x + dx;
int ybest = y + dy;
ann_host_AB[y * aw + x] = XY_TO_INT(xbest, ybest);
}
}
aflow_input.close();
ifstream bflow_input;
bflow_input.open(ff_b);
for (int y = 0; y < bh; y++)
{
for (int x = 0; x < bw; x++)
{
int dx = 0, dy = 0;
bflow_input >> dx;
bflow_input >> dy;
int xbest = x + dx;
int ybest = y + dy;
ann_host_BA[y * bw + x] = XY_TO_INT(xbest, ybest);
}
}
bflow_input.close();
hipMemcpy(ann_device_AB, ann_host_AB, full_ann_size_AB * sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(ann_device_BA, ann_host_BA, full_ann_size_BA * sizeof(unsigned int), hipMemcpyHostToDevice);
dim3 blocksPerGridAB(aw / 20 + 1, ah / 20 + 1, 1);
dim3 blocksPerGridBA(bw / 20 + 1, bh / 20 + 1, 1);
dim3 threadsPerBlock(20, 20, 1);
reverse_flow << <blocksPerGridAB, threadsPerBlock >> >(ann_device_AB, ann_device_BA, rann_device_AB, ah, aw, bh, bw);
reverse_flow << <blocksPerGridBA, threadsPerBlock >> >(ann_device_BA, ann_device_AB, rann_device_BA, bh, bw, ah, aw);
Mat result_AB = reconstruct_avg(img_AL_col, img_BPL_col, ann_host_AB, sizes[numlayer - 1]);
hipMemcpy(ann_host_AB, rann_device_AB, full_ann_size_AB * sizeof(unsigned int), hipMemcpyDeviceToHost);
Mat reverse_AB = reconstruct_avg(img_AL_col, img_AL_col, ann_host_AB, sizes[numlayer - 1]);
fwrite(&ah, sizeof(int), 1, fp_a);
fwrite(&aw, sizeof(int), 1, fp_a);
fwrite(&bh, sizeof(int), 1, fp_b);
fwrite(&bw, sizeof(int), 1, fp_b);
std::vector<uchar> buf;
imencode(".png", result_AB, buf);
int sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_a);
fwrite(&(buf[0]), sizeof(uchar), sz, fp_a);
imencode(".png", reverse_AB, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_a);
fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_a);
Mat result_BA = reconstruct_avg(img_BPL_col, img_AL_col, ann_host_BA, sizes[numlayer - 1]);
hipMemcpy(ann_host_BA, rann_device_BA, full_ann_size_BA * sizeof(unsigned int), hipMemcpyDeviceToHost);
Mat reverse_BA = reconstruct_avg(img_BPL_col, img_BPL_col, ann_host_BA, sizes[numlayer - 1]);
imencode(".png", result_BA, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_b);
fwrite(&(buf[0]), sizeof(uchar), sz, fp_b);
imencode(".png", reverse_BA, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_b);
fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_b);
// compute feature distance for each layer
for (int curr_layer = 0; curr_layer < numlayer; curr_layer++)//from 32 to 512
{
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
int scale = pow(2, 4 - curr_layer);
// error ba
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
//copy to device
hipMemcpy(params_device_AB, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
hipMemcpy(params_device_BA, params_host, param_size * sizeof(int), hipMemcpyHostToDevice);
compute_dist_norm << <blocksPerGridAB, threadsPerBlock >> >(ann_device_AB, annd_device_AB, data_A[curr_layer], data_B[curr_layer], params_device_AB, aw, ah, scale);
compute_dist_norm << <blocksPerGridBA, threadsPerBlock >> >(ann_device_BA, annd_device_BA, data_B[curr_layer], data_A[curr_layer], params_device_BA, bw, bh, scale);
convert_float2bgr << <blocksPerGridAB, threadsPerBlock >> >(annd_device_AB, bgr_device_AB, aw, ah);
convert_float2bgr << <blocksPerGridBA, threadsPerBlock >> >(annd_device_BA, bgr_device_BA, bw, bh);
hipMemcpy(bgr_host_AB, bgr_device_AB, full_ann_size_AB * sizeof(unsigned char), hipMemcpyDeviceToHost);
hipMemcpy(bgr_host_BA, bgr_device_BA, full_ann_size_BA * sizeof(unsigned char), hipMemcpyDeviceToHost);
Mat ebgrAB(ah, aw, CV_8UC1, bgr_host_AB);
Mat ebgrBA(bh, bw, CV_8UC1, bgr_host_BA);
imencode(".png", ebgrAB, buf);
int sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_a);
fwrite(&(buf[0]), sizeof(uchar), sz, fp_a);
imencode(".png", ebgrBA, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_b);
fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_b);
// error ab
reverse_dist << <blocksPerGridAB, threadsPerBlock >> >(ann_device_AB, rannd_device_AB, annd_device_BA, aw, ah, bw, bh);
reverse_dist << <blocksPerGridBA, threadsPerBlock >> >(ann_device_BA, rannd_device_BA, annd_device_AB, bw, bh, aw, ah);
convert_float2bgr << <blocksPerGridAB, threadsPerBlock >> >(rannd_device_AB, bgr_device_AB, aw, ah);
convert_float2bgr << <blocksPerGridBA, threadsPerBlock >> >(rannd_device_BA, bgr_device_BA, bw, bh);
hipMemcpy(bgr_host_AB, bgr_device_AB, full_ann_size_AB * sizeof(unsigned char), hipMemcpyDeviceToHost);
hipMemcpy(bgr_host_BA, bgr_device_BA, full_ann_size_BA * sizeof(unsigned char), hipMemcpyDeviceToHost);
Mat rbgrAB(ah, aw, CV_8UC1, bgr_host_AB);
Mat rbgrBA(bh, bw, CV_8UC1, bgr_host_BA);
imencode(".png", rbgrAB, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_a);
fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_a);
imencode(".png", rbgrBA, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_b);
fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_b);
}
hipFree(params_device_AB);
hipFree(ann_device_AB);
hipFree(rann_device_AB);
hipFree(annd_device_AB);
hipFree(rannd_device_AB);
hipFree(params_device_BA);
hipFree(ann_device_BA);
hipFree(rann_device_BA);
hipFree(annd_device_BA);
hipFree(rannd_device_BA);
hipFree(bgr_device_AB);
hipFree(bgr_device_BA);
free(ann_host_AB);
free(ann_host_BA);
free(annd_host_AB);
free(annd_host_BA);
free(params_host);
free(bgr_host_AB);
free(bgr_host_BA);
for (int i = 0; i < numlayer; i++)
{
hipFree(data_A[i]);
hipFree(data_B[i]);
}
}
| ce1bcb687bdff616f59c5af9e5753df2a1a4112f.cu | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
#include "caffe/util/math_functions.hpp"
#include "caffe/common.hpp"
#include "GeneralizedPatchMatch.cuh"
#include "Combo.cuh"
#define ENABLE_VIS 0
struct Parameters
{
std::vector<std::string> layers; //which layers used as content
std::vector<std::string> layernames; //which layers used as content
std::vector<std::string> datanames; //which layers used as content
int patch_size0;
int iter;
};
Combo::Combo()
{
}
Combo::~Combo()
{
}
void Combo::SetGPU(int no)
{
int devCount;
cudaGetDeviceCount(&devCount);
wcout << "CUDA Devices: " << endl << endl;
for (int i = 0; i < devCount; ++i)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, i);
size_t totalMem = 0;
size_t freeMem = 0;
cudaSetDevice(i);
cudaMemGetInfo(&freeMem, &totalMem);
wcout << "GPU " << i << ", Name = " << props.name << ", free = " << freeMem << ", total = " << totalMem << endl;
}
cudaSetDevice(no);
int num = -1;
size_t totalMem = 0;
size_t freeMem = 0;
cudaGetDevice(&num);
cudaMemGetInfo(&freeMem, &totalMem);
wcout << "Current GPU = " << num << ", free = " << freeMem << ", total = " << totalMem << endl;
}
bool Combo::LoadA(const char* file_A)
{
img_AL_col = imread(file_A);
if (img_AL_col.empty())
{
cout << "Error: Source image cannot read!" << endl;
waitKey();
return false;
}
img_AL = Mat::zeros(img_AL_col.size(), CV_8UC3);
// convert to grayscale image
{
Mat gray(img_AL_col.size(), CV_8UC3);
cvtColor(img_AL_col, gray, cv::COLOR_BGR2Lab);
#pragma omp parallel for
for (int r = 0; r < img_AL.rows; ++r)
{
for (int c = 0; c < img_AL.cols; ++c)
{
uchar g = gray.at<Vec3b>(r, c)[0];
img_AL.at<Vec3b>(r, c) = Vec3b(g, g, g);
}
}
}
return true;
}
bool Combo::LoadBP(const char* file_BP)
{
img_BPL_col = imread(file_BP);
if (img_BPL_col.empty())
{
cout << "Error: Reference image cannot read!" << endl;
waitKey();
return false;
}
img_BPL = Mat::zeros(img_BPL_col.size(), CV_8UC3);
// convert to grayscale image
{
Mat gray(img_BPL_col.size(), CV_8UC3);
cvtColor(img_BPL_col, gray, cv::COLOR_BGR2Lab);
#pragma omp parallel for
for (int r = 0; r < img_BPL.rows; ++r)
{
for (int c = 0; c < img_BPL.cols; ++c)
{
uchar g = gray.at<Vec3b>(r, c)[0];
img_BPL.at<Vec3b>(r, c) = Vec3b(g, g, g);
}
}
}
return true;
}
void Combo::GetASize(int& width, int& height)
{
width = img_AL.cols;
height = img_AL.rows;
}
void Combo::GetBPSize(int& width, int& height)
{
width = img_BPL.cols;
height = img_BPL.rows;
}
void Combo::ComputeDist(Classifier& classifier_A, Classifier& classifier_B,
FILE* fp_a, FILE* fp_b, const char* ff_a, const char* ff_b)
{
if (img_BPL.empty())
{
printf("Error: Image2 is empty!\n");
return;
}
if(img_AL.empty())
{
printf("Error: Image1 is empty!\n");
return;
}
const int param_size = 9;
int aw = img_AL.cols;
int ah = img_AL.rows;
int bw = img_BPL.cols;
int bh = img_BPL.rows;
int *params_host, *params_device_AB, *params_device_BA;
unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA;
unsigned int *rann_device_AB, *rann_device_BA;
float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA;
float *rannd_device_AB, *rannd_device_BA;
unsigned char* bgr_device_AB, *bgr_device_BA, *bgr_host_AB, *bgr_host_BA;
//set parameters
Parameters params;
params.layers.push_back("conv5_1/bn");
params.layers.push_back("conv4_1/bn");
params.layers.push_back("conv3_1/bn");
params.layers.push_back("conv2_1/bn");
params.layers.push_back("conv1_1/bn");
std::vector<int> sizes;
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
sizes.push_back(3);
//scale and enhance
Mat img_BP = img_BPL.clone();
Mat img_A = img_AL.clone();
std::vector<float *> data_A;
data_A.resize(params.layers.size());
std::vector<Dim> data_A_size;
data_A_size.resize(params.layers.size());
classifier_A.Predict(img_A, params.layers, data_A, data_A_size);
std::vector<float *> data_B;
data_B.resize(params.layers.size());
std::vector<Dim> data_B_size;
data_B_size.resize(params.layers.size());
classifier_B.Predict(img_BP, params.layers, data_B, data_B_size);
int full_ann_size_AB = aw * ah;
int full_ann_size_BA = bw * bh;
params_host = (int *)malloc(param_size * sizeof(int));
ann_host_AB = (unsigned int *)malloc(full_ann_size_AB * sizeof(unsigned int));
annd_host_AB = (float *)malloc(full_ann_size_AB * sizeof(float));
bgr_host_AB = (unsigned char*)malloc(full_ann_size_AB * sizeof(unsigned char)* 3);
ann_host_BA = (unsigned int *)malloc(full_ann_size_BA * sizeof(unsigned int));
annd_host_BA = (float *)malloc(full_ann_size_BA * sizeof(float));
bgr_host_BA = (unsigned char*)malloc(full_ann_size_BA * sizeof(unsigned int)* 3);
cudaMalloc(¶ms_device_AB, param_size * sizeof(int));
cudaMalloc(&ann_device_AB, full_ann_size_AB * sizeof(unsigned int));
cudaMalloc(&rann_device_AB, full_ann_size_AB * sizeof(unsigned int));
cudaMalloc(&annd_device_AB, full_ann_size_AB * sizeof(float));
cudaMalloc(&rannd_device_AB, full_ann_size_AB * sizeof(float));
cudaMalloc(&bgr_device_AB, full_ann_size_AB * sizeof(unsigned int));
cudaMalloc(¶ms_device_BA, param_size * sizeof(int));
cudaMalloc(&ann_device_BA, full_ann_size_BA * sizeof(unsigned int));
cudaMalloc(&rann_device_BA, full_ann_size_BA * sizeof(unsigned int));
cudaMalloc(&annd_device_BA, full_ann_size_BA * sizeof(float));
cudaMalloc(&rannd_device_BA, full_ann_size_BA * sizeof(float));
cudaMalloc(&bgr_device_BA, full_ann_size_BA * sizeof(unsigned char));
int numlayer = params.layers.size();
ifstream aflow_input;
aflow_input.open(ff_a);
for (int y = 0; y < ah; y++)
{
for (int x = 0; x < aw; x++)
{
int dx = 0, dy = 0;
aflow_input >> dx;
aflow_input >> dy;
int xbest = x + dx;
int ybest = y + dy;
ann_host_AB[y * aw + x] = XY_TO_INT(xbest, ybest);
}
}
aflow_input.close();
ifstream bflow_input;
bflow_input.open(ff_b);
for (int y = 0; y < bh; y++)
{
for (int x = 0; x < bw; x++)
{
int dx = 0, dy = 0;
bflow_input >> dx;
bflow_input >> dy;
int xbest = x + dx;
int ybest = y + dy;
ann_host_BA[y * bw + x] = XY_TO_INT(xbest, ybest);
}
}
bflow_input.close();
cudaMemcpy(ann_device_AB, ann_host_AB, full_ann_size_AB * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(ann_device_BA, ann_host_BA, full_ann_size_BA * sizeof(unsigned int), cudaMemcpyHostToDevice);
dim3 blocksPerGridAB(aw / 20 + 1, ah / 20 + 1, 1);
dim3 blocksPerGridBA(bw / 20 + 1, bh / 20 + 1, 1);
dim3 threadsPerBlock(20, 20, 1);
reverse_flow << <blocksPerGridAB, threadsPerBlock >> >(ann_device_AB, ann_device_BA, rann_device_AB, ah, aw, bh, bw);
reverse_flow << <blocksPerGridBA, threadsPerBlock >> >(ann_device_BA, ann_device_AB, rann_device_BA, bh, bw, ah, aw);
Mat result_AB = reconstruct_avg(img_AL_col, img_BPL_col, ann_host_AB, sizes[numlayer - 1]);
cudaMemcpy(ann_host_AB, rann_device_AB, full_ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToHost);
Mat reverse_AB = reconstruct_avg(img_AL_col, img_AL_col, ann_host_AB, sizes[numlayer - 1]);
fwrite(&ah, sizeof(int), 1, fp_a);
fwrite(&aw, sizeof(int), 1, fp_a);
fwrite(&bh, sizeof(int), 1, fp_b);
fwrite(&bw, sizeof(int), 1, fp_b);
std::vector<uchar> buf;
imencode(".png", result_AB, buf);
int sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_a);
fwrite(&(buf[0]), sizeof(uchar), sz, fp_a);
imencode(".png", reverse_AB, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_a);
fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_a);
Mat result_BA = reconstruct_avg(img_BPL_col, img_AL_col, ann_host_BA, sizes[numlayer - 1]);
cudaMemcpy(ann_host_BA, rann_device_BA, full_ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToHost);
Mat reverse_BA = reconstruct_avg(img_BPL_col, img_BPL_col, ann_host_BA, sizes[numlayer - 1]);
imencode(".png", result_BA, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_b);
fwrite(&(buf[0]), sizeof(uchar), sz, fp_b);
imencode(".png", reverse_BA, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_b);
fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_b);
// compute feature distance for each layer
for (int curr_layer = 0; curr_layer < numlayer; curr_layer++)//from 32 to 512
{
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
int scale = pow(2, 4 - curr_layer);
// error ba
//set parameters
params_host[0] = data_A_size[curr_layer].channel;//channels
params_host[1] = data_A_size[curr_layer].height;
params_host[2] = data_A_size[curr_layer].width;
params_host[3] = data_B_size[curr_layer].height;
params_host[4] = data_B_size[curr_layer].width;
params_host[5] = sizes[curr_layer];
params_host[6] = params.iter;
//copy to device
cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
//set parameters
params_host[0] = data_B_size[curr_layer].channel;//channels
params_host[1] = data_B_size[curr_layer].height;
params_host[2] = data_B_size[curr_layer].width;
params_host[3] = data_A_size[curr_layer].height;
params_host[4] = data_A_size[curr_layer].width;
//copy to device
cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice);
compute_dist_norm << <blocksPerGridAB, threadsPerBlock >> >(ann_device_AB, annd_device_AB, data_A[curr_layer], data_B[curr_layer], params_device_AB, aw, ah, scale);
compute_dist_norm << <blocksPerGridBA, threadsPerBlock >> >(ann_device_BA, annd_device_BA, data_B[curr_layer], data_A[curr_layer], params_device_BA, bw, bh, scale);
convert_float2bgr << <blocksPerGridAB, threadsPerBlock >> >(annd_device_AB, bgr_device_AB, aw, ah);
convert_float2bgr << <blocksPerGridBA, threadsPerBlock >> >(annd_device_BA, bgr_device_BA, bw, bh);
cudaMemcpy(bgr_host_AB, bgr_device_AB, full_ann_size_AB * sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaMemcpy(bgr_host_BA, bgr_device_BA, full_ann_size_BA * sizeof(unsigned char), cudaMemcpyDeviceToHost);
Mat ebgrAB(ah, aw, CV_8UC1, bgr_host_AB);
Mat ebgrBA(bh, bw, CV_8UC1, bgr_host_BA);
imencode(".png", ebgrAB, buf);
int sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_a);
fwrite(&(buf[0]), sizeof(uchar), sz, fp_a);
imencode(".png", ebgrBA, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_b);
fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_b);
// error ab
reverse_dist << <blocksPerGridAB, threadsPerBlock >> >(ann_device_AB, rannd_device_AB, annd_device_BA, aw, ah, bw, bh);
reverse_dist << <blocksPerGridBA, threadsPerBlock >> >(ann_device_BA, rannd_device_BA, annd_device_AB, bw, bh, aw, ah);
convert_float2bgr << <blocksPerGridAB, threadsPerBlock >> >(rannd_device_AB, bgr_device_AB, aw, ah);
convert_float2bgr << <blocksPerGridBA, threadsPerBlock >> >(rannd_device_BA, bgr_device_BA, bw, bh);
cudaMemcpy(bgr_host_AB, bgr_device_AB, full_ann_size_AB * sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaMemcpy(bgr_host_BA, bgr_device_BA, full_ann_size_BA * sizeof(unsigned char), cudaMemcpyDeviceToHost);
Mat rbgrAB(ah, aw, CV_8UC1, bgr_host_AB);
Mat rbgrBA(bh, bw, CV_8UC1, bgr_host_BA);
imencode(".png", rbgrAB, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_a);
fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_a);
imencode(".png", rbgrBA, buf);
sz = buf.size();
fwrite(&sz, sizeof(int), 1, fp_b);
fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_b);
}
cudaFree(params_device_AB);
cudaFree(ann_device_AB);
cudaFree(rann_device_AB);
cudaFree(annd_device_AB);
cudaFree(rannd_device_AB);
cudaFree(params_device_BA);
cudaFree(ann_device_BA);
cudaFree(rann_device_BA);
cudaFree(annd_device_BA);
cudaFree(rannd_device_BA);
cudaFree(bgr_device_AB);
cudaFree(bgr_device_BA);
free(ann_host_AB);
free(ann_host_BA);
free(annd_host_AB);
free(annd_host_BA);
free(params_host);
free(bgr_host_AB);
free(bgr_host_BA);
for (int i = 0; i < numlayer; i++)
{
cudaFree(data_A[i]);
cudaFree(data_B[i]);
}
}
|
bb7afc9e5263ea84468bfc6138dc9a748ffe500a.hip | // !!! This is a file automatically generated by hipify!!!
#include "Filters.h"
#include "functions_hip.cuh"
#include <hip/hip_runtime.h>
// Builds an unnormalized convolution filter around kernel_p.
// Radii are half the kernel extents (integer division), so odd kernel
// sizes are the expected case; check_range enables clamping of the
// convolution result to [0, 255].
UnnormalizedFilter::UnnormalizedFilter(
const FilterKernel & kernel_p,
bool check_range):
kernel (kernel_p),
hor_radius ( kernel_p.n_cols / 2),
vert_radius ( kernel_p.n_rows / 2),
check(check_range)
{
// Per-pixel convolution defaults to the CPU implementation.
_conv_function = &UnnormalizedFilter::_conv_CPU;
}
// GPU per-pixel path: a stub that always returns a zero (black) pixel.
// NOTE(review): never selected — the constructor always installs _conv_CPU.
Pixel UnnormalizedFilter::_conv_GPU(const Image &m) const {
return make_tuple(0, 0, 0);
}
// CPU per-pixel path: weighted sum of each colour channel over the
// kernel-sized window m; if `check` is set, each channel sum is clamped
// to the displayable range [0, 255] before being returned.
Pixel UnnormalizedFilter::_conv_CPU(const Image &m) const {
    double acc_r = 0, acc_g = 0, acc_b = 0;
    for (uint row = 0; row < kernel.n_rows; ++row) {
        for (uint col = 0; col < kernel.n_cols; ++col) {
            const double w = kernel(row, col);
            acc_r += w * static_cast<double>(get<0>(m(row, col)));
            acc_g += w * static_cast<double>(get<1>(m(row, col)));
            acc_b += w * static_cast<double>(get<2>(m(row, col)));
        }
    }
    if (check) {
        // Clamp each channel into [0, 255].
        acc_r = acc_r < 0 ? 0 : (acc_r > 255 ? 255 : acc_r);
        acc_g = acc_g < 0 ? 0 : (acc_g > 255 ? 255 : acc_g);
        acc_b = acc_b < 0 ? 0 : (acc_b > 255 ? 255 : acc_b);
    }
    return make_tuple(acc_r, acc_g, acc_b);
}
// Applies whichever convolution routine the constructor selected.
Pixel UnnormalizedFilter::operator () (const Image &m) const {
return (this->*_conv_function)(m);
}
// Convolves img with this filter's kernel on the GPU and returns the
// cropped (valid-area) result.
// NOTE(review): res_rows is derived from hor_radius (which comes from
// n_cols) and res_cols from vert_radius (from n_rows), and the kernel
// flatten below writes with stride hor_size while j runs over vert_size —
// only self-consistent for square kernels; confirm against the
// device-side `compute` kernel before using non-square kernels.
Image UnnormalizedFilter::convolve(const Image& img) const {
    unsigned char* d_img, *d_res;     // device image / device result
    unsigned char* img_raw, *img_res; // host staging buffers (interleaved RGB)
    float* d_ker;                     // device kernel weights
    float* host_ker;                  // host staging buffer for the kernel
    unsigned int res_rows = img.n_rows-2*hor_radius;
    unsigned int res_cols = img.n_cols-2*vert_radius;
    unsigned int hor_size = hor_radius*2 + 1;
    unsigned int vert_size = vert_radius*2 + 1;
    img_raw = (unsigned char*) malloc(img.n_rows*img.n_cols*3);
    img_res = (unsigned char*) malloc(res_rows*res_cols*3);
    host_ker = (float*) malloc(hor_size*vert_size*sizeof(float));
    CUDA_CHECK_RETURN( hipMalloc(
        (void**) &d_img,
        img.n_rows*img.n_cols*3) );
    CUDA_CHECK_RETURN( hipMalloc(
        (void**) &d_res,
        res_rows*res_cols*3) );
    CUDA_CHECK_RETURN( hipMalloc(
        (void**) &d_ker,
        hor_size*vert_size*sizeof(float)) );
    // Flatten the image into an interleaved RGB byte buffer.
    for (uint i = 0; i < img.n_rows; ++i){
        for (uint j = 0; j < img.n_cols; ++j){
            img_raw[(i*img.n_cols + j)*3 + 0] = get<0>(img(i, j));
            img_raw[(i*img.n_cols + j)*3 + 1] = get<1>(img(i, j));
            img_raw[(i*img.n_cols + j)*3 + 2] = get<2>(img(i, j));
        }
    }
    // Flatten the kernel weights for the device.
    for (uint i = 0; i < hor_size; ++i){
        for (uint j = 0; j < vert_size; ++j){
            host_ker[i*hor_size + j] = kernel(i,j);
        }
    }
    CUDA_CHECK_RETURN( hipMemcpy(d_img, img_raw,
        img.n_rows*img.n_cols*3,
        hipMemcpyHostToDevice) );
    CUDA_CHECK_RETURN( hipMemset(d_res,
        0,
        res_rows*res_cols*3) );
    CUDA_CHECK_RETURN( hipMemcpy(d_ker, host_ker,
        hor_size*vert_size*sizeof(float) ,
        hipMemcpyHostToDevice) );
    // One block per output pixel; one thread per (kernel tap, channel).
    dim3 block_grid(res_rows, res_cols);
    dim3 thread_grid(hor_size, vert_size, 3);
    hipLaunchKernelGGL(( compute), dim3(block_grid), dim3(thread_grid), 0, 0,
        res_rows,
        res_cols,
        d_img,
        d_res,
        hor_radius,
        vert_radius,
        d_ker);
    // Surface launch-configuration errors before the blocking copy.
    CUDA_CHECK_RETURN( hipGetLastError() );
    CUDA_CHECK_RETURN( hipMemcpy(img_res, d_res,
        res_rows*res_cols*3,
        hipMemcpyDeviceToHost) );
    // Unpack the device result into an Image.
    Image res(res_rows, res_cols);
    for (uint i = 0; i < res_rows; ++i){
        for (uint j = 0; j < res_cols; ++j){
            get<0>(res(i, j)) = img_res[(i*res_cols + j)*3 + 0];
            get<1>(res(i, j)) = img_res[(i*res_cols + j)*3 + 1];
            get<2>(res(i, j)) = img_res[(i*res_cols + j)*3 + 2];
        }
    }
    CUDA_CHECK_RETURN( hipFree(d_img) );
    CUDA_CHECK_RETURN( hipFree(d_res) );
    CUDA_CHECK_RETURN( hipFree(d_ker) );  // fix: d_ker was leaked
    free((void*) img_raw);
    free((void*) img_res);
    free((void*) host_ker);               // fix: host_ker was leaked
    return res;
}
// Wraps a kernel for single-channel (double-valued) convolution;
// radii are half the kernel extents (odd sizes expected).
FloatFilter::FloatFilter(const FilterKernel & kernel_p):
kernel (kernel_p),
hor_radius (kernel_p.n_cols / 2),
vert_radius (kernel_p.n_rows / 2)
{}
// Convolves the first channel of the window m with the kernel weights and
// returns the raw (unnormalized, unclamped) sum.
double FloatFilter::operator () (const Image &m) const
{
    const uint n_cols = 2 * hor_radius + 1;
    const uint n_rows = 2 * vert_radius + 1;
    double acc = 0;
    for (uint row = 0; row < n_rows; ++row) {
        for (uint col = 0; col < n_cols; ++col) {
            acc += static_cast<double>(get<0>(m(row, col))) * kernel(row, col);
        }
    }
    return acc;
}
// LBP always works on a fixed 3x3 neighbourhood, hence radius 1.
LBPFilter::LBPFilter():
hor_radius (1),
vert_radius (1)
{}
// Local Binary Pattern code of the 3x3 window m: one bit is set per
// neighbour whose first channel is >= the centre pixel's, using the
// *_MASK bit constants for the eight compass directions.
unsigned char LBPFilter::operator () (const Image& m) const
{
    const uint centre = get<0>(m(1, 1));
    char code = 0;
    if (get<0>(m(1, 2)) >= centre) code |= R_MASK;
    if (get<0>(m(0, 2)) >= centre) code |= RU_MASK;
    if (get<0>(m(0, 1)) >= centre) code |= U_MASK;
    if (get<0>(m(0, 0)) >= centre) code |= LU_MASK;
    if (get<0>(m(1, 0)) >= centre) code |= L_MASK;
    if (get<0>(m(2, 0)) >= centre) code |= LD_MASK;
    if (get<0>(m(2, 1)) >= centre) code |= D_MASK;
    if (get<0>(m(2, 2)) >= centre) code |= RD_MASK;
    return code;
}
// Builds a normalized (sums to 1) Gaussian kernel of size
// (2*radius + 1) x (2*radius + 1) with standard deviation sigma.
// Throws std::invalid_argument on a negative radius or non-positive sigma.
FilterKernel make_gaussian_kernel(double sigma, int radius){
    if (radius < 0)
        throw std::invalid_argument("Wrong filter radius");
    if (sigma <= 0.0)
        throw std::invalid_argument("Wrong filter sigma");
    uint size = radius * 2 + 1;
    FilterKernel gauss_kernel(size, size);
    double sum = 0.0;
    for (uint i = 0; i < size; ++i) {
        for (uint j = 0; j < size; ++j) {
            // Cast before subtracting: i/j are unsigned, so the original
            // (i - radius) wrapped around for cells left of the centre
            // and only produced the right square by modular accident.
            double di = static_cast<double>(i) - radius;
            double dj = static_cast<double>(j) - radius;
            gauss_kernel(i, j) = ::exp(-0.5 * (di*di / (sigma*sigma)
                + dj*dj / (sigma*sigma)))
                / (2 * M_PI * sigma*sigma);
            sum += gauss_kernel(i, j);
        }
    }
    // Normalize so the weights sum to exactly 1.
    for (uint i = 0; i < size; ++i){
        for (uint j = 0; j < size; ++j){
            gauss_kernel(i, j) /= sum;
        }
    }
    return gauss_kernel;
}
| bb7afc9e5263ea84468bfc6138dc9a748ffe500a.cu | #include "Filters.h"
#include "functions.cuh"
#include <cuda_runtime.h>
// Builds an unnormalized convolution filter around kernel_p.
// Radii are half the kernel extents (integer division), so odd kernel
// sizes are the expected case; check_range enables clamping of the
// convolution result to [0, 255].
UnnormalizedFilter::UnnormalizedFilter(
const FilterKernel & kernel_p,
bool check_range):
kernel (kernel_p),
hor_radius ( kernel_p.n_cols / 2),
vert_radius ( kernel_p.n_rows / 2),
check(check_range)
{
// Per-pixel convolution defaults to the CPU implementation.
_conv_function = &UnnormalizedFilter::_conv_CPU;
}
// GPU per-pixel path: a stub that always returns a zero (black) pixel.
// NOTE(review): never selected — the constructor always installs _conv_CPU.
Pixel UnnormalizedFilter::_conv_GPU(const Image &m) const {
return make_tuple(0, 0, 0);
}
// CPU per-pixel path: weighted sum of each colour channel over the
// kernel-sized window m; if `check` is set the result is clamped to the
// displayable range [0, 255].
Pixel UnnormalizedFilter::_conv_CPU(const Image &m) const {
double r, g, b, sum_r = 0, sum_g = 0, sum_b = 0;
for (uint i = 0; i < kernel.n_rows; ++i) {
for (uint j = 0; j < kernel.n_cols; ++j) {
r = static_cast<double>(get<0>(m(i, j)));
g = static_cast<double>(get<1>(m(i, j)));
b = static_cast<double>(get<2>(m(i, j)));
r = r * kernel(i, j);
g = g * kernel(i, j);
b = b * kernel(i, j);
sum_r += r;
sum_g += g;
sum_b += b;
}
}
// Optional clamp of each channel into [0, 255].
if (check){
if (sum_r > 255)
sum_r = 255;
else if (sum_r < 0)
sum_r = 0;
if (sum_g > 255)
sum_g = 255;
else if (sum_g < 0)
sum_g = 0;
if (sum_b > 255)
sum_b = 255;
else if (sum_b < 0)
sum_b = 0;
}
return make_tuple(sum_r, sum_g, sum_b);
}
// Applies whichever convolution routine the constructor selected.
Pixel UnnormalizedFilter::operator () (const Image &m) const {
return (this->*_conv_function)(m);
}
// Convolves img with this filter's kernel on the GPU and returns the
// cropped (valid-area) result.
// NOTE(review): res_rows is derived from hor_radius (which comes from
// n_cols) and res_cols from vert_radius (from n_rows), and the kernel
// flatten below writes with stride hor_size while j runs over vert_size —
// only self-consistent for square kernels; confirm against the
// device-side `compute` kernel before using non-square kernels.
Image UnnormalizedFilter::convolve(const Image& img) const {
    unsigned char* d_img, *d_res;     // device image / device result
    unsigned char* img_raw, *img_res; // host staging buffers (interleaved RGB)
    float* d_ker;                     // device kernel weights
    float* host_ker;                  // host staging buffer for the kernel
    unsigned int res_rows = img.n_rows-2*hor_radius;
    unsigned int res_cols = img.n_cols-2*vert_radius;
    unsigned int hor_size = hor_radius*2 + 1;
    unsigned int vert_size = vert_radius*2 + 1;
    img_raw = (unsigned char*) malloc(img.n_rows*img.n_cols*3);
    img_res = (unsigned char*) malloc(res_rows*res_cols*3);
    host_ker = (float*) malloc(hor_size*vert_size*sizeof(float));
    CUDA_CHECK_RETURN( cudaMalloc(
        (void**) &d_img,
        img.n_rows*img.n_cols*3) );
    CUDA_CHECK_RETURN( cudaMalloc(
        (void**) &d_res,
        res_rows*res_cols*3) );
    CUDA_CHECK_RETURN( cudaMalloc(
        (void**) &d_ker,
        hor_size*vert_size*sizeof(float)) );
    // Flatten the image into an interleaved RGB byte buffer.
    for (uint i = 0; i < img.n_rows; ++i){
        for (uint j = 0; j < img.n_cols; ++j){
            img_raw[(i*img.n_cols + j)*3 + 0] = get<0>(img(i, j));
            img_raw[(i*img.n_cols + j)*3 + 1] = get<1>(img(i, j));
            img_raw[(i*img.n_cols + j)*3 + 2] = get<2>(img(i, j));
        }
    }
    // Flatten the kernel weights for the device.
    for (uint i = 0; i < hor_size; ++i){
        for (uint j = 0; j < vert_size; ++j){
            host_ker[i*hor_size + j] = kernel(i,j);
        }
    }
    CUDA_CHECK_RETURN( cudaMemcpy(d_img, img_raw,
        img.n_rows*img.n_cols*3,
        cudaMemcpyHostToDevice) );
    CUDA_CHECK_RETURN( cudaMemset(d_res,
        0,
        res_rows*res_cols*3) );
    CUDA_CHECK_RETURN( cudaMemcpy(d_ker, host_ker,
        hor_size*vert_size*sizeof(float) ,
        cudaMemcpyHostToDevice) );
    // One block per output pixel; one thread per (kernel tap, channel).
    dim3 block_grid(res_rows, res_cols);
    dim3 thread_grid(hor_size, vert_size, 3);
    compute<<<block_grid, thread_grid>>>(
        res_rows,
        res_cols,
        d_img,
        d_res,
        hor_radius,
        vert_radius,
        d_ker);
    // Surface launch-configuration errors before the blocking copy.
    CUDA_CHECK_RETURN( cudaGetLastError() );
    CUDA_CHECK_RETURN( cudaMemcpy(img_res, d_res,
        res_rows*res_cols*3,
        cudaMemcpyDeviceToHost) );
    // Unpack the device result into an Image.
    Image res(res_rows, res_cols);
    for (uint i = 0; i < res_rows; ++i){
        for (uint j = 0; j < res_cols; ++j){
            get<0>(res(i, j)) = img_res[(i*res_cols + j)*3 + 0];
            get<1>(res(i, j)) = img_res[(i*res_cols + j)*3 + 1];
            get<2>(res(i, j)) = img_res[(i*res_cols + j)*3 + 2];
        }
    }
    CUDA_CHECK_RETURN( cudaFree(d_img) );
    CUDA_CHECK_RETURN( cudaFree(d_res) );
    CUDA_CHECK_RETURN( cudaFree(d_ker) );  // fix: d_ker was leaked
    free((void*) img_raw);
    free((void*) img_res);
    free((void*) host_ker);                // fix: host_ker was leaked
    return res;
}
// Wraps a kernel for single-channel (double-valued) convolution;
// radii are half the kernel extents (odd sizes expected).
FloatFilter::FloatFilter(const FilterKernel & kernel_p):
kernel (kernel_p),
hor_radius (kernel_p.n_cols / 2),
vert_radius (kernel_p.n_rows / 2)
{}
// Convolves the first channel of the window m with the kernel weights
// and returns the raw (unnormalized, unclamped) sum.
double FloatFilter::operator () (const Image &m) const
{
uint hor_size = 2 * hor_radius + 1;
uint vert_size = 2 * vert_radius + 1;
double r, sum_r = 0;
for (uint i = 0; i < vert_size; ++i) {
for (uint j = 0; j < hor_size; ++j) {
r = static_cast<double>(get<0>(m(i, j)));
r = r * kernel(i, j);
sum_r += r;
}
}
return sum_r;
}
// LBP always works on a fixed 3x3 neighbourhood, hence radius 1.
LBPFilter::LBPFilter():
hor_radius (1),
vert_radius (1)
{}
// Local Binary Pattern code of the 3x3 window m: one bit is set per
// neighbour whose first channel is >= the centre pixel's, using the
// *_MASK bit constants for the eight compass directions.
unsigned char LBPFilter::operator () (const Image& m) const
{
char num = 0;
uint cen_pix = get<0>(m(1,1));
if (get<0>(m(1, 2)) >= cen_pix)
num = num | R_MASK;
if (get<0>(m(0, 2)) >= cen_pix)
num = num | RU_MASK;
if (get<0>(m(0, 1)) >= cen_pix)
num = num | U_MASK;
if (get<0>(m(0, 0)) >= cen_pix)
num = num | LU_MASK;
if (get<0>(m(1, 0)) >= cen_pix)
num = num | L_MASK;
if (get<0>(m(2, 0)) >= cen_pix)
num = num | LD_MASK;
if (get<0>(m(2, 1)) >= cen_pix)
num = num | D_MASK;
if (get<0>(m(2, 2)) >= cen_pix)
num = num | RD_MASK;
return num;
}
// Builds a normalized (sums to 1) Gaussian kernel of size
// (2*radius + 1) x (2*radius + 1) with standard deviation sigma.
// Throws std::invalid_argument on a negative radius or non-positive sigma.
FilterKernel make_gaussian_kernel(double sigma, int radius){
    if (radius < 0)
        throw std::invalid_argument("Wrong filter radius");
    if (sigma <= 0.0)
        throw std::invalid_argument("Wrong filter sigma");
    uint size = radius * 2 + 1;
    FilterKernel gauss_kernel(size, size);
    double sum = 0.0;
    for (uint i = 0; i < size; ++i) {
        for (uint j = 0; j < size; ++j) {
            // Cast before subtracting: i/j are unsigned, so the original
            // (i - radius) wrapped around for cells left of the centre
            // and only produced the right square by modular accident.
            double di = static_cast<double>(i) - radius;
            double dj = static_cast<double>(j) - radius;
            gauss_kernel(i, j) = std::exp(-0.5 * (di*di / (sigma*sigma)
                + dj*dj / (sigma*sigma)))
                / (2 * M_PI * sigma*sigma);
            sum += gauss_kernel(i, j);
        }
    }
    // Normalize so the weights sum to exactly 1.
    for (uint i = 0; i < size; ++i){
        for (uint j = 0; j < size; ++j){
            gauss_kernel(i, j) /= sum;
        }
    }
    return gauss_kernel;
}
|
a28d1f20d48ed7d8d119b9a16db608340e459469.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pp_dynamic_access_offchip_memory_vecadd_repeat.h"
// Element-wise C = A + B, repeated REPS times (REPS comes from the
// header; presumably for benchmarking — confirm). Each block owns a
// contiguous slab of N*blockDim.x elements; each thread strides through
// its block's slab with step blockDim.x.
__global__ void AddVectors(const float* A, const float* B, float* C, int N)
{
    const int first = blockIdx.x * blockDim.x * N + threadIdx.x;
    const int last = first + N * blockDim.x;
    for (int rep = 0; rep < REPS; ++rep) {
        for (int idx = first; idx < last; idx += blockDim.x) {
            C[idx] = A[idx] + B[idx];
        }
    }
}
| a28d1f20d48ed7d8d119b9a16db608340e459469.cu | #include "pp_dynamic_access_offchip_memory_vecadd_repeat.h"
__global__ void AddVectors(const float* A, const float* B, float* C, int N)
{
int blockStartIndex = blockIdx.x * blockDim.x * N;
int threadStartIndex = blockStartIndex + threadIdx.x;
int threadEndIndex = threadStartIndex + N*blockDim.x;
int i,t;
for (t = 0; t < REPS; t++) {
for( i=threadStartIndex; i<threadEndIndex; i=i+blockDim.x ){
C[i] = A[i] + B[i];
}
}
}
|
0e360d3804eb82aa7353a5ef10dcb5bba96ebf23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _2DCONVOLUTION_KERNEL_H_
#define _2DCONVOLUTION_KERNEL_H_
#include <stdio.h>
#include "2Dconvolution.h"
// Matrix multiplication kernel thread specification
// Tiled 2D convolution P = N (*) Mc: each block loads one padded input
// tile into shared memory and the inner TILE_SIZE x TILE_SIZE threads
// each produce one output element.
// Assumes blockDim = (BLOCK_SIZE, BLOCK_SIZE).
// NOTE(review): Mc is declared elsewhere — presumably the KERNEL_SIZE x
// KERNEL_SIZE weights in __constant__ memory; confirm in 2Dconvolution.h.
__global__ void ConvolutionKernel(Matrix N, Matrix P)
{
__shared__ float N_s[BLOCK_SIZE][BLOCK_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
// Output coordinates this thread is responsible for.
int row_o = blockIdx.y * TILE_SIZE + ty;
int col_o = blockIdx.x * TILE_SIZE + tx;
int n = KERNEL_SIZE/2;
// Input coordinates: shifted back by the kernel half-width (halo).
int row_i = row_o - n;
int col_i = col_o - n;
float output = 0.0f;
// ghost element condition: out-of-bounds taps read as zero
if((row_i >= 0) && (row_i < N.height) &&
(col_i >= 0) && (col_i < N.width) ) {
N_s[ty][tx] = N.elements[row_i*N.width + col_i];
}
else{
N_s[ty][tx] = 0.0f;
}
// All threads must finish loading the tile before any thread reads it.
__syncthreads();
// Only the inner TILE_SIZE x TILE_SIZE threads compute output.
if(ty < TILE_SIZE && tx < TILE_SIZE){
for(int i = 0; i < KERNEL_SIZE; i++) {
for(int j = 0; j < KERNEL_SIZE; j++) {
output += Mc[i][j] * N_s[i+ty][j+tx];
}
}
// Guard the store: the last tile row/column may extend past P.
if(row_o < P.height && col_o < P.width){
P.elements[row_o * P.width + col_o] = output;
}
}
//
}
#endif // #ifndef _2DCONVOLUTION_KERNEL_H_
| 0e360d3804eb82aa7353a5ef10dcb5bba96ebf23.cu | #ifndef _2DCONVOLUTION_KERNEL_H_
#define _2DCONVOLUTION_KERNEL_H_
#include <stdio.h>
#include "2Dconvolution.h"
// Matrix multiplication kernel thread specification
// Tiled 2D convolution P = N (*) Mc: each block loads one padded input
// tile into shared memory and the inner TILE_SIZE x TILE_SIZE threads
// each produce one output element.
// Assumes blockDim = (BLOCK_SIZE, BLOCK_SIZE).
// NOTE(review): Mc is declared elsewhere — presumably the KERNEL_SIZE x
// KERNEL_SIZE weights in __constant__ memory; confirm in 2Dconvolution.h.
__global__ void ConvolutionKernel(Matrix N, Matrix P)
{
__shared__ float N_s[BLOCK_SIZE][BLOCK_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
// Output coordinates this thread is responsible for.
int row_o = blockIdx.y * TILE_SIZE + ty;
int col_o = blockIdx.x * TILE_SIZE + tx;
int n = KERNEL_SIZE/2;
// Input coordinates: shifted back by the kernel half-width (halo).
int row_i = row_o - n;
int col_i = col_o - n;
float output = 0.0f;
// ghost element condition: out-of-bounds taps read as zero
if((row_i >= 0) && (row_i < N.height) &&
(col_i >= 0) && (col_i < N.width) ) {
N_s[ty][tx] = N.elements[row_i*N.width + col_i];
}
else{
N_s[ty][tx] = 0.0f;
}
// All threads must finish loading the tile before any thread reads it.
__syncthreads();
// Only the inner TILE_SIZE x TILE_SIZE threads compute output.
if(ty < TILE_SIZE && tx < TILE_SIZE){
for(int i = 0; i < KERNEL_SIZE; i++) {
for(int j = 0; j < KERNEL_SIZE; j++) {
output += Mc[i][j] * N_s[i+ty][j+tx];
}
}
// Guard the store: the last tile row/column may extend past P.
if(row_o < P.height && col_o < P.width){
P.elements[row_o * P.width + col_o] = output;
}
}
//
}
#endif // #ifndef _2DCONVOLUTION_KERNEL_H_
|
769e21351315042766406cec4f8303e599ecc393.hip | // !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <algorithm>
#include <memory>
#include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"
/* Block-wise partial dot product: each thread accumulates a grid-stride
   partial sum of A[i]*B[i], the block reduces the partials in shared
   memory, and thread 0 writes the block total to partial_C[blockIdx.x].
   Requires blockDim.x == 256 (the size of `cache`) and a power-of-two
   block size for the tree reduction. */
__global__ static void dot_product(const float* A, const float* B, float* partial_C, int elements_num)
{
// One slot per thread in the block; blockDim.x must equal 256.
__shared__ float cache[256]; // == threadsPerBlock
// Grid-stride traversal over the input vectors.
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float tmp{ 0.f };
while (tid < elements_num) {
tmp += A[tid] * B[tid];
tid += blockDim.x * gridDim.x;
}
// Publish this thread's partial sum to shared memory.
cache[cacheIndex] = tmp;
// Every partial must be visible before the reduction starts.
__syncthreads();
// Tree reduction; halves the active range each iteration, so
// blockDim.x must be a power of two.
int i = blockDim.x / 2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
// Barrier sits outside the divergent branch: the loop condition is
// uniform across the block, so all threads reach it each iteration.
__syncthreads();
i /= 2;
}
// A single thread writes the block result.
if (cacheIndex == 0)
partial_C[blockIdx.x] = cache[0];
}
/* Plain device-memory implementation: copy A and B to the device,
   launch the partial-dot-product kernel, copy the per-block partials
   back and finish the sum on the CPU. The GPU time in milliseconds is
   reported through elapsed_time via events. Always returns 0.
   NOTE(review): none of the runtime-API return codes are checked. */
static int dot_product_gpu_1(const float* A, const float* B, float* value, int elements_num, float* elapsed_time)
{
// Events bracket all device work for timing.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
size_t lengthA{ elements_num * sizeof(float) }, lengthB{ elements_num * sizeof(float) };
float *d_A{ nullptr }, *d_B{ nullptr }, *d_partial_C{ nullptr };
hipMalloc(&d_A, lengthA);
hipMalloc(&d_B, lengthB);
hipMemcpy(d_A, A, lengthA, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, lengthB, hipMemcpyHostToDevice);
// Block size must stay in sync with the kernel's 256-slot shared cache;
// the grid is capped at 64 blocks (the kernel grid-strides over the rest).
const int threadsPerBlock{ 256 };
const int blocksPerGrid = ::min(64, (elements_num + threadsPerBlock - 1) / threadsPerBlock);
size_t lengthC{ blocksPerGrid * sizeof(float) };
hipMalloc(&d_partial_C, lengthC);
dot_product << < blocksPerGrid, threadsPerBlock >> >(d_A, d_B, d_partial_C, elements_num);
// No explicit sync needed: the blocking copy below is ordered after the
// kernel on the same (default) stream.
//hipDeviceSynchronize();
std::unique_ptr<float[]> partial_C(new float[blocksPerGrid]);
hipMemcpy(partial_C.get(), d_partial_C, lengthC, hipMemcpyDeviceToHost);
// Final reduction of the per-block partial sums on the host.
*value = 0.f;
for (int i = 0; i < blocksPerGrid; ++i) {
(*value) += partial_C[i];
}
hipFree(d_A);
hipFree(d_B);
hipFree(d_partial_C);
hipEventRecord(stop, 0);
// Wait for the stop event so the elapsed time below is valid.
hipEventSynchronize(stop);
hipEventElapsedTime(elapsed_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
}
/* Zero-copy implementation: the input/output buffers live in mapped,
   page-locked host memory and the kernel accesses them directly, so no
   explicit device copies are needed. Writes the dot product to *value
   and the GPU time (ms) to elapsed_time. Returns 0 on success, -1 if
   the device cannot map host memory.
   NOTE(review): most runtime-API return codes are unchecked. */
static int dot_product_gpu_2(const float* A, const float* B, float* value, int elements_num, float* elapsed_time)
{
    hipDeviceProp_t prop;
    int count;
    hipGetDeviceCount(&count);
    //fprintf(stderr, "device count: %d\n", count);
    int whichDevice;
    hipGetDevice(&whichDevice);
    hipGetDeviceProperties(&prop, whichDevice);
    // Zero-copy requires the device to support mapping host memory.
    if (prop.canMapHostMemory != 1) {
        fprintf(stderr, "Device cannot map memory.\n");
        return -1;
    }
    // Must be set before any mapped allocation is made.
    hipSetDeviceFlags(hipDeviceMapHost);
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    // Block size matches the kernel's 256-slot shared cache; grid capped at 64.
    const int threadsPerBlock{ 256 };
    const int blocksPerGrid = ::min(64, (elements_num + threadsPerBlock - 1) / threadsPerBlock);
    size_t lengthA{ elements_num * sizeof(float) }, lengthB{ elements_num * sizeof(float) };
    float *d_A{ nullptr }, *d_B{ nullptr }, *d_partial_C{ nullptr };
    float *a{ nullptr }, *b{ nullptr }, *partial_c{ nullptr };
    // Pinned + mapped host allocations; write-combined for the inputs,
    // which the CPU only writes and the GPU only reads.
    hipHostMalloc(&a, lengthA, hipHostMallocWriteCombined | hipHostMallocMapped);
    hipHostMalloc(&b, lengthB, hipHostMallocWriteCombined | hipHostMallocMapped);
    hipHostMalloc(&partial_c, blocksPerGrid * sizeof(float), hipHostMallocMapped);
    // Device-side aliases of the mapped host buffers.
    hipHostGetDevicePointer(&d_A, a, 0);
    hipHostGetDevicePointer(&d_B, b, 0);
    hipHostGetDevicePointer(&d_partial_C, partial_c, 0);
    memcpy(a, A, lengthA);
    memcpy(b, B, lengthB);
    dot_product << < blocksPerGrid, threadsPerBlock >> >(d_A, d_B, d_partial_C, elements_num);
    // The host reads partial_c next, so we must wait for the kernel.
    hipDeviceSynchronize();
    *value = 0.f;
    for (int i = 0; i < blocksPerGrid; ++i) {
        (*value) += partial_c[i];
    }
    // Fix: free the pointers returned by hipHostMalloc; the previous code
    // passed the device-side aliases d_A/d_B/d_partial_C to hipHostFree.
    hipHostFree(a);
    hipHostFree(b);
    hipHostFree(partial_c);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(elapsed_time, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
// Entry point: computes the dot product of A and B on the GPU, writing
// the result to *value and the measured GPU time (ms) to *elapsed_time.
// Forwards the selected implementation's status code (0 on success).
int dot_product_gpu(const float* A, const float* B, float* value, int elements_num, float* elapsed_time)
{
    //return dot_product_gpu_1(A, B, value, elements_num, elapsed_time); // plain device-memory version
    return dot_product_gpu_2(A, B, value, elements_num, elapsed_time); // zero-copy version
}
| 769e21351315042766406cec4f8303e599ecc393.cu | #include "funset.hpp"
#include <iostream>
#include <algorithm>
#include <memory>
#include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"
/* Block-wise partial dot product: each thread accumulates a grid-stride
   partial sum of A[i]*B[i], the block reduces the partials in shared
   memory, and thread 0 writes the block total to partial_C[blockIdx.x].
   Requires blockDim.x == 256 (the size of `cache`) and a power-of-two
   block size for the tree reduction. */
__global__ static void dot_product(const float* A, const float* B, float* partial_C, int elements_num)
{
// One slot per thread in the block; blockDim.x must equal 256.
__shared__ float cache[256]; // == threadsPerBlock
// Grid-stride traversal over the input vectors.
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float tmp{ 0.f };
while (tid < elements_num) {
tmp += A[tid] * B[tid];
tid += blockDim.x * gridDim.x;
}
// Publish this thread's partial sum to shared memory.
cache[cacheIndex] = tmp;
// Every partial must be visible before the reduction starts.
__syncthreads();
// Tree reduction; halves the active range each iteration, so
// blockDim.x must be a power of two.
int i = blockDim.x / 2;
while (i != 0) {
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
// Barrier sits outside the divergent branch: the loop condition is
// uniform across the block, so all threads reach it each iteration.
__syncthreads();
i /= 2;
}
// A single thread writes the block result.
if (cacheIndex == 0)
partial_C[blockIdx.x] = cache[0];
}
/* Plain device-memory implementation: copy A and B to the device,
   launch the partial-dot-product kernel, copy the per-block partials
   back and finish the sum on the CPU. The GPU time in milliseconds is
   reported through elapsed_time via CUDA events. Always returns 0.
   NOTE(review): none of the runtime-API return codes are checked. */
static int dot_product_gpu_1(const float* A, const float* B, float* value, int elements_num, float* elapsed_time)
{
// Events bracket all device work for timing.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
size_t lengthA{ elements_num * sizeof(float) }, lengthB{ elements_num * sizeof(float) };
float *d_A{ nullptr }, *d_B{ nullptr }, *d_partial_C{ nullptr };
cudaMalloc(&d_A, lengthA);
cudaMalloc(&d_B, lengthB);
cudaMemcpy(d_A, A, lengthA, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, lengthB, cudaMemcpyHostToDevice);
// Block size must stay in sync with the kernel's 256-slot shared cache;
// the grid is capped at 64 blocks (the kernel grid-strides over the rest).
const int threadsPerBlock{ 256 };
const int blocksPerGrid = std::min(64, (elements_num + threadsPerBlock - 1) / threadsPerBlock);
size_t lengthC{ blocksPerGrid * sizeof(float) };
cudaMalloc(&d_partial_C, lengthC);
dot_product << < blocksPerGrid, threadsPerBlock >> >(d_A, d_B, d_partial_C, elements_num);
// No explicit sync needed: the blocking copy below is ordered after the
// kernel on the same (default) stream.
//cudaDeviceSynchronize();
std::unique_ptr<float[]> partial_C(new float[blocksPerGrid]);
cudaMemcpy(partial_C.get(), d_partial_C, lengthC, cudaMemcpyDeviceToHost);
// Final reduction of the per-block partial sums on the host.
*value = 0.f;
for (int i = 0; i < blocksPerGrid; ++i) {
(*value) += partial_C[i];
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_partial_C);
cudaEventRecord(stop, 0);
// Wait for the stop event so the elapsed time below is valid.
cudaEventSynchronize(stop);
cudaEventElapsedTime(elapsed_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
/* Zero-copy implementation: the input/output buffers live in mapped,
   page-locked host memory and the kernel accesses them directly, so no
   explicit device copies are needed. Writes the dot product to *value
   and the GPU time (ms) to elapsed_time. Returns 0 on success, -1 if
   the device cannot map host memory.
   NOTE(review): most runtime-API return codes are unchecked. */
static int dot_product_gpu_2(const float* A, const float* B, float* value, int elements_num, float* elapsed_time)
{
    cudaDeviceProp prop;
    int count;
    cudaGetDeviceCount(&count);
    //fprintf(stderr, "device count: %d\n", count);
    int whichDevice;
    cudaGetDevice(&whichDevice);
    cudaGetDeviceProperties(&prop, whichDevice);
    // Zero-copy requires the device to support mapping host memory.
    if (prop.canMapHostMemory != 1) {
        fprintf(stderr, "Device cannot map memory.\n");
        return -1;
    }
    // Must be set before any mapped allocation is made.
    cudaSetDeviceFlags(cudaDeviceMapHost);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Block size matches the kernel's 256-slot shared cache; grid capped at 64.
    const int threadsPerBlock{ 256 };
    const int blocksPerGrid = std::min(64, (elements_num + threadsPerBlock - 1) / threadsPerBlock);
    size_t lengthA{ elements_num * sizeof(float) }, lengthB{ elements_num * sizeof(float) };
    float *d_A{ nullptr }, *d_B{ nullptr }, *d_partial_C{ nullptr };
    float *a{ nullptr }, *b{ nullptr }, *partial_c{ nullptr };
    // Pinned + mapped host allocations; write-combined for the inputs,
    // which the CPU only writes and the GPU only reads.
    cudaHostAlloc(&a, lengthA, cudaHostAllocWriteCombined | cudaHostAllocMapped);
    cudaHostAlloc(&b, lengthB, cudaHostAllocWriteCombined | cudaHostAllocMapped);
    cudaHostAlloc(&partial_c, blocksPerGrid * sizeof(float), cudaHostAllocMapped);
    // Device-side aliases of the mapped host buffers.
    cudaHostGetDevicePointer(&d_A, a, 0);
    cudaHostGetDevicePointer(&d_B, b, 0);
    cudaHostGetDevicePointer(&d_partial_C, partial_c, 0);
    memcpy(a, A, lengthA);
    memcpy(b, B, lengthB);
    dot_product << < blocksPerGrid, threadsPerBlock >> >(d_A, d_B, d_partial_C, elements_num);
    // The host reads partial_c next, so we must wait for the kernel.
    // (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // direct replacement.)
    cudaDeviceSynchronize();
    *value = 0.f;
    for (int i = 0; i < blocksPerGrid; ++i) {
        (*value) += partial_c[i];
    }
    // Fix: free the pointers returned by cudaHostAlloc; the previous code
    // passed the device-side aliases d_A/d_B/d_partial_C to cudaFreeHost.
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(partial_c);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(elapsed_time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
// Dispatch to the selected GPU dot-product implementation.
// Returns that implementation's status code (0 on success).
int dot_product_gpu(const float* A, const float* B, float* value, int elements_num, float* elapsed_time)
{
	//return dot_product_gpu_1(A, B, value, elements_num, elapsed_time); // plain implementation (disabled)
	return dot_product_gpu_2(A, B, value, elements_num, elapsed_time);   // zero-copy-memory implementation
}
|
d34bd5ce60575094a13d7d6e59c5be9a80614101.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "analyse.hpp"
#include <helper_cuda.h>
extern bool done;
//
// Constructor: derive per-unit timing constants from the sample rate and the
// requested frequency range, build the detection-unit tables and cut-waves,
// and create the two streams used by AnalyzeData / FilterData.
//   SampleFq - input sample rate [Hz]
//   base_fq  - lowest detection frequency [Hz]
//   max_fq   - highest detection frequency [Hz]
// NOTE(review): t, f, interp and start_interp are class/file-scope constants
// not visible in this chunk; their meaning below is inferred from usage.
DetectionUnits::DetectionUnits(int SampleFq, float base_fq, float max_fq)
:nz(SampleFq), omh_base(base_fq), omh_max(max_fq),
ih(NULL), clen(NULL), sdds(NULL), sddc(NULL), spec(NULL),
aasc(NULL),
spec2(NULL), fspec(NULL), mask(NULL), comh(NULL), tidx(NULL),
ws(NULL), wc(NULL), fm(0), omh(0)
{
printf("SampleFreq=%d\n", SampleFq);
printf("minFreq=%f\n", base_fq);
printf("maxFreq=%f\n", max_fq);
nnz1=t*nz;            // sample count at the native rate
h=1.0/nz;
nnz2=interp*nnz1;     // sample count at the interpolated rate
dt1=1.0/nz;           // time step at the native rate
dt2=1.0/(nz*interp);  // time step at the interpolated rate
gm=-1;                // unit index where interpolation starts; set in init_detection_unit
// Seed the first (lowest-frequency) detection unit.
cutlen.push_back(nnz1/omh_base);
omh.push_back((float)nnz1/cutlen[0]);
om.push_back(omh[0]*2*M_PI);
init_detection_unit();
init_cutwaves();
checkCudaErrors(hipStreamCreate(&stream[0]));
checkCudaErrors(hipStreamCreate(&stream[1]));
// quick_sort in kernel5 recurses on the device; give each thread extra stack.
checkCudaErrors(hipDeviceSetLimit(hipLimitStackSize, 8*1024));
}
//
// Build the geometric ladder of detection units from omh_base up to omh_max.
// Each unit i has a window length cutlen[i] = cutlen[i-1]/f (f > 1 assumed),
// hence a center frequency omh[i] that grows by the same factor.  Once the
// window drops below start_interp samples (unit gm onward), windows are
// expressed in interpolated samples (nnz2) instead of native ones (nnz1).
void DetectionUnits::init_detection_unit()
{
int i=0;
printf("min on unit %d=%fHz\n", i, omh[i]);
while(omh[i++] < omh_max)
{
if(gm < 0){
// Still on the native-rate side of the ladder.
cutlen.push_back(cutlen[i-1]/f);
omh.push_back((float)nnz1/cutlen[i]);
om.push_back(omh[i]*2*M_PI);
if(cutlen[i] < start_interp){
gm = i+1;   // first unit that will use the interpolated signal
printf("switch to spline on unit %d=%fHz\n", i, omh[i]);
}
}
else{
if(gm == i){
// Transition unit: re-express the previous length in interpolated samples.
cutlen.push_back(cutlen[i-1]*interp/f);
}
else{
cutlen.push_back(cutlen[i-1]/f);
}
omh.push_back((float)nnz2/cutlen[i]);
om.push_back(omh[i]*2*M_PI);
}
}
fm = i--;   // fm = total number of units; i now indexes the last one
printf("max on unit %d=%fHz\n", i, omh[i]);
// Size the output staging buffer to ~512 MiB, rounded to a multiple of interp.
specbuflen = 512 * 1024 * 1024 / (sizeof(float) * fm * interp) * interp;
}
//
// Precompute one period of sine/cosine "cut-waves" per detection unit in
// managed memory.  Units below gm are tabulated at the native time step dt1,
// units from gm onward at the interpolated step dt2.
// NOTE(review): tables are filled for indices 1..cl only; index 0 is left
// uninitialized — the kernels wrap ih to c before reading, so index 0 is
// presumably never used.  Verify before relying on it.
void DetectionUnits::init_cutwaves()
{
checkCudaErrors(hipMallocManaged(&ws, fm*sizeof(float*), hipMemAttachGlobal));
checkCudaErrors(hipMallocManaged(&wc, fm*sizeof(float*), hipMemAttachGlobal));
int j=0;
for(auto cl: cutlen){
checkCudaErrors(hipMallocManaged(&ws[j], (cl+1)*sizeof(float), hipMemAttachGlobal));
checkCudaErrors(hipMallocManaged(&wc[j], (cl+1)*sizeof(float), hipMemAttachGlobal));
j++;
}
j=0;
for(auto cl: cutlen){
if(j < gm){
// Native-rate units.
for(int i=1; i<=cl; i++){
ws[j][i] = sin(om[j] * dt1 * i);
wc[j][i] = cos(om[j] * dt1 * i);
}
}
else {
// Interpolated-rate units.
for(int i=1; i<=cl; i++){
ws[j][i] = sin(om[j] * dt2 * i);
wc[j][i] = cos(om[j] * dt2 * i);
}
}
j++;
}
}
// Destructor: wait for in-flight AnalyzeData/FilterData calls to drain
// (callcount is their usage counter), then release every managed buffer and
// the two streams.  Pointers are nulled after each free so a partial
// construction or double call stays safe (hipFree(NULL) is a no-op).
DetectionUnits::~DetectionUnits()
{
while(callcount > 0)
std::this_thread::sleep_for(std::chrono::milliseconds(10));
for(int i = 0; i < fm; i++){
hipFree(ws[i]);
hipFree(wc[i]);
}
hipFree(ws);
ws = NULL;
hipFree(wc);
wc = NULL;
hipFree(aasc);
aasc = NULL;
hipFree(spec);
spec = NULL;
hipFree(fspec);
fspec = NULL;
hipFree(spec2);
spec2 = NULL;
hipFree(mask);
mask = NULL;
hipFree(comh);
comh = NULL;
hipFree(tidx);
tidx = NULL;
hipFree(sddc);
sddc = NULL;
hipFree(sdds);
sdds = NULL;
hipFree(clen);
clen = NULL;
hipFree(ih);
ih = NULL;
hipStreamDestroy(stream[0]);
hipStreamDestroy(stream[1]);
}
// Argument bundle passed by value to the analysis kernels (kernel3/kernel4).
struct params
{
const float *z;      // interpolated input signal (zlen samples)
const long zlen;     // length of z
const float *s;      // native-rate input signal (zlen / interp samples)
const int sstart;    // first native-rate sample that produces output (warm-up before it)
const int slope;     // fade-in ramp length for slope_data, in native samples
const int *cutlen;   // per-unit window length
const int gm;        // first unit index that samples z instead of s
const int fm;        // number of detection units
const float **ws;    // per-unit sine cut-wave tables (1-based)
const float **wc;    // per-unit cosine cut-wave tables (1-based)
int *ih;             // per-unit phase index into ws/wc; persisted between launches
float *sdds;         // per-unit running sine correlation sum
float *sddc;         // per-unit running cosine correlation sum
float *spec;         // output magnitudes, laid out [frame * fm + unit]
float *aasc;         // output phases,     laid out [frame * fm + unit]
int *op;             // out (thread 0): number of frames produced this launch
int *ip;             // out (thread 0): next input sample index to process
};
#define thread_N 32
#define thread_N2 1024
// Advance the sliding-window sine/cosine correlation of detection unit `idx`
// by one sample and emit amplitude + phase into output frame `outp`.
//   sdds/sddc : running sums, updated in place
//   ws/wc     : per-unit cut-wave tables, read at phase `ih`
//   c         : window length of this unit (normalization divisor)
//   s_ii/s_ik : sample entering / sample leaving the window
__device__ void process_wave(float *sdds, float *sddc, float *spec, float *aasc, const float **ws, const float **wc, int idx, int ih, int c, int fm, int outp, float s_ii, float s_ik)
{
// Slide the window: add the incoming sample, subtract the outgoing one.
float sds = sdds[idx] + ws[idx][ih]*s_ii - ws[idx][ih]*s_ik;
float sdc = sddc[idx] + wc[idx][ih]*s_ii - wc[idx][ih]*s_ik;
sdds[idx] = sds;
sddc[idx] = sdc;
// Normalize (2.0 is a double literal, so this promotes to double --
// NOTE(review): confirm the double round-trip is intentional).
float sads = 2.0 * sds / c;
float sadc = 2.0 * sdc / c;
// Rotate the (sin, cos) sums by the current cut-wave phase.
float adss = sads * ws[idx][ih];
float adsc = sads * wc[idx][ih];
float adcc = sadc * wc[idx][ih];
float adcs = sadc * ws[idx][ih];
float aadc = adss + adcc;
float aads = adsc - adcs;
spec[outp*fm+idx] = sqrtf(aads * aads + aadc * aadc);  // amplitude
aasc[outp*fm+idx] = atan2f(aads, aadc);                // phase
}
// Advance the running sine/cosine sums of unit `idx` by one sample without
// emitting any output (warm-up / between-frame path).  Arithmetic order is
// kept identical to process_wave's update step.
__device__ void process_wave2(float *sdds, float *sddc, const float **ws, const float **wc, int idx, int ih, float s_ii, float s_ik)
{
    const float w_s = ws[idx][ih];
    const float w_c = wc[idx][ih];
    sdds[idx] = sdds[idx] + w_s * s_ii - w_s * s_ik;
    sddc[idx] = sddc[idx] + w_c * s_ii - w_c * s_ik;
}
// Read data[i] with zero padding outside [0, len) and a linear fade-in ramp
// over the first `slope` samples (no ramp when slope == 0).
__device__ float slope_data(const float *data, int i, int len, int slope)
{
    const bool inside = (0 <= i) && (i < len);
    if (!inside)
        return 0;
    const float v = data[i];
    if (slope != 0 && i < slope)
        return v * (float)i / slope;
    return v;
}
// Read data[i], returning 0 for out-of-range indices (zero padding).
__device__ float limit_data(const float *data, int i, int len)
{
    if (0 <= i && i < len)
        return data[i];
    return 0;
}
// Warm-up + first-chunk analysis kernel: one thread per detection unit
// (idx in [0, p.fm)).  Scans the input from the start; samples before
// `zstart` only update the running sums (process_wave2), later ones also
// emit one spectrum frame per `interp` input samples (process_wave).
// Units with idx < p.gm read the native-rate signal p.s; the rest read the
// interpolated signal p.z on every sample.  Thread 0 publishes the consumed
// sample count (*p.ip) and produced frame count (*p.op) for AnalyzeData.
// NOTE(review): the `ic % interp == 1` cadence assumes interp > 1 — confirm.
__global__ void kernel3(int plen, struct params p)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= p.fm){
return;
}
int zlen = p.zlen;
int slen = zlen / interp;
int sstart = p.sstart;
int zstart = sstart * interp;   // warm-up boundary in interpolated samples
int slope = p.slope;
int zslope = p.slope * interp;  // ramp length in interpolated samples
bool zflag = (idx < p.gm);      // true: this unit samples the native signal
int c = p.cutlen[idx];          // this unit's window length
int ih = p.ih[idx];             // cut-wave phase index
int outp = 0;
int ic = 1;
for(; ic < zlen && outp < plen/interp; ic++){
int ii = (zflag)? ic/interp: ic;
int ik = ii - c;                // sample leaving the window
if(ic % interp == 1){
float s_ii = slope_data((zflag)? p.s: p.z, ii, (zflag)? slen: zlen, slope*((zflag)?1:interp));
float s_ik = slope_data((zflag)? p.s: p.z, ik, (zflag)? slen: zlen, slope*((zflag)?1:interp));
if(ih <= 0)
ih = c;                         // wrap phase; tables are 1-based
if(ic < zstart){
process_wave2(p.sdds, p.sddc, p.ws, p.wc, idx, ih, s_ii, s_ik);
}
else{
process_wave(p.sdds, p.sddc, p.spec, p.aasc, p.ws, p.wc, idx, ih, c, p.fm, outp++, s_ii, s_ik);
}
ih--;
}
else if(!zflag){
// Interpolated-rate units also advance on the in-between samples.
float z_ii = slope_data(p.z, ii, zlen, zslope);
float z_ik = slope_data(p.z, ik, zlen, zslope);
if(ih <= 0)
ih = c;
process_wave2(p.sdds, p.sddc, p.ws, p.wc, idx, ih, z_ii, z_ik);
ih--;
}
}
p.ih[idx] = ih;                 // persist phase for the next chunk (kernel4)
if(idx != 0) return;
*(p.ip) = ic;
*(p.op) = outp;
}
// Continuation analysis kernel: same per-unit loop as kernel3 but starts at
// input sample `sindex`, applies no fade-in ramp (limit_data instead of
// slope_data) and emits frames for every processed sample window.  Running
// sums and cut-wave phases carry over through p.sdds / p.sddc / p.ih.
__global__ void kernel4(int sindex, int plen, struct params p)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= p.fm) return;
int zlen = p.zlen;
int slen = zlen / interp;
bool zflag = (idx < p.gm);      // true: this unit samples the native signal
int c = p.cutlen[idx];
int ih = p.ih[idx];
int outp = 0;
int ic;
for(ic = sindex; ic < zlen && ic < sindex + plen; ic++){
int ii = (zflag)? ic/interp: ic;
int ik = ii - c;
if(ic % interp == 1){
float s_ii = limit_data((zflag)? p.s: p.z, ii, (zflag)? slen: zlen);
float s_ik = limit_data((zflag)? p.s: p.z, ik, (zflag)? slen: zlen);
if(ih <= 0)
ih = c;                         // wrap phase; tables are 1-based
process_wave(p.sdds, p.sddc, p.spec, p.aasc, p.ws, p.wc, idx, ih, c, p.fm, outp++, s_ii, s_ik);
ih--;
}
else if(!zflag){
float z_ii = limit_data(p.z, ii, zlen);
float z_ik = limit_data(p.z, ik, zlen);
if(ih <= 0)
ih = c;
process_wave2(p.sdds, p.sddc, p.ws, p.wc, idx, ih, z_ii, z_ik);
ih--;
}
}
p.ih[idx] = ih;                 // persist phase for the next chunk
if(idx != 0) return;
*(p.ip) = ic;
*(p.op) = outp;
}
// Parabolic (in log10-frequency) main-lobe model of the masking curve around
// a spectral peak at `peakHz` with level `peakValue` [dB].  Returns the mask
// level [dB] at `Hz`.  The +0.1 dB bias keeps the peak above its own mask.
// Float literals are used so the expression stays in single precision on the
// device (the original `+0.1` / `-1e4` silently promoted to double).
__device__ float mainlobe(float Hz, float peakHz, float peakValue)
{
    const float f = log10f(Hz) - log10f(peakHz);  // log-distance from the peak
    const float a = -1e4f;                        // lobe steepness [dB / log10^2]
    return a * f * f + peakValue + 0.1f;
}
// Masking-curve slope on the low-frequency side of a peak at `peakHz`;
// `b` is the offset that makes it meet the main lobe.  Returns dB.
__device__ float lowerslope(float Hz, float peakHz, float b)
{
    const float d  = log10f(fabsf(Hz - peakHz) / peakHz);  // log relative distance
    const float lf = log10f(Hz);                           // log absolute frequency
    return -17 * d + 14 * lf + b;
}
// Masking-curve slope on the high-frequency side of a peak at `peakHz`;
// `b` is the offset that makes it meet the main lobe.  Returns dB.
__device__ float higherslope(float Hz, float peakHz, float b)
{
    const float d  = log10f(fabsf(Hz - peakHz) / peakHz);  // log relative distance
    const float lf = log10f(Hz);                           // log absolute frequency
    return -19 * d + 22 * lf + b;
}
#define noiseLevel -70
// Given the candidate list idx[0..fcont] sorted ascending by magnitude
// (idx[fcont] = current strongest unmasked peak), locate the peak bin, copy
// it into `result`, and accumulate its masking curve over all remaining
// candidates.
//   mask   : accumulated masking threshold per unit (updated in place)
//   result : surviving peaks (the filtered spectrum)
//   src    : input spectrum frame
//   omh    : per-unit center frequency [Hz]
__device__ void findmask(float *mask, float *result, float *src, int *idx, int fcont, float *omh, int fm)
{
int maxidx = 0;
int count = 0;
// Scan +/-3 bins around the candidate for bins with the exact same value.
for(int i = (0 < idx[fcont]-3)? idx[fcont]-3 : 0; i < ((idx[fcont]+4 < fm)? idx[fcont]+4: fm); i++){
if(src[i] == src[idx[fcont]]){
maxidx = i;
count++;
}
}
// NOTE(review): maxidx is ASSIGNED (not summed) in the loop above, so
// dividing by `count` only makes sense when count == 1; this looks like it
// was meant to average the indices of equal-valued bins — verify.
if(count > 0)
maxidx /= count;
else
maxidx = idx[fcont];
float Hz = omh[maxidx];
float value = src[maxidx];
float valuedB = 20*log10f(value);
// Main lobe spans +/-6% around the peak; slopes attach at its edges.
float lowerPoint = Hz - Hz*0.06;
float higherPoint = Hz + Hz*0.06;
float b1 = mainlobe(lowerPoint, Hz, valuedB) - lowerslope(lowerPoint, Hz, 0);
float b2 = mainlobe(higherPoint, Hz, valuedB) - higherslope(higherPoint, Hz, 0);
for(int j = fcont; j >= 0; j--){
int i = idx[j];
float fq = omh[i];
float vmask = value;
float vmaskdB = valuedB;
if(i == maxidx){
result[i] = src[i];   // the peak itself survives the filter
}
if(i == maxidx){
vmask = value;
}
else if (maxidx - 5 < i && i < maxidx + 5){
vmask = value;        // bins adjacent to the peak get the full peak level
}
else if(fq < lowerPoint){
vmaskdB = lowerslope(fq, Hz, b1);
vmask = powf(10, vmaskdB/20);
}
else if(fq > higherPoint){
vmaskdB = higherslope(fq, Hz, b2);
vmask = powf(10, vmaskdB/20);
}
else{
vmaskdB = mainlobe(fq, Hz, valuedB);
vmask = powf(10, vmaskdB/20);
}
//if(mask[i] < vmask)
// mask[i] = vmask;
mask[i] += vmask;         // masks accumulate (sum, not max — see disabled code)
}
}
// Indirect selection sort: reorder idx[left..right] so data[idx[.]] is
// ascending.  `data` itself is never modified.
__device__ void selection_sort(float *data, int *idx, int left, int right)
{
    for (int i = left; i <= right; i++) {
        int best = i;
        for (int j = i + 1; j <= right; j++) {
            if (data[idx[j]] < data[idx[best]])
                best = j;
        }
        if (best != i) {
            const int tmp = idx[best];
            idx[best] = idx[i];
            idx[i] = tmp;
        }
    }
}
// Indirect quicksort: reorder idx[left..right] so data[idx[.]] is ascending.
// Falls back to selection_sort for small partitions or recursion depth > 32
// (device stack is limited; see the hipDeviceSetLimit call in the
// constructor).  The printf("error") checks are leftover debug guards that
// flag corrupted (negative) indices.
__device__ void quick_sort(float *data, int *idx, int left, int right, int depth)
{
if(left >= right) return;
if(right - left < 4 || depth > 32){
selection_sort(data, idx, left, right);
return;
}
int leftp = left;
int rightp = right;
int pivotp = (right + left)/2;   // middle element as pivot
if(idx[pivotp] <0)
printf("error\n");
float pivot = data[idx[pivotp]];
// Hoare-style partition on the indirect values.
while(leftp <= rightp){
if(idx[leftp] < 0)
printf("error\n");
if(idx[rightp] < 0)
printf("error\n");
while(data[idx[leftp]] < pivot && leftp < right){
leftp++;
if(idx[leftp] < 0)
printf("error\n");
}
while(data[idx[rightp]] > pivot && left < rightp){
rightp--;
if(idx[rightp] < 0)
printf("error\n");
}
if(leftp <= rightp){
int t = idx[leftp];
idx[leftp] = idx[rightp];
idx[rightp] = t;
leftp++;
rightp--;
}
}
if(idx[leftp] < 0)
printf("error\n");
if(idx[rightp] < 0)
printf("error\n");
// Recurse into both partitions.
if(rightp > left){
quick_sort(data, idx, left, rightp, depth+1);
}
if(leftp < right){
quick_sort(data, idx, leftp, right, depth+1);
}
}
// Masking filter: one thread per spectrum frame (idx in [0, len)).
// Per frame: collect units below `cutoff` Hz whose magnitude exceeds the
// noise floor, sort them ascending by magnitude, then walk from the loudest
// down, keeping every peak not already covered by the accumulated mask
// (findmask copies survivors into fspec and grows the mask).
// FIX: noiseLevel/20 was INTEGER division (-70/20 == -3), so the noise floor
// was 10^-3 instead of the intended 10^(-70/20) = 10^-3.5; divide by 20.0f.
__global__ void kernel5(float *spec, int *tidx, float *fspec, float *mask, float *comh, int len, int fm)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= len) return;
// Rebase all working pointers onto this frame's fm-wide slice.
spec = &spec[idx*fm];
tidx = &tidx[idx*fm];
fspec = &fspec[idx*fm];
mask = &mask[idx*fm];
const float cutoff = 10000;
const float noise = powf(10, noiseLevel / 20.0f);  // dB -> linear amplitude
for(int i = 0; i < fm; i++){
mask[i] = 0;
fspec[i] = 0;
tidx[i] = -1;
}
// Gather candidate units: below the frequency cutoff, above the noise floor.
int c = 0;
for(int i = 0; i < fm; i++){
if(comh[i] > cutoff) break;
float value = spec[i];
if(value <= noise) continue;
tidx[c++] = i;
}
if(c == 0) return;
quick_sort(spec, tidx, 0, c-1, 0);   // ascending by magnitude
// Loudest first: skip peaks already buried under the accumulated mask.
for(int i = c-1; i >= 0; i--){
int max_idx = tidx[i];
float max_value = spec[max_idx];
if(max_value <= mask[max_idx]) continue;
findmask(mask, fspec, spec, tidx, i, comh, fm);
}
}
//
// Run the sliding-window spectral analysis on samples [offset, offset+len)
// of the input and fill ret_spec / ret_aasc with fm magnitude / phase values
// per output frame.  Returns the number of frames written.
//   s : native-rate signal, slen samples
//   z : interpolated signal — assumed to hold slen*interp samples, since all
//       z indexing below is scaled by `interp` (TODO confirm with callers).
// The analysis restarts `prepad` samples before `offset` to warm up the
// running sums, with a fade-in ramp (`slope`) masking the padded start.
// All working buffers are lazily allocated managed memory, reused across
// calls; callcount guards the destructor against tearing them down mid-call.
int DetectionUnits::AnalyzeData(
std::vector<float> &ret_spec,
std::vector<float> &ret_aasc,
int offset,
int len,
const float *s,
int slen,
const float *z)
{
ret_spec.resize(len*fm);
ret_aasc.resize(len*fm);
memset(ret_spec.data(), 0, sizeof(float)*len*fm);
memset(ret_aasc.data(), 0, sizeof(float)*len*fm);
if(offset > slen) return 0;
if(offset + len > slen) len = slen - offset;
callcount++;
int c_buf = cutlen[0];    // longest (lowest-frequency) window
int prepad = c_buf*3;     // warm-up length before `offset`
int slope = c_buf*2;
if(!ih)
checkCudaErrors(hipMallocManaged(&ih, fm*sizeof(int), hipMemAttachGlobal));
if(!clen){
checkCudaErrors(hipMallocManaged(&clen, fm*sizeof(int), hipMemAttachGlobal));
memcpy(clen, cutlen.data(), fm*sizeof(int));
}
if(!comh){
checkCudaErrors(hipMallocManaged(&comh, fm*sizeof(float), hipMemAttachGlobal));
memcpy(comh, omh.data(), fm*sizeof(float));
}
// Reset each unit's cut-wave phase to its window length for a fresh run.
memcpy(ih, cutlen.data(), fm*sizeof(int));
if(!sdds)
checkCudaErrors(hipMallocManaged(&sdds, fm*sizeof(float), hipMemAttachGlobal));
if(!sddc)
checkCudaErrors(hipMallocManaged(&sddc, fm*sizeof(float), hipMemAttachGlobal));
memset(sdds, 0, fm*sizeof(float));
memset(sddc, 0, fm*sizeof(float));
if(!spec)
checkCudaErrors(hipMallocManaged(&spec, specbuflen*fm*sizeof(float), hipMemAttachGlobal));
if(!aasc)
checkCudaErrors(hipMallocManaged(&aasc, specbuflen*fm*sizeof(float), hipMemAttachGlobal));
// Managed scalars the kernels use to report progress back to the host.
int *op, *ip;
checkCudaErrors(hipMallocManaged(&op, sizeof(int), hipMemAttachGlobal));
checkCudaErrors(hipMallocManaged(&ip, sizeof(int), hipMemAttachGlobal));
*op = 0;
*ip = 0;
// Clamp the warm-up region to the available history before `offset`.
if(prepad > offset)
prepad = offset;
if(prepad < c_buf)
slope = 0;
else
slope = prepad - cutlen[0];
int fixoffset = offset - prepad;
int fixlen = prepad + len;
printf("AnalyzeData %d\n", fixlen*interp);
struct params p = {&z[fixoffset*interp], fixlen*interp, &s[fixoffset], prepad, slope, clen, gm, fm, (const float **)ws, (const float **)wc, ih, sdds, sddc, spec, aasc, op, ip};
int idx = 0;
// Process the padded input in chunks of at most specbuflen samples;
// kernel3 handles the first (warm-up) chunk, kernel4 the continuations.
while(*ip < fixlen*interp){
int plen = (*ip + specbuflen < fixlen*interp)? specbuflen: fixlen*interp - *ip;
if(*ip > 0){
hipLaunchKernelGGL(( kernel4), dim3(ceil((float)fm/thread_N)), dim3(thread_N), 0, stream[0], *ip, plen, p);
}
else{
hipLaunchKernelGGL(( kernel3), dim3(ceil((float)fm/thread_N)), dim3(thread_N), 0, stream[0], plen, p);
}
// Synchronize before the host touches the managed buffers / counters.
hipStreamSynchronize(stream[0]);
int lenp = *op;
if(lenp <= 0){
printf("return length is 0\n");
break;
}
// NOTE(review): no bound check that idx + lenp stays within len here;
// presumably guaranteed by the warm-up/output accounting — verify.
memcpy(&ret_spec[idx*fm], spec, lenp*fm*sizeof(float));
memcpy(&ret_aasc[idx*fm], aasc, lenp*fm*sizeof(float));
idx += lenp;
}
printf("AnalyzeData %d end\n", fixlen*interp);
hipFree(op);
hipFree(ip);
callcount--;
return idx;
}
// Apply the kernel5 masking filter to a spectrum produced by AnalyzeData.
// in_spec holds len frames of fm magnitudes each; ret_fspec receives the
// filtered frames.  Returns the number of frames processed.
// NOTE(review): kernel5 reads `comh`, which is only allocated inside
// AnalyzeData — calling FilterData first would pass a NULL pointer; verify
// the intended call order.
int DetectionUnits::FilterData(
const std::vector<float> &in_spec,
std::vector<float> &ret_fspec)
{
int len = in_spec.size()/fm;
ret_fspec.resize(len*fm);
memset(ret_fspec.data(), 0, sizeof(float)*len*fm);
callcount++;   // keep the destructor from freeing buffers mid-call
const int buflen = 64 * 1024;   // frames staged per kernel launch
if(!spec2)
checkCudaErrors(hipMallocManaged(&spec2, buflen*fm*sizeof(float), hipMemAttachGlobal));
if(!fspec)
checkCudaErrors(hipMallocManaged(&fspec, buflen*fm*sizeof(float), hipMemAttachGlobal));
if(!mask)
checkCudaErrors(hipMallocManaged(&mask, buflen*fm*sizeof(float), hipMemAttachGlobal));
if(!tidx)
checkCudaErrors(hipMallocManaged(&tidx, buflen*fm*sizeof(int), hipMemAttachGlobal));
printf("FilterData %d\n", len);
int idx = 0;
// Stage up to buflen frames at a time through the managed buffers.
while(idx < len){
int plen = (idx + buflen < len)? buflen: len - idx;
printf("plen2 %d\n", plen);
memcpy(spec2, &in_spec[idx*fm], plen*fm*sizeof(float));
hipLaunchKernelGGL(( kernel5), dim3(ceil((float)plen/thread_N2)), dim3(thread_N2), 0, stream[0], spec2, tidx, fspec, mask, comh, plen, fm);
// Synchronize before the host reads the filtered frames back.
hipStreamSynchronize(stream[0]);
memcpy(&ret_fspec[idx*fm], fspec, plen*fm*sizeof(float));
idx += plen;
}
printf("FilterData %d end\n", len);
callcount--;
return idx;
}
| d34bd5ce60575094a13d7d6e59c5be9a80614101.cu | #include "analyse.hpp"
#include <helper_cuda.h>
extern bool done;
// Constructor: initial setup
DetectionUnits::DetectionUnits(int SampleFq, float base_fq, float max_fq)
:nz(SampleFq), omh_base(base_fq), omh_max(max_fq),
ih(NULL), clen(NULL), sdds(NULL), sddc(NULL), spec(NULL),
aasc(NULL),
spec2(NULL), fspec(NULL), mask(NULL), comh(NULL), tidx(NULL),
ws(NULL), wc(NULL), fm(0), omh(0)
{
printf("SampleFreq=%d\n", SampleFq);
printf("minFreq=%f\n", base_fq);
printf("maxFreq=%f\n", max_fq);
nnz1=t*nz;
h=1.0/nz;
nnz2=interp*nnz1;
dt1=1.0/nz;
dt2=1.0/(nz*interp);
gm=-1;
cutlen.push_back(nnz1/omh_base);
omh.push_back((float)nnz1/cutlen[0]);
om.push_back(omh[0]*2*M_PI);
init_detection_unit();
init_cutwaves();
checkCudaErrors(cudaStreamCreate(&stream[0]));
checkCudaErrors(cudaStreamCreate(&stream[1]));
checkCudaErrors(cudaDeviceSetLimit(cudaLimitStackSize, 8*1024));
}
// Compute the configured frequencies of the signal-detection units
void DetectionUnits::init_detection_unit()
{
int i=0;
printf("min on unit %d=%fHz\n", i, omh[i]);
while(omh[i++] < omh_max)
{
if(gm < 0){
cutlen.push_back(cutlen[i-1]/f);
omh.push_back((float)nnz1/cutlen[i]);
om.push_back(omh[i]*2*M_PI);
if(cutlen[i] < start_interp){
gm = i+1;
printf("switch to spline on unit %d=%fHz\n", i, omh[i]);
}
}
else{
if(gm == i){
cutlen.push_back(cutlen[i-1]*interp/f);
}
else{
cutlen.push_back(cutlen[i-1]/f);
}
omh.push_back((float)nnz2/cutlen[i]);
om.push_back(omh[i]*2*M_PI);
}
}
fm = i--;
printf("max on unit %d=%fHz\n", i, omh[i]);
specbuflen = 512 * 1024 * 1024 / (sizeof(float) * fm * interp) * interp;
}
// Set up the cut-out (window) waves
void DetectionUnits::init_cutwaves()
{
checkCudaErrors(cudaMallocManaged(&ws, fm*sizeof(float*), CU_MEM_ATTACH_GLOBAL));
checkCudaErrors(cudaMallocManaged(&wc, fm*sizeof(float*), CU_MEM_ATTACH_GLOBAL));
int j=0;
for(auto cl: cutlen){
checkCudaErrors(cudaMallocManaged(&ws[j], (cl+1)*sizeof(float), CU_MEM_ATTACH_GLOBAL));
checkCudaErrors(cudaMallocManaged(&wc[j], (cl+1)*sizeof(float), CU_MEM_ATTACH_GLOBAL));
j++;
}
j=0;
for(auto cl: cutlen){
if(j < gm){
for(int i=1; i<=cl; i++){
ws[j][i] = sin(om[j] * dt1 * i);
wc[j][i] = cos(om[j] * dt1 * i);
}
}
else {
for(int i=1; i<=cl; i++){
ws[j][i] = sin(om[j] * dt2 * i);
wc[j][i] = cos(om[j] * dt2 * i);
}
}
j++;
}
}
DetectionUnits::~DetectionUnits()
{
while(callcount > 0)
std::this_thread::sleep_for(std::chrono::milliseconds(10));
for(int i = 0; i < fm; i++){
cudaFree(ws[i]);
cudaFree(wc[i]);
}
cudaFree(ws);
ws = NULL;
cudaFree(wc);
wc = NULL;
cudaFree(aasc);
aasc = NULL;
cudaFree(spec);
spec = NULL;
cudaFree(fspec);
fspec = NULL;
cudaFree(spec2);
spec2 = NULL;
cudaFree(mask);
mask = NULL;
cudaFree(comh);
comh = NULL;
cudaFree(tidx);
tidx = NULL;
cudaFree(sddc);
sddc = NULL;
cudaFree(sdds);
sdds = NULL;
cudaFree(clen);
clen = NULL;
cudaFree(ih);
ih = NULL;
cudaStreamDestroy(stream[0]);
cudaStreamDestroy(stream[1]);
}
struct params
{
const float *z;
const long zlen;
const float *s;
const int sstart;
const int slope;
const int *cutlen;
const int gm;
const int fm;
const float **ws;
const float **wc;
int *ih;
float *sdds;
float *sddc;
float *spec;
float *aasc;
int *op;
int *ip;
};
#define thread_N 32
#define thread_N2 1024
__device__ void process_wave(float *sdds, float *sddc, float *spec, float *aasc, const float **ws, const float **wc, int idx, int ih, int c, int fm, int outp, float s_ii, float s_ik)
{
float sds = sdds[idx] + ws[idx][ih]*s_ii - ws[idx][ih]*s_ik;
float sdc = sddc[idx] + wc[idx][ih]*s_ii - wc[idx][ih]*s_ik;
sdds[idx] = sds;
sddc[idx] = sdc;
float sads = 2.0 * sds / c;
float sadc = 2.0 * sdc / c;
float adss = sads * ws[idx][ih];
float adsc = sads * wc[idx][ih];
float adcc = sadc * wc[idx][ih];
float adcs = sadc * ws[idx][ih];
float aadc = adss + adcc;
float aads = adsc - adcs;
spec[outp*fm+idx] = sqrtf(aads * aads + aadc * aadc);
aasc[outp*fm+idx] = atan2f(aads, aadc);
}
__device__ void process_wave2(float *sdds, float *sddc, const float **ws, const float **wc, int idx, int ih, float s_ii, float s_ik)
{
float sds = sdds[idx] + ws[idx][ih]*s_ii - ws[idx][ih]*s_ik;
float sdc = sddc[idx] + wc[idx][ih]*s_ii - wc[idx][ih]*s_ik;
sdds[idx] = sds;
sddc[idx] = sdc;
}
__device__ float slope_data(const float *data, int i, int len, int slope)
{
if(i < 0 || i >= len) return 0;
if(slope != 0 && i < slope) return data[i] * (float)i/slope;
return data[i];
}
__device__ float limit_data(const float *data, int i, int len)
{
if(i < 0 || i >= len) return 0;
return data[i];
}
__global__ void kernel3(int plen, struct params p)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= p.fm){
return;
}
int zlen = p.zlen;
int slen = zlen / interp;
int sstart = p.sstart;
int zstart = sstart * interp;
int slope = p.slope;
int zslope = p.slope * interp;
bool zflag = (idx < p.gm);
int c = p.cutlen[idx];
int ih = p.ih[idx];
int outp = 0;
int ic = 1;
for(; ic < zlen && outp < plen/interp; ic++){
int ii = (zflag)? ic/interp: ic;
int ik = ii - c;
if(ic % interp == 1){
float s_ii = slope_data((zflag)? p.s: p.z, ii, (zflag)? slen: zlen, slope*((zflag)?1:interp));
float s_ik = slope_data((zflag)? p.s: p.z, ik, (zflag)? slen: zlen, slope*((zflag)?1:interp));
if(ih <= 0)
ih = c;
if(ic < zstart){
process_wave2(p.sdds, p.sddc, p.ws, p.wc, idx, ih, s_ii, s_ik);
}
else{
process_wave(p.sdds, p.sddc, p.spec, p.aasc, p.ws, p.wc, idx, ih, c, p.fm, outp++, s_ii, s_ik);
}
ih--;
}
else if(!zflag){
float z_ii = slope_data(p.z, ii, zlen, zslope);
float z_ik = slope_data(p.z, ik, zlen, zslope);
if(ih <= 0)
ih = c;
process_wave2(p.sdds, p.sddc, p.ws, p.wc, idx, ih, z_ii, z_ik);
ih--;
}
}
p.ih[idx] = ih;
if(idx != 0) return;
*(p.ip) = ic;
*(p.op) = outp;
}
__global__ void kernel4(int sindex, int plen, struct params p)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= p.fm) return;
int zlen = p.zlen;
int slen = zlen / interp;
bool zflag = (idx < p.gm);
int c = p.cutlen[idx];
int ih = p.ih[idx];
int outp = 0;
int ic;
for(ic = sindex; ic < zlen && ic < sindex + plen; ic++){
int ii = (zflag)? ic/interp: ic;
int ik = ii - c;
if(ic % interp == 1){
float s_ii = limit_data((zflag)? p.s: p.z, ii, (zflag)? slen: zlen);
float s_ik = limit_data((zflag)? p.s: p.z, ik, (zflag)? slen: zlen);
if(ih <= 0)
ih = c;
process_wave(p.sdds, p.sddc, p.spec, p.aasc, p.ws, p.wc, idx, ih, c, p.fm, outp++, s_ii, s_ik);
ih--;
}
else if(!zflag){
float z_ii = limit_data(p.z, ii, zlen);
float z_ik = limit_data(p.z, ik, zlen);
if(ih <= 0)
ih = c;
process_wave2(p.sdds, p.sddc, p.ws, p.wc, idx, ih, z_ii, z_ik);
ih--;
}
}
p.ih[idx] = ih;
if(idx != 0) return;
*(p.ip) = ic;
*(p.op) = outp;
}
__device__ float mainlobe(float Hz, float peakHz, float peakValue)
{
float f = log10f(Hz) - log10f(peakHz);
float a = -1e4;
return a*f*f + peakValue+0.1;
}
__device__ float lowerslope(float Hz, float peakHz, float b)
{
float f1 = log10f(fabsf(Hz-peakHz)/peakHz);
float f2 = log10f(Hz);
return -17*f1 + 14*f2 + b;
}
__device__ float higherslope(float Hz, float peakHz, float b)
{
float f1 = log10f(fabsf(Hz-peakHz)/peakHz);
float f2 = log10f(Hz);
return -19*f1 + 22*f2 + b;
}
#define noiseLevel -70
__device__ void findmask(float *mask, float *result, float *src, int *idx, int fcont, float *omh, int fm)
{
int maxidx = 0;
int count = 0;
for(int i = (0 < idx[fcont]-3)? idx[fcont]-3 : 0; i < ((idx[fcont]+4 < fm)? idx[fcont]+4: fm); i++){
if(src[i] == src[idx[fcont]]){
maxidx = i;
count++;
}
}
if(count > 0)
maxidx /= count;
else
maxidx = idx[fcont];
float Hz = omh[maxidx];
float value = src[maxidx];
float valuedB = 20*log10f(value);
float lowerPoint = Hz - Hz*0.06;
float higherPoint = Hz + Hz*0.06;
float b1 = mainlobe(lowerPoint, Hz, valuedB) - lowerslope(lowerPoint, Hz, 0);
float b2 = mainlobe(higherPoint, Hz, valuedB) - higherslope(higherPoint, Hz, 0);
for(int j = fcont; j >= 0; j--){
int i = idx[j];
float fq = omh[i];
float vmask = value;
float vmaskdB = valuedB;
if(i == maxidx){
result[i] = src[i];
}
if(i == maxidx){
vmask = value;
}
else if (maxidx - 5 < i && i < maxidx + 5){
vmask = value;
}
else if(fq < lowerPoint){
vmaskdB = lowerslope(fq, Hz, b1);
vmask = powf(10, vmaskdB/20);
}
else if(fq > higherPoint){
vmaskdB = higherslope(fq, Hz, b2);
vmask = powf(10, vmaskdB/20);
}
else{
vmaskdB = mainlobe(fq, Hz, valuedB);
vmask = powf(10, vmaskdB/20);
}
//if(mask[i] < vmask)
// mask[i] = vmask;
mask[i] += vmask;
}
}
__device__ void selection_sort(float *data, int *idx, int left, int right)
{
for(int i = left; i <= right; i++){
int minidx = i;
float minvalue = data[idx[minidx]];
for(int j = i+1; j <= right; j++){
if(data[idx[j]] < minvalue){
minidx = j;
minvalue = data[idx[j]];
}
}
if(minidx != i){
int t = idx[minidx];
idx[minidx] = idx[i];
idx[i] = t;
}
}
}
__device__ void quick_sort(float *data, int *idx, int left, int right, int depth)
{
if(left >= right) return;
if(right - left < 4 || depth > 32){
selection_sort(data, idx, left, right);
return;
}
int leftp = left;
int rightp = right;
int pivotp = (right + left)/2;
if(idx[pivotp] <0)
printf("error\n");
float pivot = data[idx[pivotp]];
while(leftp <= rightp){
if(idx[leftp] < 0)
printf("error\n");
if(idx[rightp] < 0)
printf("error\n");
while(data[idx[leftp]] < pivot && leftp < right){
leftp++;
if(idx[leftp] < 0)
printf("error\n");
}
while(data[idx[rightp]] > pivot && left < rightp){
rightp--;
if(idx[rightp] < 0)
printf("error\n");
}
if(leftp <= rightp){
int t = idx[leftp];
idx[leftp] = idx[rightp];
idx[rightp] = t;
leftp++;
rightp--;
}
}
if(idx[leftp] < 0)
printf("error\n");
if(idx[rightp] < 0)
printf("error\n");
if(rightp > left){
quick_sort(data, idx, left, rightp, depth+1);
}
if(leftp < right){
quick_sort(data, idx, leftp, right, depth+1);
}
}
// Masking filter: one thread per spectrum frame (idx in [0, len)).
// Per frame: collect units below `cutoff` Hz whose magnitude exceeds the
// noise floor, sort them ascending by magnitude, then walk from the loudest
// down, keeping every peak not already covered by the accumulated mask
// (findmask copies survivors into fspec and grows the mask).
// FIX: noiseLevel/20 was INTEGER division (-70/20 == -3), so the noise floor
// was 10^-3 instead of the intended 10^(-70/20) = 10^-3.5; divide by 20.0f.
__global__ void kernel5(float *spec, int *tidx, float *fspec, float *mask, float *comh, int len, int fm)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= len) return;
// Rebase all working pointers onto this frame's fm-wide slice.
spec = &spec[idx*fm];
tidx = &tidx[idx*fm];
fspec = &fspec[idx*fm];
mask = &mask[idx*fm];
const float cutoff = 10000;
const float noise = powf(10, noiseLevel / 20.0f);  // dB -> linear amplitude
for(int i = 0; i < fm; i++){
mask[i] = 0;
fspec[i] = 0;
tidx[i] = -1;
}
// Gather candidate units: below the frequency cutoff, above the noise floor.
int c = 0;
for(int i = 0; i < fm; i++){
if(comh[i] > cutoff) break;
float value = spec[i];
if(value <= noise) continue;
tidx[c++] = i;
}
if(c == 0) return;
quick_sort(spec, tidx, 0, c-1, 0);   // ascending by magnitude
// Loudest first: skip peaks already buried under the accumulated mask.
for(int i = c-1; i >= 0; i--){
int max_idx = tidx[i];
float max_value = spec[max_idx];
if(max_value <= mask[max_idx]) continue;
findmask(mask, fspec, spec, tidx, i, comh, fm);
}
}
// Perform the analysis
int DetectionUnits::AnalyzeData(
std::vector<float> &ret_spec,
std::vector<float> &ret_aasc,
int offset,
int len,
const float *s,
int slen,
const float *z)
{
ret_spec.resize(len*fm);
ret_aasc.resize(len*fm);
memset(ret_spec.data(), 0, sizeof(float)*len*fm);
memset(ret_aasc.data(), 0, sizeof(float)*len*fm);
if(offset > slen) return 0;
if(offset + len > slen) len = slen - offset;
callcount++;
int c_buf = cutlen[0];
int prepad = c_buf*3;
int slope = c_buf*2;
if(!ih)
checkCudaErrors(cudaMallocManaged(&ih, fm*sizeof(int), CU_MEM_ATTACH_GLOBAL));
if(!clen){
checkCudaErrors(cudaMallocManaged(&clen, fm*sizeof(int), CU_MEM_ATTACH_GLOBAL));
memcpy(clen, cutlen.data(), fm*sizeof(int));
}
if(!comh){
checkCudaErrors(cudaMallocManaged(&comh, fm*sizeof(float), CU_MEM_ATTACH_GLOBAL));
memcpy(comh, omh.data(), fm*sizeof(float));
}
memcpy(ih, cutlen.data(), fm*sizeof(int));
if(!sdds)
checkCudaErrors(cudaMallocManaged(&sdds, fm*sizeof(float), CU_MEM_ATTACH_GLOBAL));
if(!sddc)
checkCudaErrors(cudaMallocManaged(&sddc, fm*sizeof(float), CU_MEM_ATTACH_GLOBAL));
memset(sdds, 0, fm*sizeof(float));
memset(sddc, 0, fm*sizeof(float));
if(!spec)
checkCudaErrors(cudaMallocManaged(&spec, specbuflen*fm*sizeof(float), CU_MEM_ATTACH_GLOBAL));
if(!aasc)
checkCudaErrors(cudaMallocManaged(&aasc, specbuflen*fm*sizeof(float), CU_MEM_ATTACH_GLOBAL));
int *op, *ip;
checkCudaErrors(cudaMallocManaged(&op, sizeof(int), CU_MEM_ATTACH_GLOBAL));
checkCudaErrors(cudaMallocManaged(&ip, sizeof(int), CU_MEM_ATTACH_GLOBAL));
*op = 0;
*ip = 0;
if(prepad > offset)
prepad = offset;
if(prepad < c_buf)
slope = 0;
else
slope = prepad - cutlen[0];
int fixoffset = offset - prepad;
int fixlen = prepad + len;
printf("AnalyzeData %d\n", fixlen*interp);
struct params p = {&z[fixoffset*interp], fixlen*interp, &s[fixoffset], prepad, slope, clen, gm, fm, (const float **)ws, (const float **)wc, ih, sdds, sddc, spec, aasc, op, ip};
int idx = 0;
while(*ip < fixlen*interp){
int plen = (*ip + specbuflen < fixlen*interp)? specbuflen: fixlen*interp - *ip;
if(*ip > 0){
kernel4<<<ceil((float)fm/thread_N), thread_N, 0, stream[0]>>>(*ip, plen, p);
}
else{
kernel3<<<ceil((float)fm/thread_N), thread_N, 0, stream[0]>>>(plen, p);
}
cudaStreamSynchronize(stream[0]);
int lenp = *op;
if(lenp <= 0){
printf("return length is 0\n");
break;
}
memcpy(&ret_spec[idx*fm], spec, lenp*fm*sizeof(float));
memcpy(&ret_aasc[idx*fm], aasc, lenp*fm*sizeof(float));
idx += lenp;
}
printf("AnalyzeData %d end\n", fixlen*interp);
cudaFree(op);
cudaFree(ip);
callcount--;
return idx;
}
int DetectionUnits::FilterData(
const std::vector<float> &in_spec,
std::vector<float> &ret_fspec)
{
int len = in_spec.size()/fm;
ret_fspec.resize(len*fm);
memset(ret_fspec.data(), 0, sizeof(float)*len*fm);
callcount++;
const int buflen = 64 * 1024;
if(!spec2)
checkCudaErrors(cudaMallocManaged(&spec2, buflen*fm*sizeof(float), CU_MEM_ATTACH_GLOBAL));
if(!fspec)
checkCudaErrors(cudaMallocManaged(&fspec, buflen*fm*sizeof(float), CU_MEM_ATTACH_GLOBAL));
if(!mask)
checkCudaErrors(cudaMallocManaged(&mask, buflen*fm*sizeof(float), CU_MEM_ATTACH_GLOBAL));
if(!tidx)
checkCudaErrors(cudaMallocManaged(&tidx, buflen*fm*sizeof(int), CU_MEM_ATTACH_GLOBAL));
printf("FilterData %d\n", len);
int idx = 0;
while(idx < len){
int plen = (idx + buflen < len)? buflen: len - idx;
printf("plen2 %d\n", plen);
memcpy(spec2, &in_spec[idx*fm], plen*fm*sizeof(float));
kernel5<<<ceil((float)plen/thread_N2), thread_N2, 0, stream[0]>>>(spec2, tidx, fspec, mask, comh, plen, fm);
cudaStreamSynchronize(stream[0]);
memcpy(&ret_fspec[idx*fm], fspec, plen*fm*sizeof(float));
idx += plen;
}
printf("FilterData %d end\n", len);
callcount--;
return idx;
}
|
02dad1383d747d013d6a3eb0bbc79d4fea4861db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MarchingCubes.h"
// Lauch bounds
#define THREADS_PER_BLOCK 256
#if __CUDA_ARCH__ >= 200
#define MY_KERNEL_MAX_THREADS (2 * THREADS_PER_BLOCK)
#define MY_KERNEL_MIN_BLOCKS 3
#else
#define MY_KERNEL_MAX_THREADS THREADS_PER_BLOCK
#define MY_KERNEL_MIN_BLOCKS 2
#endif
// defines
#define BIT_1 0x1
#define BIT_2 0x2
#define BIT_3 0x4
#define BIT_4 0x8
#define BIT_5 0x10
#define BIT_6 0x20
#define BIT_7 0x40
#define BIT_8 0x80
#define BIT_16 0x8000
// Empty Bucket
#define EMPTY_BUCKET_32 -1
#define EMPTY_BUCKET_64 0ull
// Shared memory experiments
#define AMB_BLOCKSIZE 64
#define MC_BLOCKSIZE 512
// type aliases
// Introduce convenient aliases here
using uint = unsigned int;
using uchar = unsigned char;
using ushort = unsigned short;
using ullong = unsigned long long;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// error handling
// Check for a pending runtime error; on failure report file/line and abort.
// Wrapped in do { } while (0) so the macro is safe inside unbraced if/else,
// and exits with EXIT_FAILURE (the original exit(0) reported *success* to
// the OS on a CUDA failure).
#define cudaCheckError() do { \
    hipError_t e = hipGetLastError(); \
    if (e != hipSuccess) { \
        printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, hipGetErrorString(e)); \
        exit(EXIT_FAILURE); \
    } \
} while (0)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convenience function
// Read back the device-side atomic counter `t_size` of one of the container
// structs below (Vertices, Triangles, ...) and return it as a host int.
// Takes the container by const reference — it holds raw device pointers and
// a read-only query has no reason to copy it.
// NOTE(review): assumes t.t_size is a valid device pointer set up by the
// matching init* function; the copy's return status is not checked here.
template<typename T>
int size(const T& t) {
    int s{ 0 };
    hipMemcpy(&s, t.t_size, sizeof(int), hipMemcpyDeviceToHost);
    return s;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The hash table to obtain a unique vertex index
struct VertexHashTable {
    int* key{ nullptr };   // device array of bucket keys
    int* addr{ nullptr };  // device array: vertex index stored per bucket
    int t_size{ 0 };       // bucket count (host side); initialized — the
                           // original left it indeterminate until init/free
};
// Allocate device storage for `size` hash buckets.
// NOTE(review): the memset that would mark all buckets EMPTY_BUCKET_32 is
// commented out, so `key` starts uninitialized — verify that callers clear
// the table elsewhere before use.
void initVertexHashTable(VertexHashTable& ht, const int size) {
ht.t_size = size;
hipMalloc(&ht.key, size * sizeof(int));
hipMalloc(&ht.addr, size * sizeof(int));
//hipMemset(key, EMPTY_BUCKET_32, size * sizeof(int));
}
// Release the table's device storage and reset it to the empty state.
void freeVertexHashTable(VertexHashTable& h) {
    if (h.key != nullptr)
        hipFree(h.key);
    if (h.addr != nullptr)
        hipFree(h.addr);
    h.t_size = 0;
    h.key = nullptr;
    h.addr = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Vertex array
struct Vertices {
float4* vertices{ nullptr };  // device array of vertex positions
float4* normals{ nullptr };   // device array of per-vertex normals
int* t_size{ nullptr }; // atomic counter to compute nr. of vertices
};
// Allocate device buffers for up to `size` vertices (positions + normals)
// and zero the device-side vertex counter.
void initVertices(Vertices& v, const int size) {
hipMalloc(&v.vertices, size * sizeof(float4));
hipMalloc(&v.normals, size * sizeof(float4));
hipMalloc(&v.t_size, sizeof(int));
hipMemset(v.t_size, 0, sizeof(int));
}
// Release all device buffers owned by `v` and reset it to the empty state.
void freeVertices(Vertices& v) {
    if (v.vertices != nullptr)
        hipFree(v.vertices);
    if (v.normals != nullptr)
        hipFree(v.normals);
    if (v.t_size != nullptr)
        hipFree(v.t_size);
    v.vertices = nullptr;
    v.normals = nullptr;
    v.t_size = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// a triangle consist of three indices
struct Triangles {
    int a_size{ 0 };             // buffer capacity — initialized; the
                                 // original left it indeterminate until init
    int4* triangles{ nullptr };  // device array of index triples
    int* t_size{ nullptr };      // atomic counter to compute nr. of triangles
};
// Allocate a device buffer for up to `size` triangles and zero the
// device-side triangle counter; records the capacity in a_size.
void initTriangles(Triangles& t, const int size) {
t.a_size = size;
hipMalloc(&t.triangles, size * sizeof(int4));
hipMalloc(&t.t_size, sizeof(int));
hipMemset(t.t_size, 0, sizeof(int));
}
// Release triangle buffers and reset the struct to its empty state.
void freeTriangles(Triangles& t) {
    if (t.triangles != nullptr) hipFree(t.triangles);
    if (t.t_size != nullptr) hipFree(t.t_size);
    t.a_size = 0;
    t.triangles = nullptr;
    t.t_size = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Track cell cases and ids
// Compacted list of grid cells intersected by the isosurface (non-ambiguous cases).
struct CellsIds {
    int* cells_{ nullptr }; // device array of global cell indices
    int* t_size{ nullptr }; // atomic counter to get address of ambiguous cell in a_cells array
};
// Allocate storage for up to `size` cell ids and zero the atomic counter.
void initCells(CellsIds& c, const int size) {
    hipMalloc(&c.cells_, size * sizeof(int));
    hipMalloc(&c.t_size, sizeof(int));
    hipMemset(c.t_size, 0, sizeof(int)); // empty list
}
// Release the cell-id buffers and reset the struct.
void freeCells(CellsIds& c) {
    if (c.cells_ != nullptr) hipFree(c.cells_);
    if (c.t_size != nullptr) hipFree(c.t_size);
    c.cells_ = nullptr;
    c.t_size = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Track ambiguous cases
// Compacted list of grid cells with an ambiguous MC case, processed separately.
struct AmbiguousCells {
    int* a_cells{ nullptr }; // device array of global cell indices
    int* t_size{ nullptr }; // atomic counter to get address of ambiguous cell in a_cells array
};
// Allocate storage for up to `size` ambiguous cell ids and zero the counter.
void initACells(AmbiguousCells& ac, const int size) {
    hipMalloc(&ac.a_cells, size * sizeof(int));
    hipMalloc(&ac.t_size, sizeof(int));
    hipMemset(ac.t_size, 0, sizeof(int)); // empty list
}
// Release the ambiguous-cell buffers and reset the struct.
void freeACells(AmbiguousCells& ac) {
    if (ac.a_cells != nullptr) hipFree(ac.a_cells);
    if (ac.t_size != nullptr) hipFree(ac.t_size);
    ac.a_cells = nullptr;
    ac.t_size = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Hash table to map halfedge twins
// Hash table used to pair a halfedge with its twin: both orientations of an
// edge hash to the same bucket (Cantor pairing of the unordered vertex pair),
// and the two halfedge addresses are stored in he_ids[h].x / he_ids[h].y.
struct HalfedgeHashTable {
    int t_size{ 0 };                   // number of buckets
    unsigned long long* key{ nullptr };// device array of 64 bit edge keys
    int2* he_ids{ nullptr };           // device array: .x = halfedge with v0<v1, .y = opposite
};
// Allocate the halfedge hash table and clear the keys.
// NOTE(review): keys are cleared with memset(0); this assumes
// EMPTY_BUCKET_64 == 0ull (suggested by the commented-out constant in
// insert_halfedge_id) — confirm against the definition of EMPTY_BUCKET_64.
void initHalfedgeHashTable(HalfedgeHashTable& t, const int size) {
    t.t_size = size;
    hipMalloc(&t.key, size * sizeof(unsigned long long));
    //cudaCheckError();
    hipMemset(t.key, 0, size * sizeof(unsigned long long));
    //cudaCheckError();
    hipMalloc(&t.he_ids, size * sizeof(int2));
}
// Insert halfedge `addr` (from vertex v0 to v1) into the twin-matching table.
// The key is Cantor's pairing function of the unordered pair (v0, v1),
// mixed with a 64 bit hash, so both orientations of an edge land in the same
// bucket; the orientation picks which slot (.x or .y) receives the address.
// Returns false if no bucket was found within 128 probes.
__device__ bool addHalfedgeToHashTable (HalfedgeHashTable t, const int addr, const int v0, const int v1) {
    unsigned long long x = (unsigned long long)v0;
    unsigned long long y = (unsigned long long)v1;
    // Cantor pairing of max(x,y) with (x + y)
    unsigned long long key = (x < y) ? y : x;
    key = key + (x + y) * (x + y + 1) / 2ull;
    {
        // 64 bit mix (same sequence as hash64shift)
        key = (~key) + (key << 21); // key = (key << 21) - key - 1;
        key = key ^ (key >> 24);
        key = (key + (key << 3)) + (key << 8); // key * 265
        key = key ^ (key >> 14);
        key = (key + (key << 2)) + (key << 4); // key * 21
        key = key ^ (key >> 28);
        key = key + (key << 31);
    }
    // open hashing
    int h = int(key % (unsigned long long)t.t_size);
    int e = 1;
    for (int loop = 0; loop < 128; loop++) {
        unsigned long long old = atomicCAS(&t.key[h], EMPTY_BUCKET_64, key);
        if (old == EMPTY_BUCKET_64 || old == key) {
            // claimed a fresh bucket, or found the twin's bucket
            if (v0 < v1) {
                t.he_ids[h].x = addr;
            }
            else {
                t.he_ids[h].y = addr;
            }
            return true;
        }
        else {
            // step with quadratic probing
            h = (h + e*e) % t.t_size;
            e = e + 1;
        }
    }
    //printf("ERROR: can't add halfedge\n");
    return false;
}
// Release the halfedge hash table buffers and reset the struct.
void freeHalfedgeHashTable(HalfedgeHashTable& t) {
    if (t.key != nullptr) hipFree(t.key);
    if (t.he_ids != nullptr) hipFree(t.he_ids);
    t.key = nullptr;
    t.he_ids = nullptr;
    t.t_size = 0;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Halfedge data structure
// Halfedge mesh data structure (arrays live on the device).
struct Halfedges {
    int* t_size{ nullptr }; // atomic counter: number of halfedges created
    int buffSize{ 0 };      // capacity of he_e
    // halfedge int4:
    // he.x = origin vertex
    // he.y = face
    // he.z = next
    // he.w = twin (-1 for boundary edges)
    int4* he_e{ nullptr };
    int* he_v{ nullptr }; // per vertex: one incident halfedge id
    int* he_f{ nullptr }; // per face: one of its halfedge ids
};
// Allocate the halfedge mesh buffers:
//   nr_he halfedge records, nr_v per-vertex halfedge ids, nr_t per-face
//   halfedge ids; and reset the atomic halfedge counter.
void initHalfedges(Halfedges& h, const int nr_he, const int nr_v, const int nr_t) {
    h.buffSize = nr_he;
    hipMalloc(&h.he_e, nr_he * sizeof(int4));
    //cudaCheckError();
    hipMalloc(&h.he_v, nr_v * sizeof(int));
    //cudaCheckError();
    hipMalloc(&h.he_f, nr_t * sizeof(int));
    hipMalloc(&h.t_size, sizeof(int));
    // BUG FIX: the original called hipMemset(&h.t_size, ...), which clears the
    // host-side pointer variable's address, not the device counter. The device
    // counter itself must be zeroed:
    hipMemset(h.t_size, 0, sizeof(int));
}
//__device__ void add(HalfedgeHashTable het_, const int v0, const int v1, const int v2) {
// const int a_ = atomicAdd(t_size, 3);
// const int f_ = a_ / 3;
// // he 0
// he_e[a_].x = v0;
// he_e[a_].y = f_; // there are three halfedges for each face (triangle)
// he_e[a_].z = a_ + 1; // next
// he_e[a_].w = -1; // default is boundary edge
//
// // he 1
// he_e[a_ + 1].x = v1;
// he_e[a_ + 1].y = f_; // there are three halfedges for each face (triangle)
// he_e[a_ + 1].z = a_ + 2;
// he_e[a_ + 1].w = -1; // default is boundary edge
//
// // he 2
// he_e[a_ + 2].x = v2;
// he_e[a_ + 2].y = f_; // there are three halfedges for each face (triangle)
// he_e[a_ + 2].z = a_;
// he_e[a_ + 2].w = -1; // default is boundary edge
//
// // add halfedges ids to hash table
// het_.add(a_, v0, v1);
// het_.add(a_ + 1, v1, v2);
// het_.add(a_ + 2, v2, v0);
//}
// Release all halfedge buffers and reset the struct to its empty state.
void freeHalfedges(Halfedges& h) {
    if (h.t_size != nullptr) hipFree(h.t_size);
    if (h.he_e != nullptr) hipFree(h.he_e);
    if (h.he_v != nullptr) hipFree(h.he_v);
    if (h.he_f != nullptr) hipFree(h.he_f);
    h.buffSize = 0;
    h.t_size = nullptr;
    h.he_e = nullptr;
    h.he_v = nullptr;
    h.he_f = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// MC lookup tables
// Device-resident Marching Cubes lookup tables (see initMC_lookup for layout).
struct MC_lookup {
    ushort* e_{ nullptr };             // per-case edge pattern; BIT_16 flags ambiguous cases
    unsigned long long* t_{ nullptr }; // per-case triangle list, 16 packed 4-bit entries
};
// Build the device-side MC lookup tables:
//  - l.e_ : per-case edge pattern from ep_, with BIT_16 set for the
//           ambiguous cases (ta_[i] == 105)
//  - l.t_ : per-case triangle list; the 16 entries of tp_ for each case are
//           packed as 4-bit nibbles into one 64 bit word, 0xF marking the
//           unused slots (tp_ entry -1)
// Uses small stack buffers instead of the original new/delete pairs, which
// could leak if a copy failed between allocation and delete[].
void initMC_lookup(MC_lookup& l, const std::array<unsigned short, 256>& ep_, const std::array<int, 4096>& tp_, const std::array<unsigned char, 256>& ta_) {
    // edge pattern + ambiguity flag (256 * 2 bytes: fine on the stack)
    ushort le_[256];
    for (int i = 0; i < 256; i++) {
        le_[i] = ep_[i];
        le_[i] |= (ta_[i] == 105) ? BIT_16 : 0x0;
    }
    hipMalloc(&l.e_, 256 * sizeof(ushort));
    hipMemcpy(l.e_, &le_[0], 256 * sizeof(ushort), hipMemcpyHostToDevice);
    cudaCheckError();
    // create MC lookup table: 16 nibbles per case packed into 64 bits
    unsigned long long l_[256];
    const unsigned long long flg = 0xFull;
    for (int i = 0; i < 256; i++) {
        const int i_case = i * 16;
        unsigned long long f = 0ull;
        for (int t = 0; t < 16; t++) {
            const int mcval = tp_[i_case + t];
            if (mcval == -1) {
                f |= (flg << (t * 4)); // unused slot
            }
            else {
                f |= ((unsigned long long)mcval << (t * 4));
            }
        }
        l_[i] = f;
    }
    hipMalloc(&l.t_, 256 * sizeof(unsigned long long));
    hipMemcpy(l.t_, &l_[0], 256 * sizeof(unsigned long long), hipMemcpyHostToDevice);
    cudaCheckError();
}
// Release the device lookup tables and reset the struct.
void freeMC_lookup(MC_lookup& l)
{
    if (l.e_ != nullptr) hipFree(l.e_);
    if (l.t_ != nullptr) hipFree(l.t_);
    l.e_ = nullptr;
    l.t_ = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Uniform grid
// Uniform (regular) grid: dimensions, origin and spacing, plus the
// index conversions between a flat global index and (i, j, k).
struct UniformGrid {
    int idim{ 0 };  // number of samples in x
    int jdim{ 0 };  // number of samples in y
    int kdim{ 0 };  // number of samples in z
    float x0{ 0 };  // origin
    float y0{ 0 };
    float z0{ 0 };
    float dx{ 0 };  // spacing
    float dy{ 0 };
    float dz{ 0 };
    // flat global index of sample (i, j, k); x varies fastest
    __device__ int gl_index(const int i, const int j, const int k) {
        return (k * jdim * idim + j * idim + i);
    }
    // recover i (x index) from a flat global index
    __device__ int i_index(const int gl_index) {
        return (gl_index % idim);
    }
    // recover j (y index) from a flat global index
    __device__ int j_index(const int gl_index) {
        return ((gl_index / idim) % jdim);
    }
    // recover k (z index) from a flat global index
    __device__ int k_index(const int gl_index) {
        return (gl_index / (idim * jdim));
    }
    // set grid dimensions
    __host__ void size(const int x_size, const int y_size, const int z_size) {
        idim = x_size;
        jdim = y_size;
        kdim = z_size;
    }
    // set grid origin
    __host__ void origin(const float x, const float y, const float z) {
        x0 = x;
        y0 = y;
        z0 = z;
    }
    // set grid spacing
    __host__ void spacing(const float x, const float y, const float z) {
        dx = x;
        dy = y;
        dz = z;
    }
};
using UGrid = UniformGrid;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// timer based on CUDA timen routines
// Timer based on HIP/CUDA event timing routines.
struct CTimer {
    float e_milliseconds;  // elapsed time of the last start()/stop() pair
    hipEvent_t c_start;
    hipEvent_t c_stop;
    CTimer() {
        hipEventCreate(&c_start);
        hipEventCreate(&c_stop);
    }
    // BUG FIX: the events were created but never destroyed, leaking a pair of
    // events per timer. NOTE(review): the struct remains copyable; copying a
    // CTimer would double-destroy the events — timers are expected to be used
    // as local, non-copied objects.
    ~CTimer() {
        hipEventDestroy(c_start);
        hipEventDestroy(c_stop);
    }
    // record the start event
    void __host__ start() {
        hipEventRecord(c_start);
    }
    // record the stop event, wait for it, and compute the elapsed time
    void __host__ stop() {
        hipEventRecord(c_stop);
        hipEventSynchronize(c_stop);
        hipEventElapsedTime(&e_milliseconds, c_start, c_stop);
    }
    // print the measured time
    void __host__ print() {
        std::cout << std::setprecision(7) << " ... time in ms: " << e_milliseconds << std::endl;
    }
    // print the measured time with a message prefix
    void __host__ print(std::string& m) {
        std::cout << std::setprecision(7) << " ... " << m << " time in ms: " << e_milliseconds << std::endl;
    }
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using namespace p_mc;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute number of vertices computed for this cell
// compute only the intersection of the iso-surface with
// the cell edges
// Population count: number of set bits in n. The operand is truncated to
// 32 bits, which is sufficient for the 16 bit MC edge patterns used here.
template<typename T>
__device__ uint numberOfSetBits(T n) {
    // use the hardware popcount intrinsic instead of the SWAR bit-twiddle
    // (also drops the stray ';' that followed the original function body)
    return (uint)__popc((uint)n);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Reduce
// warp reduce based on __shfl_down
// Warp-level sum reduction via shuffle-down; after the loop, lane 0 of each
// warp holds the sum over the whole warp (other lanes hold partial sums).
// BUG FIX: the return type was int, silently truncating the reduced value
// for floating point T; return T instead.
// NOTE(review): uses the legacy mask-less __shfl_down, consistent with the
// rest of this (HIP-translated) file; on CUDA Volta+ the *_sync variant
// with an explicit mask would be required.
template<typename T>
__device__ T warpReduceSum(T val) {
    for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        val += __shfl_down(val, offset);
    }
    return val;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// warp reduce kernel
// Grid-stride sum of in[0..N) into *out: each thread accumulates a partial
// sum, each warp reduces via shuffles, and one lane per warp adds to *out.
template<typename T>
__global__ void warp_reduce_kernel(T *in, T* out, int N) {
    const int stride = blockDim.x * gridDim.x;
    T acc = T(0);
    for (int idx = blockIdx.x*blockDim.x + threadIdx.x; idx < N; idx += stride) {
        acc += in[idx];
    }
    acc = warpReduceSum(acc);
    const bool firstLaneInWarp = (threadIdx.x % warpSize) == 0;
    if (firstLaneInWarp) {
        atomicAdd(out, acc);
    }
}
// host function for warp reduce
// Host-side driver: sum the N elements of device array i_data and return the
// total. BUG FIXES:
//  - `warp_reduce_kernel<typename T>` was ill-formed template-argument
//    syntax; use `warp_reduce_kernel<T>`
//  - d_sum was allocated/cleared/copied with sizeof(int) regardless of T;
//    use sizeof(T)
//  - the result was returned as int, truncating floating point T; return T
template<typename T>
T warpReduce(T *i_data, const int N) {
    const int threads = 256;
    const int blocks = ::min((N + threads - 1) / threads, 2048);
    T* d_sum{ nullptr };
    hipMalloc(&d_sum, sizeof(T));
    hipMemsetAsync(d_sum, 0, sizeof(T)); // all-zero bytes is T(0) for int and float
    warp_reduce_kernel<T> << <blocks, threads >> >(i_data, d_sum, N);
    cudaCheckError();
    // copy result back (hipMemcpy synchronizes with the kernel)
    T h_sum{ 0 };
    hipMemcpy(&h_sum, d_sum, sizeof(T), hipMemcpyDeviceToHost);
    hipFree(d_sum);
    return h_sum;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Index computations
//__host__ __device__ int global_index(const int i, const int j, const int k, const int idim, const int jdim, const int kdim)
//{
// return (k * jdim * idim + j * idim + i);
//}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// trilinear interpolation
// Trilinearly interpolate the position po inside the cell with corner
// positions p[0..7] at local coordinates (u, v, w) in [0,1]^3.
// Corner ordering follows the MC convention used in cell_vertices:
// bit 0 of the corner index = x offset, bit 1 = y, bit 2 = z.
__device__ void trilinear(float4& po, const float3 p[8], const float u, const float v, const float w)
{
    po.x = (1 - w) * ((1 - v) * (p[0].x + u * (p[1].x - p[0].x)) + v * (p[2].x + u * (p[3].x - p[2].x))) + w * ((1 - v) * (p[4].x + u * (p[5].x - p[4].x)) + v * (p[6].x + u * (p[7].x - p[6].x)));
    po.y = (1 - w) * ((1 - v) * (p[0].y + u * (p[1].y - p[0].y)) + v * (p[2].y + u * (p[3].y - p[2].y))) + w * ((1 - v) * (p[4].y + u * (p[5].y - p[4].y)) + v * (p[6].y + u * (p[7].y - p[6].y)));
    po.z = (1 - w) * ((1 - v) * (p[0].z + u * (p[1].z - p[0].z)) + v * (p[2].z + u * (p[3].z - p[2].z))) + w * ((1 - v) * (p[4].z + u * (p[5].z - p[4].z)) + v * (p[6].z + u * (p[7].z - p[6].z)));
    //po.y = (1 - w) * ((1 - v) * (p[0].y * (1 - u) + p[1].y * u) + v * (p[2].y * (1 - u) + p[3].y * u)) + w * ((1 - v) * (p[4].y * (1 - u) + p[5].y * u) + v * (p[6].y * (1 - u) + p[7].y * u));
    //po.z = (1 - w) * ((1 - v) * (p[0].z * (1 - u) + p[1].z * u) + v * (p[2].z * (1 - u) + p[3].z * u)) + w * ((1 - v) * (p[4].z * (1 - u) + p[5].z * u) + v * (p[6].z * (1 - u) + p[7].z * u));
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Hash tables
// device hash function
// 32 bit integer mix used to spread vertex keys over the hash table buckets.
__device__ uint hash_function( uint key )
{
    key = (key ^ 61) ^ (key >> 16);
    key += key << 3;
    key ^= key >> 4;
    key *= 0x27d4eb2d;
    key ^= key >> 15;
    return key;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Insert a key assigned to a vertex index in the hash table
// return the position in the array, where the key was inserted
// if the key was already in the array, e.g. other kernel was processing
// the same edge in the uniform grid, return false. This way, the kernel will
// not generate a new vertex. If the vertex was not created jet, the address in
// the array is returned so that the calling kernel can save this position
// of the vertex in the hash table
// v_gindex key is a unique global index assigned to the vertex
// Hash table:
// struct HashTable {
// int* key;
// int* addr;
// int t_size;
// };
// v_addr contains the position in the key array, where the key = v_gindex was stored
// Try to claim the unique vertex key v_gindex in the hash table.
// Returns true (and the bucket position in v_addr) when this thread claimed
// a fresh bucket — the caller should then create the vertex. Returns false
// when the key was already present (another thread owns the vertex) or when
// 128 probes failed to find a bucket.
__device__ bool insert_vertex_key(const int v_gindex, VertexHashTable ht_, int& v_addr)
{
    //const int start_address = int(hash_function((uint)v_gindex) % ht_.t_size);
    // open hashing
    //int h = start_address;
    int h = int(hash_function((uint)v_gindex) % ht_.t_size);
    int e = 1;
    for (int loop = 0; loop < 128; loop++) {
        // atomically claim the bucket if it is still empty
        int old = atomicCAS(&ht_.key[h], EMPTY_BUCKET_32, v_gindex);
        if (old == EMPTY_BUCKET_32) {
            v_addr = h;
            return true;
        }
        else if (v_gindex == old) {
            // vertex key already in table
            return false;
        }
        else {
            // step with quadratic probing
            h = (h + e*e) % ht_.t_size;
            e = e + 1;
            //if (h == start_address) {
            //	printf("ERROR: can't find free bucket for %d\n", v_gindex);
            //	return false;
            //}
        }
    }
    return false;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// a probably faster strategy to reduce number of glabal memory access
// This function sets vertex and normal using an atomic counter.
// Keep the address where vertex was stored, therefore the hast table knows the address of vertices and normals in vertex and normal arrays
// It resturn the address in the hash table where the address of vertex and normal are stored
// Insert a vertex keyed by v_gindex: the first thread to claim the bucket
// appends (vc, vn) to the vertex/normal arrays via the atomic counter and
// stores the array position in ht_.addr; later threads with the same key
// reuse the existing entry. Returns the hash table position of the key,
// or -1 on failure (table full along the probe path).
__device__ int insert_vertex(const int v_gindex, VertexHashTable ht_, Vertices v_, const float4 vc, const float4 vn)
{
    const int start_address = int(hash_function((uint)v_gindex) % ht_.t_size);
    // open hashing
    int h = start_address;
    int e = 1;
    for (int loop = 0; loop < 128; loop++) {
        int old = atomicCAS(&ht_.key[h], EMPTY_BUCKET_32, v_gindex);
        if (old == EMPTY_BUCKET_32) {
            // this thread owns the vertex: append it
            const int a_ = atomicAdd(v_.t_size, 1);
            v_.vertices[a_] = vc;
            v_.normals[a_] = vn;
            ht_.addr[h] = a_;
            return h;
        }
        else if (v_gindex == old) {
            // vertex key already in table
            return h;
        }
        else {
            // step with quadratic probing
            h = (h + e*e) % ht_.t_size;
            e = e + 1;
            if (h == start_address) {
                printf("ERROR: can't find free bucket for %d\n", v_gindex);
                return -1;
            }
        }
    }
    return -1;
}
// Like insert_vertex, but defers writing the vertex/normal data: on first
// insertion it only reserves an array slot (returned in `address`) so the
// caller can fill it, reducing global memory traffic. When the key already
// exists, `address` is set to -1. Returns the hash table position of the
// key, or -1 on failure.
__device__ int insert_vertex_fast(const int v_gindex, VertexHashTable ht_, Vertices v_,int& address)
{
    const int start_address = int(hash_function((uint)v_gindex) % ht_.t_size);
    // open hashing
    int h = start_address;
    int e = 1;
    for (int loop = 0; loop < 128; loop++) {
        int old = atomicCAS(&ht_.key[h], EMPTY_BUCKET_32, v_gindex);
        if (old == EMPTY_BUCKET_32) {
            // reserve a slot; the caller writes the vertex data
            const int a_ = atomicAdd(v_.t_size, 1);
            //v_.vertices[a_] = vc;
            //v_.normals[a_] = vn;
            ht_.addr[h] = a_;
            address = a_;
            return h;
        }
        else if (v_gindex == old) {
            // vertex key already in table
            address = -1;
            return h;
        }
        else {
            // step with quadratic probing
            h = (h + e*e) % ht_.t_size;
            e = e + 1;
            if (h == start_address) {
                printf("ERROR: can't find free bucket for %d\n", v_gindex);
                return -1;
            }
        }
    }
    return -1;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// find vertex global index in hash table
// Values were store at the hash address with open hashing and
// Look up the array position of the vertex with global key gl_index, using
// the same quadratic probing sequence as the insert functions.
// Returns -1 (and prints an error) if the key is not found within 128 probes.
__device__ int find_vertex(const int gl_index, VertexHashTable ht_)
{
    // compute hash for global index
    const int pos = int(hash_function((uint)gl_index) % ht_.t_size);
    // open hashing with quadratic probing
    int h = pos;
    int e = 1;
    for (int loop = 0; loop < 128; loop++) {
        if (ht_.key[h] == gl_index) {
            return ht_.addr[h];
        }
        else {
            // step with quadratic probing
            h = (h + e*e) % ht_.t_size;
            e = e + 1;
        }
    }
    printf("ERROR: can't find gl_index in hash table: gl_index %d at %d\n",gl_index, pos);
    return -1;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// 64 bit hash table to use Cantor's pairing function
// 64 bit mix function
// 64 bit integer mix used on Cantor-paired edge keys. The shift/add pairs of
// the original are rewritten as their equivalent multiplications (unsigned
// arithmetic wraps, so the results are bit-identical).
__device__ unsigned long long hash64shift(unsigned long long key)
{
    key = (key << 21) - key - 1;  // == (~key) + (key << 21)
    key ^= key >> 24;
    key *= 265ull;                // == (key + (key << 3)) + (key << 8)
    key ^= key >> 14;
    key *= 21ull;                 // == (key + (key << 2)) + (key << 4)
    key ^= key >> 28;
    key += key << 31;
    return key;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Insert the unique id of a halfedge obtained using the bijective Cantor's pairing function
// into the hast table. At the same position in array, save actuall have edge address, which
// will be used later to connect twin edges.
// Insert the unique id of a halfedge (Cantor's pairing function of its two
// vertex ids, order independent) into the hash table; the halfedge address
// is stored in he_ids[h].x for the (v0 < v1) orientation and he_ids[h].y for
// the opposite one, so twins can be connected later.
// Returns false if no bucket was found.
__device__ bool insert_halfedge_id(const int t_size, unsigned long long *he_table, int2* he_ids, int he_addr, int v0, int v1)
{
    //const unsigned long long EMPTY_BUCKET = 0ull;
    // compute pairing function value
    unsigned long long x = (unsigned long long)v0;
    unsigned long long y = (unsigned long long)v1;
    unsigned long long he_id = (x < y) ? y : x;
    he_id = he_id + (x + y) * (x + y + 1) / 2ull;
    // evalue hash function
    unsigned long long l_size = (unsigned long long)t_size;
    //unsigned long long he_id = (v0 < v1) ? (unsigned long long)v0 | ((unsigned long long)v1 << 32) : (unsigned long long)v1 | ((unsigned long long)v0 << 32);
    //const int start_address = int(hash64shift(he_id) % l_size);
    const int start_address = int( he_id % l_size );
    // open hashing
    int h = start_address;
    int e = 1;
    for (int loop = 0; loop < 128; loop++) {
        unsigned long long old = atomicCAS(&he_table[h], EMPTY_BUCKET_64, he_id);
        if (old == EMPTY_BUCKET_64 || old == he_id) {
            // claimed a fresh bucket, or found the twin's bucket
            if (v0 < v1) {
                he_ids[h].x = he_addr;
            }
            else {
                he_ids[h].y = he_addr;
            }
            return true;
        }
        else {
            // step with quadratic probing
            h = (h + e*e) % t_size;
            e = e + 1;
            if (h == start_address) {
                // BUG FIX: he_id is unsigned long long; %d is the wrong
                // format specifier (undefined output) — use %llu
                printf("ERROR: he can't find free bucket for %llu\n", he_id);
                return false;
            }
        }
    }
    return false;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute the cell vertices from uniform grid and cell indices
// Use spacing to compute vertex position
// Compute the 8 corner positions of grid cell (i, j, k) from the grid origin
// and spacing. Corner index bit layout: bit 0 = x offset, bit 1 = y offset,
// bit 2 = z offset (same ordering the original spelled out corner by corner).
__device__ void cell_vertices(float3 v[8], const int i, const int j, const int k, UGrid ugrid)
{
    // base corner (corner 0) in world coordinates
    const float px = ugrid.x0 + i * ugrid.dx;
    const float py = ugrid.y0 + j * ugrid.dy;
    const float pz = ugrid.z0 + k * ugrid.dz;
    for (int c = 0; c < 8; c++) {
        v[c].x = px + ((c     ) & 1) * ugrid.dx;
        v[c].y = py + ((c >> 1) & 1) * ugrid.dy;
        v[c].z = pz + ((c >> 2) & 1) * ugrid.dz;
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute gradient of scalar field at the vertices
// Use central differences, at the boundaries use forward
// or backward differences correspondigly
// Compute the scalar field gradient at the 8 corners of cell (i,j,k) by
// central differences from the 3D texture u. At grid boundaries u_index
// clamps the out-of-range sample and sets the divisor f to 1 (one-sided
// difference) instead of 2. The "+1" neighbors inside the cell are never
// clamped because cells only run up to dim-2 (see the bounds check in mc_count).
__device__ void gradient(float3 n[8], hipTextureObject_t u, UGrid ugrid, const int i_index, const int j_index, const int k_index)
{
    const int idim = ugrid.idim;
    const int jdim = ugrid.jdim;
    const int kdim = ugrid.kdim;
    const float dx = ugrid.dx;
    const float dy = ugrid.dy;
    const float dz = ugrid.dz;
    int v0, v1;
    float f = 2.f;
    // clamp index i into [0, dim-1]; f becomes 1 when clamping occurred
    auto u_index = [](const int dim, int i, float& f) {
        f = (i<0) || (i >= dim) ? 1 : 2;
        i = (i<0) ? 0 : i;
        i = (i >= dim) ? dim - 1 : i;
        return i;
    };
    // 8 vertices
    // v0, x
    v0 = u_index(idim, i_index - 1, f);
    v1 = i_index + 1;
    n[0].x = (tex3D<float>(u, v1, j_index, k_index) - tex3D<float>(u, v0, j_index, k_index)) / (f * dx);
    // v0, y
    v0 = u_index(jdim, j_index - 1, f);
    v1 = j_index + 1;
    n[0].y = (tex3D<float>(u, i_index, v1, k_index) - tex3D<float>(u, i_index, v0, k_index)) / (f * dy);
    // v0, z
    v0 = u_index(kdim, k_index - 1, f);
    v1 = k_index + 1;
    n[0].z = (tex3D<float>(u, i_index, j_index, v1) - tex3D<float>(u, i_index, j_index, v0)) / (f * dz);
    // v1, x
    v0 = i_index;
    v1 = u_index(idim, i_index + 2, f);
    n[1].x = (tex3D<float>(u, v1, j_index, k_index) - tex3D<float>(u, v0, j_index, k_index)) / (f * dx);
    // v1, y
    v0 = u_index(jdim, j_index - 1, f);
    v1 = j_index + 1;
    n[1].y = (tex3D<float>(u, i_index+1, v1, k_index) - tex3D<float>(u, i_index+1, v0, k_index)) / (f * dy);
    // v1, z
    v0 = u_index(kdim, k_index - 1, f);
    v1 = k_index + 1;
    n[1].z = (tex3D<float>(u, i_index+1, j_index, v1) - tex3D<float>(u, i_index+1, j_index, v0)) / (f * dz);
    // v2, x
    v0 = u_index(idim, i_index - 1, f);
    v1 = i_index + 1;
    n[2].x = (tex3D<float>(u, v1, j_index+1, k_index) - tex3D<float>(u, v0, j_index+1, k_index)) / (f * dx);
    // v2, y
    v0 = j_index;
    v1 = u_index(jdim,j_index + 2, f);
    n[2].y = (tex3D<float>(u, i_index, v1, k_index) - tex3D<float>(u, i_index, v0, k_index)) / (f * dy);
    // v2, z
    v0 = u_index(kdim, k_index - 1, f);
    v1 = k_index + 1, f; // NOTE(review): ", f" is a no-op comma operator (typo); f keeps the value set by the u_index call above, which is the correct divisor here
    n[2].z = (tex3D<float>(u, i_index, j_index+1, v1) - tex3D<float>(u, i_index, j_index+1, v0)) / (f * dz);
    // v3, x
    v0 = i_index;
    v1 = u_index(idim, i_index + 2, f);
    n[3].x = (tex3D<float>(u, v1, j_index+1, k_index) - tex3D<float>(u, v0, j_index+1, k_index)) / (f * dx);
    // v3, y
    v0 = j_index;
    v1 = u_index(jdim, j_index + 2, f);
    n[3].y = (tex3D<float>(u, i_index+1, v1, k_index) - tex3D<float>(u, i_index+1, v0, k_index)) / (f * dy);
    // v3, z
    v0 = u_index(kdim, k_index - 1, f);
    v1 = k_index + 1;
    n[3].z = (tex3D<float>(u, i_index+1, j_index+1, v1) - tex3D<float>(u, i_index+1, j_index+1, v0)) / (f * dz);
    // v4, x
    v0 = u_index(idim, i_index - 1, f);
    v1 = i_index + 1;
    n[4].x = (tex3D<float>(u, v1, j_index, k_index+1) - tex3D<float>(u, v0, j_index, k_index+1)) / (f * dx);
    // v4, y
    v0 = u_index(jdim, j_index - 1, f);
    v1 = j_index + 1;
    n[4].y = (tex3D<float>(u, i_index, v1, k_index+1) - tex3D<float>(u, i_index, v0, k_index+1)) / (f * dy);
    // v4, z
    v0 = k_index;
    v1 = u_index(kdim, k_index + 2, f);
    n[4].z = (tex3D<float>(u, i_index, j_index, v1) - tex3D<float>(u, i_index, j_index, v0)) / (f * dz);
    // v5, x
    v0 = i_index;
    v1 = u_index(idim, i_index + 2, f);
    n[5].x = (tex3D<float>(u, v1, j_index, k_index+1) - tex3D<float>(u, v0, j_index, k_index+1)) / (f * dx);
    // v5, y
    v0 = u_index(jdim, j_index - 1, f);
    v1 = j_index + 1;
    n[5].y = (tex3D<float>(u, i_index+1, v1, k_index+1) - tex3D<float>(u, i_index+1, v0, k_index+1)) / (f * dy);
    // v5, z
    v0 = k_index;
    v1 = u_index(kdim, k_index + 2, f);
    n[5].z = (tex3D<float>(u, i_index+1, j_index, v1) - tex3D<float>(u, i_index+1, j_index, v0)) / (f * dz);
    // v6, x
    v0 = u_index(idim, i_index - 1, f);
    v1 = i_index + 1;
    n[6].x = (tex3D<float>(u, v1, j_index+1, k_index+1) - tex3D<float>(u, v0, j_index+1, k_index+1)) / (f * dx);
    // v6, y
    v0 = j_index;
    v1 = u_index(jdim, j_index + 2, f);
    n[6].y = (tex3D<float>(u, i_index, v1, k_index+1) - tex3D<float>(u, i_index, v0, k_index+1)) / (f * dy);
    // v6, z
    v0 = k_index;
    v1 = u_index(kdim, k_index + 2, f);
    n[6].z = (tex3D<float>(u, i_index, j_index+1, v1) - tex3D<float>(u, i_index, j_index+1, v0)) / (f * dz);
    // v7, x
    v0 = i_index;
    v1 = u_index(idim, i_index + 2, f);
    n[7].x = (tex3D<float>(u, v1, j_index + 1, k_index + 1) - tex3D<float>(u, v0, j_index + 1, k_index + 1)) / (f * dx);
    // v7, y
    v0 = j_index;
    v1 = u_index(jdim, j_index + 2, f);
    n[7].y = (tex3D<float>(u, i_index + 1, v1, k_index + 1) - tex3D<float>(u, i_index + 1, v0, k_index + 1)) / (f * dy);
    // v7, z
    v0 = k_index;
    v1 = u_index(kdim, k_index + 2, f);
    n[7].z = (tex3D<float>(u, i_index+1, j_index+1, v1) - tex3D<float>(u, i_index + 1, j_index + 1, v0)) / (f * dz);
}
// Same per-corner gradient computation as gradient(), but results are written
// to slots n[tr]..n[tr+7]. tr is presumably the base offset of this thread's
// 8 corner slots in a shared array — confirm with callers.
__device__ void gradientShared(const int tr, float3 n[8], hipTextureObject_t u, UGrid ugrid, const int i_index, const int j_index, const int k_index)
{
    const int idim = ugrid.idim;
    const int jdim = ugrid.jdim;
    const int kdim = ugrid.kdim;
    const float dx = ugrid.dx;
    const float dy = ugrid.dy;
    const float dz = ugrid.dz;
    int v0, v1;
    float f = 2.f;
    // clamp index i into [0, dim-1]; f becomes 1 when clamping occurred
    auto u_index = [](const int dim, int i, float& f) {
        f = (i<0) || (i >= dim) ? 1 : 2;
        i = (i<0) ? 0 : i;
        i = (i >= dim) ? dim - 1 : i;
        return i;
    };
    // 8 vertices
    // v0, x
    v0 = u_index(idim, i_index - 1, f);
    v1 = i_index + 1;
    n[tr].x = (tex3D<float>(u, v1, j_index, k_index) - tex3D<float>(u, v0, j_index, k_index)) / (f * dx);
    // v0, y
    v0 = u_index(jdim, j_index - 1, f);
    v1 = j_index + 1;
    n[tr].y = (tex3D<float>(u, i_index, v1, k_index) - tex3D<float>(u, i_index, v0, k_index)) / (f * dy);
    // v0, z
    v0 = u_index(kdim, k_index - 1, f);
    v1 = k_index + 1;
    n[tr].z = (tex3D<float>(u, i_index, j_index, v1) - tex3D<float>(u, i_index, j_index, v0)) / (f * dz);
    // v1, x
    v0 = i_index;
    v1 = u_index(idim, i_index + 2, f);
    n[tr+1].x = (tex3D<float>(u, v1, j_index, k_index) - tex3D<float>(u, v0, j_index, k_index)) / (f * dx);
    // v1, y
    v0 = u_index(jdim, j_index - 1, f);
    v1 = j_index + 1;
    n[tr + 1].y = (tex3D<float>(u, i_index + 1, v1, k_index) - tex3D<float>(u, i_index + 1, v0, k_index)) / (f * dy);
    // v1, z
    v0 = u_index(kdim, k_index - 1, f);
    v1 = k_index + 1;
    n[tr + 1].z = (tex3D<float>(u, i_index + 1, j_index, v1) - tex3D<float>(u, i_index + 1, j_index, v0)) / (f * dz);
    // v2, x
    v0 = u_index(idim, i_index - 1, f);
    v1 = i_index + 1;
    n[tr + 2].x = (tex3D<float>(u, v1, j_index + 1, k_index) - tex3D<float>(u, v0, j_index + 1, k_index)) / (f * dx);
    // v2, y
    v0 = j_index;
    v1 = u_index(jdim, j_index + 2, f);
    n[tr + 2].y = (tex3D<float>(u, i_index, v1, k_index) - tex3D<float>(u, i_index, v0, k_index)) / (f * dy);
    // v2, z
    v0 = u_index(kdim, k_index - 1, f);
    v1 = k_index + 1, f; // NOTE(review): ", f" is a no-op comma operator (typo); f keeps the value set by the u_index call above, which is the correct divisor here
    n[tr + 2].z = (tex3D<float>(u, i_index, j_index + 1, v1) - tex3D<float>(u, i_index, j_index + 1, v0)) / (f * dz);
    // v3, x
    v0 = i_index;
    v1 = u_index(idim, i_index + 2, f);
    n[tr + 3].x = (tex3D<float>(u, v1, j_index + 1, k_index) - tex3D<float>(u, v0, j_index + 1, k_index)) / (f * dx);
    // v3, y
    v0 = j_index;
    v1 = u_index(jdim, j_index + 2, f);
    n[tr + 3].y = (tex3D<float>(u, i_index + 1, v1, k_index) - tex3D<float>(u, i_index + 1, v0, k_index)) / (f * dy);
    // v3, z
    v0 = u_index(kdim, k_index - 1, f);
    v1 = k_index + 1;
    n[tr + 3].z = (tex3D<float>(u, i_index + 1, j_index + 1, v1) - tex3D<float>(u, i_index + 1, j_index + 1, v0)) / (f * dz);
    // v4, x
    v0 = u_index(idim, i_index - 1, f);
    v1 = i_index + 1;
    n[tr + 4].x = (tex3D<float>(u, v1, j_index, k_index + 1) - tex3D<float>(u, v0, j_index, k_index + 1)) / (f * dx);
    // v4, y
    v0 = u_index(jdim, j_index - 1, f);
    v1 = j_index + 1;
    n[tr + 4].y = (tex3D<float>(u, i_index, v1, k_index + 1) - tex3D<float>(u, i_index, v0, k_index + 1)) / (f * dy);
    // v4, z
    v0 = k_index;
    v1 = u_index(kdim, k_index + 2, f);
    n[tr + 4].z = (tex3D<float>(u, i_index, j_index, v1) - tex3D<float>(u, i_index, j_index, v0)) / (f * dz);
    // v5, x
    v0 = i_index;
    v1 = u_index(idim, i_index + 2, f);
    n[tr + 5].x = (tex3D<float>(u, v1, j_index, k_index + 1) - tex3D<float>(u, v0, j_index, k_index + 1)) / (f * dx);
    // v5, y
    v0 = u_index(jdim, j_index - 1, f);
    v1 = j_index + 1;
    n[tr + 5].y = (tex3D<float>(u, i_index + 1, v1, k_index + 1) - tex3D<float>(u, i_index + 1, v0, k_index + 1)) / (f * dy);
    // v5, z
    v0 = k_index;
    v1 = u_index(kdim, k_index + 2, f);
    n[tr + 5].z = (tex3D<float>(u, i_index + 1, j_index, v1) - tex3D<float>(u, i_index + 1, j_index, v0)) / (f * dz);
    // v6, x
    v0 = u_index(idim, i_index - 1, f);
    v1 = i_index + 1;
    n[tr + 6].x = (tex3D<float>(u, v1, j_index + 1, k_index + 1) - tex3D<float>(u, v0, j_index + 1, k_index + 1)) / (f * dx);
    // v6, y
    v0 = j_index;
    v1 = u_index(jdim, j_index + 2, f);
    n[tr + 6].y = (tex3D<float>(u, i_index, v1, k_index + 1) - tex3D<float>(u, i_index, v0, k_index + 1)) / (f * dy);
    // v6, z
    v0 = k_index;
    v1 = u_index(kdim, k_index + 2, f);
    n[tr + 6].z = (tex3D<float>(u, i_index, j_index + 1, v1) - tex3D<float>(u, i_index, j_index + 1, v0)) / (f * dz);
    // v7, x
    v0 = i_index;
    v1 = u_index(idim, i_index + 2, f);
    n[tr + 7].x = (tex3D<float>(u, v1, j_index + 1, k_index + 1) - tex3D<float>(u, v0, j_index + 1, k_index + 1)) / (f * dx);
    // v7, y
    v0 = j_index;
    v1 = u_index(jdim, j_index + 2, f);
    n[tr + 7].y = (tex3D<float>(u, i_index + 1, v1, k_index + 1) - tex3D<float>(u, i_index + 1, v0, k_index + 1)) / (f * dy);
    // v7, z
    v0 = k_index;
    v1 = u_index(kdim, k_index + 2, f);
    n[tr + 7].z = (tex3D<float>(u, i_index + 1, j_index + 1, v1) - tex3D<float>(u, i_index + 1, j_index + 1, v0)) / (f * dz);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// old fashined way to compute the gradient from a uniform grid
// Older loop-based variant of gradient(): central differences at each of the
// 8 cell corners, falling back to one-sided differences (factor 1) at the
// grid boundaries.
__device__ void gradient2(float3 n[8], hipTextureObject_t u, const UGrid ugrid, const int i_index, const int j_index, const int k_index)
{
    for (int _k = 0; _k <= 1; _k++) {
        int k = k_index + _k;
        for (int _j = 0; _j <= 1; _j++) {
            int j = j_index + _j;
            for (int _i = 0; _i <= 1; _i++) {
                int i = i_index + _i;
                // set gradient at vertex; corner index: bit 0 = x, bit 1 = y, bit 2 = z
                unsigned int v_index{ 0 };
                v_index |= (_i) & 1;
                v_index |= (_j << 1) & 2;
                v_index |= (_k << 2) & 4;
                // x-component
                float factor{ 1.f };
                int i1{ 0 };
                int i2{ 0 };
                if (i == 0) {
                    // forward difference at the lower boundary
                    i1 = i;
                    i2 = i + 1;
                }
                else if (i == ugrid.idim - 1) {
                    // backward difference at the upper boundary
                    i1 = i - 1;
                    i2 = i;
                }
                else {
                    // central difference in the interior
                    i1 = i - 1;
                    i2 = i + 1;
                    factor = 2.f;
                }
                n[v_index].x = (tex3D<float>(u, i2, j, k) - tex3D<float>(u, i1, j, k)) / (factor * ugrid.dx);
                // y-component
                factor = 1.f;
                if (j == 0) {
                    i1 = j;
                    i2 = j + 1;
                }
                else if (j == ugrid.jdim - 1) {
                    i1 = j - 1;
                    i2 = j;
                }
                else {
                    i1 = j - 1;
                    i2 = j + 1;
                    factor = 2.f;
                }
                n[v_index].y = (tex3D<float>(u, i, i2, k) - tex3D<float>(u, i, i1, k)) / (factor * ugrid.dy);
                // z-component
                factor = 1.f;
                if (k == 0) {
                    i1 = k;
                    i2 = k + 1;
                }
                else if (k == ugrid.kdim - 1) {
                    i1 = k - 1;
                    i2 = k;
                }
                else {
                    i1 = k - 1;
                    i2 = k + 1;
                    factor = 2.f;
                }
                n[v_index].z = (tex3D<float>(u, i, j, i2) - tex3D<float>(u, i, j, i1)) / (factor * ugrid.dz);
            }
        }
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// CUDA GLOBAL FUNCTIONS
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// init hash table
// Kernel: mark every bucket of the vertex hash table as empty.
// Launch with at least ht_.t_size threads in total.
__global__ void init_hash_table(VertexHashTable ht_)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < ht_.t_size) {
        ht_.key[tid] = EMPTY_BUCKET_32;
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// TOPLOGICALLY CORRECT MARCHING CUBES
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Marching Cubes
// Count cells being intersected by isosurface
// Compute the number of vertices
__global__ void mc_count(CellsIds cells, AmbiguousCells acells, int* v_count, int* a_count, float i0, hipTextureObject_t v_data, UGrid ugrid, MC_lookup l_tables)
{
	// Pass 1 of marching cubes: classify one cell per thread against the
	// iso-value i0, count the vertices it will generate, and record the
	// cell id — ambiguous cases (BIT_16 set in the edge-table entry) go
	// into acells for a separate kernel, all others into cells.
	const int gl_index = blockIdx.x * blockDim.x + threadIdx.x;
	const int i = ugrid.i_index(gl_index);
	const int j = ugrid.j_index(gl_index);
	const int k = ugrid.k_index(gl_index);
	// cells are addressed by their min-corner vertex; skip the last slab
	if (i >= ugrid.idim - 1 || j >= ugrid.jdim - 1 || k >= ugrid.kdim - 1)
		return;
	// Sample the scalar field at the 8 cell corners and build the MC case
	// index: bit b is set when corner b lies on or above the iso-value.
	// Corner b has offsets (b&1, (b>>1)&1, (b>>2)&1) from (i,j,k), which
	// reproduces the original u[0..7] ordering exactly.
	uint i_case{ 0 };
	for (int b = 0; b < 8; b++) {
		const float s = tex3D<float>(v_data, i + (b & 1), j + ((b >> 1) & 1), k + ((b >> 2) & 1));
		i_case |= ((uint)(s >= i0)) << b;
	}
	// number of vertices this cell will produce (popcount of the edge flags)
	const ushort e_ = l_tables.e_[i_case];
	const int nr_vertices = numberOfSetBits<ushort>(e_);
	if (nr_vertices == 0)
		return;
	if (e_ & BIT_16) {
		// ambiguous case: defer to the topology-correct kernel
		atomicAdd(a_count, nr_vertices);
		acells.a_cells[atomicAdd(acells.t_size, 1)] = gl_index;
	}
	else {
		atomicAdd(v_count, nr_vertices);
		cells.cells_[atomicAdd(cells.t_size, 1)] = gl_index;
	}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Based on standard Marching Cubes compute cell triangulation for non ambiguous cases
// Save cell global id to process ambiguous cases later
// Parameters
// @param i0 is the iso-value
// @param v_data is a 3D texture with the volume data
// @param ugird contains information describing the uniform grid
// @param l_tables is a structure with pointers to the lookup table for MC
// struct MC_lookup {
// unsigned short* e_pattern;
// int* t_pattern;
// uint* t_ambig;
// };
// @param nr_cells is the total nr. of cells intersected by the iso-surface
// @param cellid is a field with the global id of the cells in the uniform grid
// @param c_addr is an atomic counter used to compute the index in the array a_cells with ids of the cells which have an ambiguous case
// @param a_cell a pointer to a field containing the ids of the ambiguous cases to be processed later
// @param ht_ hash table to compute unique vertex index
// @param v_ a structure containing all pointer required for vertex processing
// struct Vertices {
// float4* vertices{ nullptr };
// float4* normals{ nullptr };
// int* t_size{ nullptr };
// };
// @param t_ a structure containing all pointer required for triangle processing
// struct Triangles {
// int3* triangles{ nullptr };
// int* t_size{ nullptr };
// };
// Pass 2 of marching cubes: triangulate all non-ambiguous cells.
// One thread per intersected cell (ids in cells.cells_); blockDim.x is
// expected to equal MC_BLOCKSIZE (sizes the shared staging buffer).
// Up to 5 triangles per thread are staged in shared memory (slot t of
// thread tr lives at tris[tr + t*blockDim.x]) and flushed to global
// memory after the block-wide barrier.
__global__ void mc_slice(const float i0, hipTextureObject_t v_data, UGrid ugrid, MC_lookup l_tables, const int nr_cells, CellsIds cells, VertexHashTable ht_, Vertices v_, Triangles t_)
{
	__shared__ int4 tris[5 * MC_BLOCKSIZE];
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	const int bz = blockDim.x;
	const int tr = threadIdx.x;
	// BUGFIX: the previous version returned here when tid >= nr_cells, so
	// threads of the last block exited before the __syncthreads() below
	// while their siblings reached it — undefined behavior for a block-wide
	// barrier. All threads now stay alive until the final barrier and the
	// out-of-range ones simply do no work.
	const bool active = (tid < nr_cells);
	// invalidate this thread's 5 triangle slots
	for (int s = tr; s < 5 * bz; s += bz)
		tris[s].x = -1;
	if (active) {
		// grid indices of the cell assigned to this thread
		const int gl_index = cells.cells_[tid];
		const int i_index = ugrid.i_index(gl_index);
		const int j_index = ugrid.j_index(gl_index);
		const int k_index = ugrid.k_index(gl_index);
		// positions of the 8 cell corners
		float3 v[8];
		cell_vertices(v, i_index, j_index, k_index, ugrid);
		// scalar field at the 8 corners
		float u[8];
		u[0] = tex3D<float>(v_data, i_index, j_index, k_index);
		u[1] = tex3D<float>(v_data, i_index + 1, j_index, k_index);
		u[2] = tex3D<float>(v_data, i_index, j_index + 1, k_index);
		u[3] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index);
		u[4] = tex3D<float>(v_data, i_index, j_index, k_index + 1);
		u[5] = tex3D<float>(v_data, i_index + 1, j_index, k_index + 1);
		u[6] = tex3D<float>(v_data, i_index, j_index + 1, k_index + 1);
		u[7] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index + 1);
		// gradients of the scalar field at the corners (used as normals)
		float3 n[8];
		gradient(n, v_data, ugrid, i_index, j_index, k_index);
		// MC case index: bit b set when corner b is on or above the iso-value
		uint i_case{ 0 };
		for (int b = 0; b < 8; b++)
			i_case |= ((uint)(u[b] >= i0)) << b;
		// edge intersection flags for this case (bit e set: edge e is cut)
		const ushort e_ = l_tables.e_[i_case];
		// 5 bits per edge: x/y/z offsets of the owning grid vertex plus a
		// 2-bit selector for one of the 3 edges attached to that vertex
		const unsigned long long gei_pattern_ = 670526590282893600ull;
		// end vertices of each local edge, packed as (v1 << 4) | v0
		const unsigned char l_edges_[12]{ 16, 49, 50, 32, 84, 117, 118, 100, 64, 81, 115, 98 };
		int v_gindex[12];
		ushort flag{ 1 };
		for (int e = 0; e < 12; e++) {
			v_gindex[e] = -1;
			if (flag & e_) {
				// globally unique id of the vertex sitting on this edge
				const int ix = i_index + (int)((gei_pattern_ >> 5 * e) & 1);
				const int iy = j_index + (int)((gei_pattern_ >> (5 * e + 1)) & 1);
				const int iz = k_index + (int)((gei_pattern_ >> (5 * e + 2)) & 1);
				const int off_val = (int)((gei_pattern_ >> (5 * e + 3)) & 3);
				int address{ -1 };
				v_gindex[e] = insert_vertex_fast(int(9 * ugrid.gl_index(ix, iy, iz) + off_val), ht_, v_, address);
				// address > -1: this thread won the hash-table insert and
				// must compute the vertex position and normal
				if (address > -1) {
					const int v0 = (l_edges_[e] & 0xF);
					const int v1 = (l_edges_[e] >> 4) & 0xF;
					// local coordinate of the iso-crossing along the edge
					const float l = (i0 - u[v0]) / (u[v1] - u[v0]);
					float4 vp = make_float4(v[v0].x + l*(v[v1].x - v[v0].x), v[v0].y + l*(v[v1].y - v[v0].y), v[v0].z + l*(v[v1].z - v[v0].z), 1.f);
					float4 np = make_float4(n[v0].x + l*(n[v1].x - n[v0].x), n[v0].y + l*(n[v1].y - n[v0].y), n[v0].z + l*(n[v1].z - n[v0].z), 0.f);
					// normalize the interpolated normal
					const float length = sqrtf(np.x * np.x + np.y * np.y + np.z * np.z);
					np.x = np.x / length;
					np.y = np.y / length;
					np.z = np.z / length;
					v_.vertices[address] = vp;
					v_.normals[address] = np;
				}
			}
			flag <<= 1;
		}
		// triangle list for this case: 4 bits per vertex index, 0xF terminates
		const unsigned long long tl_ = l_tables.t_[i_case];
		for (int t = 0; t < 16; t += 3) {
			const int t0 = (int)((tl_ >> (4 * t)) & 0xFull);
			if (t0 == 0xF) {
				// no more triangles
				break;
			}
			const int t1 = (int)((tl_ >> (4 * (t + 1))) & 0xFull);
			const int t2 = (int)((tl_ >> (4 * (t + 2))) & 0xFull);
			// stage triangle t/3 in this thread's shared-memory slot
			tris[tr + (t / 3) * bz] = make_int4(v_gindex[t0], v_gindex[t1], v_gindex[t2], 0);
		}
	}
	__syncthreads();
	// flush staged triangles; inactive threads left all their slots at -1
	for (int s = tr; s < 5 * bz; s += bz) {
		if (tris[s].x > -1) {
			t_.triangles[atomicAdd(t_.t_size, 1)] = tris[s];
		}
	}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute the triangulation of a cell with an ambiguous case
__global__ void t_slice(const float i0, hipTextureObject_t v_data, UGrid ugrid, MC_lookup l_tables, const int nr_cells, AmbiguousCells acells, VertexHashTable ht_, Vertices v_, Triangles t_)
{
__shared__ float3 n[8 * AMB_BLOCKSIZE];
__shared__ float3 p[8 * AMB_BLOCKSIZE];
// get cell id
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (nr_cells <= tid)
return;
// compute grid indices from global index
const int gl_index = acells.a_cells[tid];
const int i_index = ugrid.i_index(gl_index);
const int j_index = ugrid.j_index(gl_index);
const int k_index = ugrid.k_index(gl_index);
const int tr = threadIdx.x * 8;
// construct 8 cell vertices
//float3 p[8];
//cell_vertices(p, i_index, j_index, k_index, ugrid);
p[tr].x = ugrid.x0 + i_index * ugrid.dx;
p[tr].y = ugrid.y0 + j_index * ugrid.dy;
p[tr].z = ugrid.z0 + k_index * ugrid.dz;
p[tr + 1].x = p[tr].x + ugrid.dx;
p[tr + 1].y = p[tr].y;
p[tr + 1].z = p[tr].z;
p[tr + 2].x = p[tr].x;
p[tr + 2].y = p[tr].y + ugrid.dy;
p[tr + 2].z = p[tr].z;
p[tr + 3].x = p[tr].x + ugrid.dx;
p[tr + 3].y = p[tr].y + ugrid.dy;
p[tr + 3].z = p[tr].z;
p[tr + 4].x = p[tr].x;
p[tr + 4].y = p[tr].y;
p[tr + 4].z = p[tr].z + ugrid.dz;
p[tr + 5].x = p[tr].x + ugrid.dx;
p[tr + 5].y = p[tr].y;
p[tr + 5].z = p[tr].z + ugrid.dz;
p[tr + 6].x = p[tr].x;
p[tr + 6].y = p[tr].y + ugrid.dy;
p[tr + 6].z = p[tr].z + ugrid.dz;
p[tr + 7].x = p[tr].x + ugrid.dx;
p[tr + 7].y = p[tr].y + ugrid.dy;
p[tr + 7].z = p[tr].z + ugrid.dz;
// scalar values at vertices
float F[8];
F[0] = tex3D<float>(v_data, i_index, j_index, k_index);
F[1] = tex3D<float>(v_data, i_index + 1, j_index, k_index);
F[2] = tex3D<float>(v_data, i_index, j_index + 1, k_index);
F[3] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index);
F[4] = tex3D<float>(v_data, i_index, j_index, k_index + 1);
F[5] = tex3D<float>(v_data, i_index + 1, j_index, k_index + 1);
F[6] = tex3D<float>(v_data, i_index, j_index + 1, k_index + 1);
F[7] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index + 1);
// compute normals at vertices
//float3 n[8];
gradientShared(tr, n, v_data, ugrid, i_index, j_index, k_index);
__syncthreads();
// compute case
uint i_case{ 0 };
i_case = i_case + ((uint)(F[0] >= i0));
i_case = i_case + ((uint)(F[1] >= i0)) * 2;
i_case = i_case + ((uint)(F[2] >= i0)) * 4;
i_case = i_case + ((uint)(F[3] >= i0)) * 8;
i_case = i_case + ((uint)(F[4] >= i0)) * 16;
i_case = i_case + ((uint)(F[5] >= i0)) * 32;
i_case = i_case + ((uint)(F[6] >= i0)) * 64;
i_case = i_case + ((uint)(F[7] >= i0)) * 128;
// Compute intersection with edges
const unsigned long long gei_pattern_ = 670526590282893600ull;
const unsigned char l_edges_[12]{ 16, 49, 50, 32, 84, 117, 118, 100, 64, 81, 115, 98 };
// compute intersection with cell edges
float ecoord[12]{};
int v_gindex[12]{};
ushort flag{ 1 };
ushort e_ = l_tables.e_[i_case];
for (int e = 0; e < 12; e++) {
v_gindex[e] = -1;
//ecoord[e] = 0.f;
if (flag & e_) {
// get unique vertex index
// compute vertex global index
const int ix = i_index + (int)((gei_pattern_ >> 5 * e) & 1); // global_edge_id[eg][0];
const int iy = j_index + (int)((gei_pattern_ >> (5 * e + 1)) & 1); // global_edge_id[eg][1];
const int iz = k_index + (int)((gei_pattern_ >> (5 * e + 2)) & 1); // global_edge_id[eg][2];
const int off_val = (int)((gei_pattern_ >> (5 * e + 3)) & 3);
int address{ -1 };
v_gindex[e] = insert_vertex_fast(int(9 * ugrid.gl_index(ix, iy, iz) + off_val), ht_, v_,address);
// compute edge inersection
// compute local coordinate along edge
const int v0 = (l_edges_[e] & 0xF);
const int v1 = (l_edges_[e] >> 4) & 0xF;
const float l = (i0 - F[v0]) / (F[v1] - F[v0]);
if (address > -1) {
v_.vertices[address] = make_float4(p[tr + v0].x + l*(p[tr + v1].x - p[tr + v0].x), p[tr + v0].y + l*(p[tr + v1].y - p[tr + v0].y), p[tr + v0].z + l*(p[tr + v1].z - p[tr + v0].z), 1.f);
float4 np = make_float4(n[tr + v0].x + l*(n[tr + v1].x - n[tr + v0].x), n[tr + v0].y + l*(n[tr + v1].y - n[tr + v0].y), n[tr + v0].z + l*(n[tr + v1].z - n[v0].z), 0.f);
const float length = std::sqrt(np.x * np.x + np.y * np.y + np.z * np.z);
np.x = np.x / length;
np.y = np.y / length;
np.z = np.z / length;
v_.normals[address] = np;
}
//v_gindex[e] = v_.add(ht_, int(9 * ugrid.gl_index(ix, iy, iz) + off_val), vp, np);
// remember local coordinate along edge
ecoord[e] = l;
}
flag <<= 1;
}
// compute oriented contours
// 1. build segments
// 2. connect segments
// build up segments
// set segments map
unsigned char segm_[12] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
auto set_segm = [](const int ei, const int eo, unsigned char segm_[12]) {
segm_[ei] &= 0xF0;
segm_[ei] |= ((unsigned char)eo) & 0xF;
segm_[eo] &= 0xF;
segm_[eo] |= ((unsigned char)ei) << 4;
};
auto get_segm = [](const int e, const int pos, unsigned char segm_[12]) {
if (pos == 0)
return (int)(segm_[e] & 0xF);
else
return (int)((segm_[e] >> 4) & 0xF);
};
auto is_segm_set = [](const int e, unsigned char segm_[12]) {
return (segm_[e] != 0xFF);
};
auto unset_segm = [](const int e, unsigned char segm_[12]) {
segm_[e] = 0xFF;
};
// In order to compute oriented segments, the hexahedron has to be flatten.
// The insides of the faces of the hexahedron have to be all at the same
// side of the flattend hexa. This requires changing the order of the
// edges when reading from the faces
// code edges at face
unsigned short e_face_[6]{ (ushort)291, (ushort)18277, (ushort)18696, (ushort)10859, (ushort)33719, (ushort)38305 };
// code vertices at face
unsigned short v_face_[6]{ (ushort)12576, (ushort)25717, (ushort)5380, (ushort)29538, (ushort)8292, (ushort)30001 };
// reading edge from face
auto get_face_e = [e_face_](const int f, const int e) { return ((e_face_[f] >> (4 * e)) & 0xF); };
auto get_face_v = [v_face_](const int f, const int e) { return ((v_face_[f] >> (4 * e)) & 0xF); };
// compute oriented segments using the isoline scheme at the faces
auto asymptotic_decider = [](const float f0, const float f1, const float f2, const float f3) {
return (f0*f3 - f1*f2) / (f0 + f3 - f1 - f2);
};
uchar f_flag{ 0 };
for (int f = 0; f < 6; f++) {
// classify face
unsigned int f_case{ 0 };
const int v0 = get_face_v(f, 0);
const int v1 = get_face_v(f, 1);
const int v2 = get_face_v(f, 2);
const int v3 = get_face_v(f, 3);
const int e0 = get_face_e(f, 0);
const int e1 = get_face_e(f, 1);
const int e2 = get_face_e(f, 2);
const int e3 = get_face_e(f, 3);
const float f0 = F[v0];
const float f1 = F[v1];
const float f2 = F[v2];
const float f3 = F[v3];
if (f0 >= i0)
f_case |= BIT_1;
if (f1 >= i0)
f_case |= BIT_2;
if (f2 >= i0)
f_case |= BIT_3;
if (f3 >= i0)
f_case |= BIT_4;
switch (f_case)
{
case 1:
set_segm(e0, e3, segm_);
break;
case 2:
set_segm(e1, e0, segm_);
break;
case 3:
set_segm(e1, e3, segm_);
break;
case 4:
set_segm(e3, e2, segm_);
break;
case 5:
set_segm(e0, e2, segm_);
break;
case 6:
{
const float val = asymptotic_decider(f0, f1, f2, f3);
if (val > i0) {
set_segm(e3, e0, segm_);
set_segm(e1, e2, segm_);
}
else if (val < i0) {
set_segm(e1, e0, segm_);
set_segm(e3, e2, segm_);
}
else {
// set flag for this face
f_flag |= (1 << f);
float ec0 = ecoord[e0];
float ec1 = ecoord[e1];
float ec2 = ecoord[e2];
float ec3 = ecoord[e3];
if ((0x218 >> (f * 2)) & BIT_1) {
ec0 = 1 - ec0;
ec2 = 1 - ec2;
}
if ((0x218 >> (f * 2)) & BIT_2) {
ec1 = 1 - ec1;
ec3 = 1 - ec3;
}
if (ec1 < ec3 && ec0 > ec2) {
set_segm(e1, e0, segm_);
set_segm(e3, e2, segm_);
}
else if (ec1 > ec3 && ec0 < ec2) {
set_segm(e3, e0, segm_);
set_segm(e1, e2, segm_);
}
else {
return;
}
}
}
break;
case 7:
set_segm(e1, e2, segm_);
break;
case 8:
set_segm(e2, e1, segm_);
break;
case 9:
{
const double val = asymptotic_decider(f0, f1, f2, f3);
if (val > i0) {
set_segm(e0, e1, segm_);
set_segm(e2, e3, segm_);
}
else if (val < i0) {
set_segm(e0, e3, segm_);
set_segm(e2, e1, segm_);
}
else {
f_flag = (1 << f);
// singular case val == i0, there are no asymptotes
// check if there is a reasonable triangulation of the face
float ec0 = ecoord[e0];
float ec1 = ecoord[e1];
float ec2 = ecoord[e2];
float ec3 = ecoord[e3];
if ((0x218 >> (f * 2)) & BIT_1) {
ec0 = 1 - ec0;
ec2 = 1 - ec2;
}
if ((0x218 >> (f * 2)) & BIT_2) {
ec1 = 1 - ec1;
ec3 = 1 - ec3;
}
if (ec1 < ec3 && ec0 > ec2) {
set_segm(e0, e1, segm_);
set_segm(e2, e3, segm_);
}
else if (ec1 > ec3 && ec0 < ec2) {
set_segm(e0, e3, segm_);
set_segm(e2, e1, segm_);
}
else {
return;
}
}
}
break;
case 10:
set_segm(e2, e0, segm_);
break;
case 11:
set_segm(e2, e3, segm_);
break;
case 12:
set_segm(e3, e1, segm_);
break;
case 13:
set_segm(e0, e1, segm_);
break;
case 14:
set_segm(e3, e0, segm_);
break;
default:
break;
}
}
// connect oriented segments into oriented contours
// closed contours are coded in 64 bit unsigned long long
// 1) Each entry has 4 bits
// 2) The first 4 entries are reserved for the size of the contours
// 3) The next 12 entries are the indices of the edges constituting the contorus
// The indices are numbers from 0 to 12
unsigned long long c_ = 0xFFFFFFFFFFFF0000;
// in the 4 first bits store size of contours
auto get_cnt_size = [](const int cnt, unsigned long long &c_) {
return (size_t)((c_ & (0xF << 4 * cnt)) >> 4 * cnt);
};
auto set_cnt_size = [](const int cnt, const int size, unsigned long long &c_) {
// unset contour size
c_ &= ~(0xF << 4 * cnt);
c_ |= (size << 4 * cnt);
};
// set corresponging edge
auto set_c = [](const int cnt, const int pos, const int val, unsigned long long &c_) {
const uint mask[4] = { 0x0, 0xF, 0xFF, 0xFFF };
const uint c_sz = c_ & mask[cnt];
const uint e = 16 + 4 * ((c_sz & 0xF) + ((c_sz & 0xF0) >> 4) + ((c_sz & 0xF00) >> 8) + pos);
c_ &= ~(((unsigned long long)0xF) << e);
c_ |= (((unsigned long long)val) << e);
};
// read edge from contour
auto get_c = [](const int cnt, const int pos, unsigned long long c_) {
const uint mask[4] = { 0x0, 0xF, 0xFF, 0xFFF };
const uint c_sz = (uint)(c_ & mask[cnt]);
const uint e = 16 + 4 * ((c_sz & 0xF) + ((c_sz & 0xF0) >> 4) + ((c_sz & 0xF00) >> 8) + pos);
return (int)((c_ >> e) & 0xF);
};
// connect oriented contours
uint cnt_{ 0 };
for (uint e = 0; e < 12; e++) {
if (is_segm_set(e, segm_)) {
uint eTo = get_segm(e, 0, segm_);
uint eIn = get_segm(e, 1, segm_);
uint eStart = e;
uint pos = 0;
set_c(cnt_, pos, eStart, c_);
while (eTo != eStart) {
pos = pos + 1;
set_c(cnt_, pos, eTo, c_);
eIn = eTo;
eTo = get_segm(eIn, 0, segm_);
unset_segm(eIn, segm_);
}
// set contour length
set_cnt_size(cnt_, pos + 1, c_);
// update number of contours
cnt_ = cnt_ + 1;
}
}
// compute intersection of opposite faces
float ui[2]{};
float vi[2]{};
float wi[2]{};
unsigned char q_sol{ 0 };
const float a = (F[0] - F[1])*(-F[6] + F[7] + F[4] - F[5]) - (F[4] - F[5])*(-F[2] + F[3] + F[0] - F[1]);
const float b = (i0 - F[0])*(-F[6] + F[7] + F[4] - F[5]) + (F[0] - F[1])*(F[6] - F[4]) - (i0 - F[4])*(-F[2] + F[3] + F[0] - F[1]) - (F[4] - F[5])*(F[2] - F[0]);
const float c = (i0 - F[0])*(F[6] - F[4]) - (i0 - F[4])*(F[2] - F[0]);;
float d = b*b - 4 * a*c;
if (d > 0) {
d = std::sqrt(d);
// compute u-coord of solutions
ui[0] = (-b - d) / (2 * a);
ui[1] = (-b + d) / (2 * a);
// compute v-coord of solutions
float g1 = F[0] * (1 - ui[0]) + F[1] * ui[0];
float g2 = F[2] * (1 - ui[0]) + F[3] * ui[0];
vi[0] = (i0 - g1) / (g2 - g1);
if (isnan(vi[0]) || isinf(vi[0])) {
vi[0] = -1.f;
}
g1 = F[0] * (1 - ui[1]) + F[1] * ui[1];
g2 = F[2] * (1 - ui[1]) + F[3] * ui[1];
vi[1] = (i0 - g1) / (g2 - g1);
if (isnan(vi[1]) || isinf(vi[1])) {
vi[1] = -1.f;
}
// compute w-coordinates of solutions
g1 = F[0] * (1 - ui[0]) + F[1] * ui[0];
g2 = F[4] * (1 - ui[0]) + F[5] * ui[0];
wi[0] = (i0 - g1) / (g2 - g1);
if (isnan(wi[0]) || isinf(wi[0])) {
wi[0] = -1.f;
}
g1 = F[0] * (1 - ui[1]) + F[1] * ui[1];
g2 = F[4] * (1 - ui[1]) + F[5] * ui[1];
wi[1] = (i0 - g1) / (g2 - g1);
if (isnan(wi[1]) || isinf(wi[1])) {
wi[1] = -1.f;
}
// correct values for roots of quadratic equations
// in case the asymptotic decider has failed
if (f_flag & BIT_1) { // face 1, w = 0;
if (wi[0] < wi[1]) wi[0] = 0;
else wi[1] = 0;
}
if (f_flag & BIT_2) { // face 2, w = 1
if (wi[0] > wi[1]) wi[1] = 1;
else wi[1] = 1;
}
if (f_flag & BIT_3) { // face 3, v = 0
if (vi[0] < vi[1]) vi[0] = 0;
else vi[1] = 0;
}
if (f_flag & BIT_4) { // face 4, v = 1
if (vi[0] > vi[1]) vi[0] = 1;
else vi[1] = 1;
}
if (f_flag & BIT_5) { // face 5, u = 0
if (ui[0] < ui[1]) ui[0] = 0;
else ui[1] = 0;
}
if (f_flag & BIT_6) { // face 6, u = 1
if (ui[0] > ui[1]) ui[0] = 1;
else ui[1] = 1;
}
// check solution intervals
if (0 < ui[0] && ui[0] < 1) {
q_sol |= 1;
}
if (0 < ui[1] && ui[1] < 1) {
q_sol |= 2;
}
if (0 < vi[0] && vi[0] < 1) {
q_sol |= 4;
}
if (0 < vi[1] && vi[1] < 1) {
q_sol |= 8;
}
if (0 < wi[0] && wi[0] < 1) {
q_sol |= 16;
}
if (0 < wi[1] && wi[1] < 1) {
q_sol |= 32;
}
}
// compute the number of solutions to the quadratic equation for a given face
auto nrQSolFace = [](const uint f, const unsigned char n) {
uint nr{ 0 };
switch (f) {
case 0:
if ((n & 0x5) == 0x5)
nr = nr + 1;
if ((n & 0xA) == 0xA)
nr = nr + 1;
break;
case 1:
if ((n & 0x11) == 0x11) nr = nr + 1;
if ((n & 0x22) == 0x22) nr = nr + 1;
break;
case 2:
if ((n & 0x18) == 0x18) nr = nr + 1;
if ((n & 0x24) == 0x24) nr = nr + 1;
break;
}
return nr;
};
// triangulate contours
// if all bits are set, then there are three pairs of nontrivial solutions
// to the quadratic equations. In this case, there is a tunnel or a contour
// with 12 vertices. If there are three contours, then there is a tunnel and
// one of the contorus with only three vertices is not part of it.
// Triangles are stored in global memory starting at offset
// count nr. of inner vertices to compute right global index
// first inner vertex has index cell_global_index + 3;
int v_count{ 3 };
if (numberOfSetBits<unsigned char>(q_sol) == 6) {
// there are at most three contours
// Possible cases:
// 1) a single contour with 12 vertices
// 2) two contours which build a tunnel
// 3) three contours, one has only 3 vertices and does not belong to the tunnel
// construct the six vertices of the inner hexagon
float3 hvt[6];
hvt[0].x = ui[0]; hvt[0].y = vi[0]; hvt[0].z = wi[0];
hvt[1].x = ui[0]; hvt[1].y = vi[0]; hvt[1].z = wi[1];
hvt[2].x = ui[1]; hvt[2].y = vi[0]; hvt[2].z = wi[1];
hvt[3].x = ui[1]; hvt[3].y = vi[1]; hvt[3].z = wi[1];
hvt[4].x = ui[1]; hvt[4].y = vi[1]; hvt[4].z = wi[0];
hvt[5].x = ui[0]; hvt[5].y = vi[1]; hvt[5].z = wi[0];
// construct vertices at intersections with the edges
auto e_vert = [&ecoord](const int e, const int i) {
const unsigned int l_coord[3]{ 1324855, 5299420, 16733440 };
unsigned char flag = (l_coord[i] >> (2 * e)) & 3;
if (flag == 3)
return ecoord[e];
else
return (float)(flag);
};
// if there are three contours, then there is a tunnel and one
// of the contours is not part of it.
unsigned char _not_tunnel = 0xF;
if (cnt_ == 3) {
// loop over the contorus
// triangulate the contour which is not part of
// the tunnel
const float uc_min = (ui[0] < ui[1]) ? ui[0] : ui[1];
const float uc_max = (ui[0] < ui[1]) ? ui[1] : ui[0];
for (int t = 0; t < (int)cnt_; t++) {
if (get_cnt_size(t, c_) == 3) {
float umin = 2;
float umax = -2;
uint e0 = get_c(t, 0, c_);
uint e1 = get_c(t, 1, c_);
uint e2 = get_c(t, 2, c_);
const float u_e0 = e_vert(e0, 0);
const float u_e1 = e_vert(e1, 0);
const float u_e2 = e_vert(e2, 0);
umin = (u_e0 < umin) ? u_e0 : umin;
umin = (u_e1 < umin) ? u_e1 : umin;
umin = (u_e2 < umin) ? u_e2 : umin;
umax = (u_e0 > umax) ? u_e0 : umax;
umax = (u_e1 > umax) ? u_e1 : umax;
umax = (u_e2 > umax) ? u_e1 : umax;
if (uc_min > umax || uc_max < umin) {
// this contour is not part of the tunnel
_not_tunnel = t;
// save triangle in global memory
t_.triangles[atomicAdd(t_.t_size,1)] = make_int4(v_gindex[e0], v_gindex[e1], v_gindex[e2], 0);
}
}
}
}
// compute vertices of inner hexagon, save new vertices in list and compute and keep
// global vertice index to build triangle connectivity later on.
int tg_idx[6];
float4 po;
for (int i = 0; i < 6; i++) {
int address{ -1 };
tg_idx[i] = insert_vertex_fast(int(9 * gl_index + v_count), ht_, v_, address);
// update nr. of vertices
v_count++;
// create a store vertex and normal
//float4 po;
//float4 hn;
// local coordinates for trilinear interpolation
const float u = hvt[i].x; const float v = hvt[i].y; const float w = hvt[i].z;
po.x = (1 - w)*((1 - v)*(p[tr].x + u*(p[tr + 1].x - p[tr].x)) + v*(p[tr + 2].x + u*(p[tr + 3].x - p[tr + 2].x))) + w*((1 - v)*(p[tr + 4].x + u*(p[tr + 5].x - p[tr + 4].x)) + v*(p[tr + 6].x + u*(p[tr + 7].x - p[tr + 6].x)));
po.y = (1 - w)*((1 - v)*(p[tr].y + u*(p[tr + 1].y - p[tr].y)) + v*(p[tr + 2].y + u*(p[tr + 3].y - p[tr + 2].y))) + w*((1 - v)*(p[tr + 4].y + u*(p[tr + 5].y - p[tr + 4].y)) + v*(p[tr + 6].y + u*(p[tr + 7].y - p[tr + 6].y)));
po.z = (1 - w)*((1 - v)*(p[tr].z + u*(p[tr + 1].z - p[tr].z)) + v*(p[tr + 2].z + u*(p[tr + 3].z - p[tr + 2].z))) + w*((1 - v)*(p[tr + 4].z + u*(p[tr + 5].z - p[tr + 4].z)) + v*(p[tr + 6].z + u*(p[tr + 7].z - p[tr + 6].z)));
//trilinear(po, p, hvt[i].x, hvt[i].y, hvt[i].z);
po.w = 1.f;
v_.vertices[address] = po;
//trilinear(po, n, hvt[i].x, hvt[i].y, hvt[i].z);
po.x = (1 - w)*((1 - v)*(n[tr].x + u*(n[tr + 1].x - n[tr].x)) + v*(n[tr + 2].x + u*(n[tr + 3].x - n[tr + 2].x))) + w*((1 - v)*(n[tr + 4].x + u*(n[tr + 5].x - n[tr + 4].x)) + v*(n[tr + 6].x + u*(n[tr + 7].x - n[tr + 6].x)));
po.y = (1 - w)*((1 - v)*(n[tr].y + u*(n[tr + 1].y - n[tr].y)) + v*(n[tr + 2].y + u*(n[tr + 3].y - n[tr + 2].y))) + w*((1 - v)*(n[tr + 4].y + u*(n[tr + 5].y - n[tr + 4].y)) + v*(n[tr + 6].y + u*(n[tr + 7].y - n[tr + 6].y)));
po.z = (1 - w)*((1 - v)*(n[tr].z + u*(n[tr + 1].z - n[tr].z)) + v*(n[tr + 2].z + u*(n[tr + 3].z - n[tr + 2].z))) + w*((1 - v)*(n[tr + 4].z + u*(n[tr + 5].z - n[tr + 4].z)) + v*(n[tr + 6].z + u*(n[tr + 7].z - n[tr + 6].z)));
// normalize normal
const float factor = std::sqrt(po.x * po.x + po.y * po.y + po.z * po.z);
po.x = po.x / factor;
po.y = po.y / factor;
po.z = po.z / factor;
po.w = 0.f;
v_.normals[address] = po;
}
// triangulate contours with inner hexagon
unsigned char tcon_[12];
for (int i = 0; i < (int)cnt_; i++) {
if (_not_tunnel != i) { // contour belongs to tunnel
const int cnt_sz = (int)get_cnt_size(i, c_);
for (int r = 0; r < cnt_sz; r++) {
int index = -1;
double dist = 1000.;
uint ci = get_c(i, r, c_);
const float u_edge = e_vert(ci, 0);
const float v_edge = e_vert(ci, 1);
const float w_edge = e_vert(ci, 2);
for (int s = 0; s < 6; s++) {
const float uval = u_edge - hvt[s].x;
const float vval = v_edge - hvt[s].y;
const float wval = w_edge - hvt[s].z;
const float val = uval*uval + vval*vval + wval*wval;
if (dist > val) {
index = s;
dist = val;
}
}
tcon_[ci] = (unsigned char)index;
}
// correspondence between vertices found
// create triangles
// needs some functions
auto distanceRingIntsModulo = [](const int d1, const int d2) {
const int r = (d1 - d2) < 0 ? d2 - d1 : d1 - d2;
return (r > 2 ? 6 - r : r);
};
auto midpointRingIntModulo = [](const int d1, const int d2) {
const int dmax = (d1 > d2) ? d1 : d2;
const int dmin = (d1 < d2) ? d1 : d2;
return ((dmax + 2) % 6 == dmin) ? (dmax + 1) % 6 : (dmax + dmin) / 2;
};
for (int r = 0; r < cnt_sz; r++) {
const uint tid1 = get_c(i, r, c_);
const uint tid2 = get_c(i, ((r + 1) % cnt_sz), c_);
const uint cid1 = tcon_[tid1];
const uint cid2 = tcon_[tid2];
// compute index distance
const int dst = distanceRingIntsModulo(cid1, cid2);
switch (dst)
{
case 0:
{
t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[tid1], v_gindex[tid2], tg_idx[cid1],0);
}
break;
case 1:
{
// measure diagonals
// triangulate along shortest diagonal
float u_edge = e_vert(tid1, 0);
float v_edge = e_vert(tid1, 1);
float w_edge = e_vert(tid1, 2);
const float l1 = (u_edge - hvt[cid2].x)*(u_edge - hvt[cid2].x) + (v_edge - hvt[cid2].y)*(v_edge - hvt[cid2].y) + (w_edge - hvt[cid2].z)*(w_edge - hvt[cid2].z);
u_edge = e_vert(tid2, 0);
v_edge = e_vert(tid2, 1);
w_edge = e_vert(tid2, 2);
const double l2 = (u_edge - hvt[cid1].x)*(u_edge - hvt[cid1].x) + (v_edge - hvt[cid1].y)*(v_edge - hvt[cid1].y) + (w_edge - hvt[cid1].z)*(w_edge - hvt[cid1].z);
const int a_ = atomicAdd(t_.t_size, 2);
if (l1 < l2) {
t_.triangles[a_] = make_int4(v_gindex[tid1], v_gindex[tid2], tg_idx[cid2], 0);
t_.triangles[a_+1] = make_int4(v_gindex[tid1], tg_idx[cid2], tg_idx[cid1], 0);
}
else {
t_.triangles[a_] = make_int4(v_gindex[tid1], v_gindex[tid2], tg_idx[cid1], 0);
t_.triangles[a_ + 1] = make_int4(v_gindex[tid2], tg_idx[cid2], tg_idx[cid1], 0);
}
}
break;
case 2:
{
const int cidm = midpointRingIntModulo(cid1, cid2);
const int a_ = atomicAdd(t_.t_size, 3);
t_.triangles[a_] = make_int4(v_gindex[tid1], v_gindex[tid2], tg_idx[cidm], 0);
t_.triangles[a_+1] = make_int4(v_gindex[tid1], tg_idx[cidm], tg_idx[cid1], 0);
t_.triangles[a_+2] = make_int4(v_gindex[tid2], tg_idx[cid2], tg_idx[cidm], 0);
}
break;
} // switch
} // for loop over the vertices of the contour
} // if (_not_tunnel)
} // for loop over contours
if (cnt_ == 1) {
// there is a single contour
// triangulate and close inner hexagon
const int a_ = atomicAdd(t_.t_size, 4);
const bool s_ = asymptotic_decider(F[0], F[1], F[2], F[3]);
const bool of_ = (wi[1] < wi[0]) ? s_ : !s_;
if (!of_) {
t_.triangles[a_] = make_int4(tg_idx[0], tg_idx[2], tg_idx[1], 0);
t_.triangles[a_ + 1] = make_int4(tg_idx[2], tg_idx[4], tg_idx[3], 0);
t_.triangles[a_ + 2] = make_int4(tg_idx[0], tg_idx[5], tg_idx[4], 0);
t_.triangles[a_ + 3] = make_int4(tg_idx[0], tg_idx[4], tg_idx[2], 0);
}
else {
t_.triangles[a_] = make_int4(tg_idx[0], tg_idx[1], tg_idx[2], 0);
t_.triangles[a_ + 1] = make_int4(tg_idx[2], tg_idx[3], tg_idx[4], 0);
t_.triangles[a_ + 2] = make_int4(tg_idx[0], tg_idx[4], tg_idx[5], 0);
t_.triangles[a_ + 3] = make_int4(tg_idx[0], tg_idx[2], tg_idx[4], 0);
}
}
}
else {
// there is no tunnel
// handle case with no saddle point as simple polygons with 3, 4, 5 or six vertices
const unsigned char nr_u{ (unsigned char)nrQSolFace(0, q_sol) };
const unsigned char nr_v{ (unsigned char)nrQSolFace(1, q_sol) };
const unsigned char nr_w{ (unsigned char)nrQSolFace(2, q_sol) };
const unsigned char nr_t{ (unsigned char)(nr_u + nr_v + nr_w) };
if (nr_t == nr_u || nr_t == nr_v || nr_t == nr_w) {
// loop over all contours
for (int i = 0; i < (int)cnt_; i++) {
switch (get_cnt_size(i, c_)) {
case 3:
{
//const int a_ = atomicAdd(t_.t_size, 1);
t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], 0);
}
break;
case 4:
{
const int a_ = atomicAdd(t_.t_size, 2);
t_.triangles[a_] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], 0);
t_.triangles[a_+1] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)], 0);
}
break;
case 5:
{
const int a_ = atomicAdd(t_.t_size, 3);
t_.triangles[a_] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], 0);
t_.triangles[a_+1] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)], 0);
t_.triangles[a_+2] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 3, c_)], v_gindex[get_c(i, 4, c_)], 0);
}
break;
case 6:
{
const int a_ = atomicAdd(t_.t_size, 4);
t_.triangles[a_] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 3, c_)], 0);
t_.triangles[a_+1] = make_int4(v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)], 0);
t_.triangles[a_+2] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 3, c_)], v_gindex[get_c(i, 4, c_)], 0);
t_.triangles[a_+3] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 4, c_)], v_gindex[get_c(i, 5, c_)], 0);
}
break;
} // switch over size of contour
} // loop over contorus
} // thre are no saddle points
else {
// there are saddle points
//fc1 = fs(1, 1)*fs(2, 1) + fs(1, 2)*fs(2, 2);
//fc2 = fs(1, 1)*fs(3, 1) + fs(1, 2)*fs(3, 2);
//fc3 = fs(2, 1)*fs(3, 2) + fs(2, 2)*fs(3, 1);
unsigned char fs[3][2]{{(unsigned char)(q_sol & 1), (unsigned char)((q_sol >> 1) & 1)}, { (unsigned char)((q_sol >> 2) & 1), (unsigned char)((q_sol >> 3) & 1) }, { (unsigned char)((q_sol >> 4) & 1), (unsigned char)((q_sol >> 5) & 1) }};
const unsigned char fc1 = fs[0][0] * fs[1][0] + fs[0][1] * fs[1][1];
const unsigned char fc2 = fs[0][0] * fs[2][0] + fs[0][1] * fs[2][1];
const unsigned char fc3 = fs[1][0] * fs[2][1] + fs[1][1] * fs[2][0];
const unsigned char c_faces = fc1 + fc2 + fc3;
float ucoord{};
float vcoord{};
float wcoord{};
switch (c_faces) {
case 2:
{
if (fc1 == 0) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][0] * wi[1] + fs[1][1] * wi[0];
}
else if (fc2 == 0) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[0][0] * vi[0] + fs[0][1] * vi[1];
wcoord = fs[0][0] * wi[1] + fs[0][1] * wi[0];
}
else if (fc3 == 0) {
ucoord = fs[1][0] * ui[0] + fs[1][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][0] * wi[0] + fs[1][1] * wi[1];
}
}
break;
case 3:
{
ucoord = (fs[0][0] * ui[0] + fs[0][1] * ui[1]) / (fs[0][0] + fs[0][1]);
vcoord = (fs[1][0] * vi[0] + fs[1][1] * vi[1]) / (fs[1][0] + fs[1][1]);
wcoord = (fs[2][0] * wi[0] + fs[2][1] * wi[1]) / (fs[2][0] + fs[2][1]);
}
break;
case 4:
{
const unsigned char nr_u = fs[0][0] + fs[0][1];
const unsigned char nr_v = fs[1][0] + fs[1][1];
const unsigned char nr_w = fs[2][0] + fs[2][1];
if (nr_w == 1) {
ucoord = fs[2][0] * ui[0] + fs[2][1] * ui[1];
vcoord = fs[2][1] * vi[0] + fs[2][0] * vi[1];
wcoord = fs[2][0] * wi[0] + fs[2][1] * wi[1];
}
else if (nr_v == 1) {
ucoord = fs[1][0] * ui[0] + fs[1][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][1] * wi[0] + fs[1][0] * wi[1];
}
else if (nr_u == 1) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[0][0] * vi[0] + fs[0][1] * vi[1];
wcoord = fs[0][0] * wi[0] + fs[0][1] * wi[1];
}
}
break;
} // switch(c_faces)
// create inner vertex
float4 ip;
float4 in;
//ip.x = (1 - wcoord)*((1 - vcoord)*(p[0].x + ucoord*(p[1].x - p[0].x)) + vcoord*(p[2].x + ucoord*(p[3].x - p[2].x))) + wcoord*((1 - vcoord)*(p[4].x + ucoord*(p[5].x - p[4].x)) + vcoord*(p[6].x + ucoord*(p[7].x - p[6].x)));
//ip.y = (1 - wcoord)*((1 - vcoord)*(p[0].y + ucoord*(p[1].y - p[0].y)) + vcoord*(p[2].y + ucoord*(p[3].y - p[2].y))) + wcoord*((1 - vcoord)*(p[4].y + ucoord*(p[5].y - p[4].y)) + vcoord*(p[6].y + ucoord*(p[7].y - p[6].y)));
//ip.z = (1 - wcoord)*((1 - vcoord)*(p[0].z + ucoord*(p[1].z - p[0].z)) + vcoord*(p[2].z + ucoord*(p[3].z - p[2].z))) + wcoord*((1 - vcoord)*(p[4].z + ucoord*(p[5].z - p[4].z)) + vcoord*(p[6].z + ucoord*(p[7].z - p[6].z)));
//in.x = (1 - wcoord)*((1 - vcoord)*(n[0].x + ucoord*(n[1].x - n[0].x)) + vcoord*(n[2].x + ucoord*(n[3].x - n[2].x))) + wcoord*((1 - vcoord)*(n[4].x + ucoord*(n[5].x - n[4].x)) + vcoord*(n[6].x + ucoord*(n[7].x - n[6].x)));
//in.y = (1 - wcoord)*((1 - vcoord)*(n[0].y + ucoord*(n[1].y - n[0].y)) + vcoord*(n[2].y + ucoord*(n[3].y - n[2].y))) + wcoord*((1 - vcoord)*(n[4].y + ucoord*(n[5].y - n[4].y)) + vcoord*(n[6].y + ucoord*(n[7].y - n[6].y)));
//in.z = (1 - wcoord)*((1 - vcoord)*(n[0].z + ucoord*(n[1].z - n[0].z)) + vcoord*(n[2].z + ucoord*(n[3].z - n[2].z))) + wcoord*((1 - vcoord)*(n[4].z + ucoord*(n[5].z - n[4].z)) + vcoord*(n[6].z + ucoord*(n[7].z - n[6].z)));
trilinear(ip, p, ucoord, vcoord, wcoord);
trilinear(in, n, ucoord, vcoord, wcoord);
// normalize normal
const float factor = std::sqrt(in.x * in.x + in.y * in.y + in.z * in.z);
in.x = in.x / factor;
in.y = in.y / factor;
in.z = in.z / factor;
// the fourth coordinate
ip.w = 1.f;
in.w = 0.f;
// global index
//const int gidx = int(9 * gl_index + v_count);
int gidx = int(9 * gl_index + v_count);
// this point is only used if contours with more than three vertices
// are present
//bool pt_used{ false };
// check if the vertex will be used, this happens
// if there are contours with more than three edges
for (int i = 0; i < (int)cnt_; i++) {
if (get_cnt_size(i, c_) > 3) {
int address{ -1 };
gidx = insert_vertex_fast(gidx, ht_, v_, address);
//gidx = v_.add(ht_, gidx, ip, in);
v_count++;
float4 ip;
//trilinear(ip, p, ucoord, vcoord, wcoord);
ip.x = (1 - wcoord)*((1 - vcoord)*(p[tr].x + ucoord*(p[tr + 1].x - p[tr].x)) + vcoord*(p[tr + 2].x + ucoord*(p[tr + 3].x - p[tr + 2].x))) + wcoord*((1 - vcoord)*(p[tr + 4].x + ucoord*(p[tr + 5].x - p[tr + 4].x)) + vcoord*(p[tr + 6].x + ucoord*(p[tr + 7].x - p[tr + 6].x)));
ip.y = (1 - wcoord)*((1 - vcoord)*(p[tr].y + ucoord*(p[tr + 1].y - p[tr].y)) + vcoord*(p[tr + 2].y + ucoord*(p[tr + 3].y - p[tr + 2].y))) + wcoord*((1 - vcoord)*(p[tr + 4].y + ucoord*(p[tr + 5].y - p[tr + 4].y)) + vcoord*(p[tr + 6].y + ucoord*(p[tr + 7].y - p[tr + 6].y)));
ip.z = (1 - wcoord)*((1 - vcoord)*(p[tr].z + ucoord*(p[tr + 1].z - p[tr].z)) + vcoord*(p[tr + 2].z + ucoord*(p[tr + 3].z - p[tr + 2].z))) + wcoord*((1 - vcoord)*(p[tr + 4].z + ucoord*(p[tr + 5].z - p[tr + 4].z)) + vcoord*(p[tr + 6].z + ucoord*(p[tr + 7].z - p[tr + 6].z)));
ip.w = 1.f;
v_.vertices[address] = ip;
//trilinear(ip, n, ucoord, vcoord, wcoord);
ip.x = (1 - wcoord)*((1 - vcoord)*(n[tr].x + ucoord*(n[tr + 1].x - n[tr].x)) + vcoord*(n[tr + 2].x + ucoord*(n[tr + 3].x - n[tr + 2].x))) + wcoord*((1 - vcoord)*(n[tr + 4].x + ucoord*(n[tr + 5].x - n[tr + 4].x)) + vcoord*(n[tr + 6].x + ucoord*(n[tr + 7].x - n[tr + 6].x)));
ip.y = (1 - wcoord)*((1 - vcoord)*(n[tr].y + ucoord*(n[tr + 1].y - n[tr].y)) + vcoord*(n[tr + 2].y + ucoord*(n[tr + 3].y - n[tr + 2].y))) + wcoord*((1 - vcoord)*(n[tr + 4].y + ucoord*(n[tr + 5].y - n[tr + 4].y)) + vcoord*(n[tr + 6].y + ucoord*(n[tr + 7].y - n[tr + 6].y)));
ip.z = (1 - wcoord)*((1 - vcoord)*(n[tr].z + ucoord*(n[tr + 1].z - n[tr].z)) + vcoord*(n[tr + 2].z + ucoord*(n[tr + 3].z - n[tr + 2].z))) + wcoord*((1 - vcoord)*(n[tr + 4].z + ucoord*(n[tr + 5].z - n[tr + 4].z)) + vcoord*(n[tr + 6].z + ucoord*(n[tr + 7].z - n[tr + 6].z)));
// normalize normal
const float factor = std::sqrt(ip.x * ip.x + ip.y * ip.y + ip.z * ip.z);
ip.x = ip.x / factor;
ip.y = ip.y / factor;
ip.z = ip.z / factor;
ip.w = 0.f;
v_.normals[address] = ip;
break;
}
}
// loop over the contorus
for (int i = 0; i < (int)cnt_; i++) {
const unsigned char cnt_sz = (unsigned char)get_cnt_size(i, c_);
if (cnt_sz == 3) {
//const int a_ = atomicAdd(t_.t_size, 1);
t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], 0);
}
else {
//pt_used = true;
for (int t = 0; t < cnt_sz; t++) {
// add triangle to list
t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[get_c(i, t, c_)], v_gindex[get_c(i, (t + 1) % cnt_sz, c_)], gidx, 0);
}
}
}
} // else - there are saddle points
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Map vertex global id to position in vertex array
// to construct shared vertex list
__global__ void map_triangles(const int nr_t, VertexHashTable ht_, Triangles t_)
{
    // One thread per triangle: translate each corner's global vertex id into
    // its final position in the shared vertex array (ht_.addr lookup).
    const int tri_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (tri_id >= nr_t)
        return;
    int4 tri = t_.triangles[tri_id];
    tri.x = ht_.addr[tri.x];
    tri.y = ht_.addr[tri.y];
    tri.z = ht_.addr[tri.z];
    t_.triangles[tri_id] = tri;
}
__global__ void map_triangles_fast(const int nr_t, VertexHashTable ht_, Triangles t_)
{
    // Three threads per triangle: each thread remaps exactly one corner
    // through the vertex hash table's address array.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int t = tid / 3;
    if (t >= nr_t)
        return;
    const int corner = tid - 3 * t; // == tid % 3, selects x, y or z
    if (corner == 0) {
        t_.triangles[t].x = ht_.addr[t_.triangles[t].x];
    }
    else if (corner == 1) {
        t_.triangles[t].y = ht_.addr[t_.triangles[t].y];
    }
    else {
        t_.triangles[t].z = ht_.addr[t_.triangles[t].z];
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// create three halfedges from triangle
// for the triangle store a corresponding halfedge
// for each vertex store the starting halfedge
// One thread per halfedge: tid = 3 * face + offset.
// Halfedge fields: x = origin vertex, y = face id, z = next halfedge,
// w = twin (-1 = boundary until twins are matched).
// Each halfedge is also registered in the hash table under its ordered
// vertex pair so that map_halfedge_twins can pair opposite halfedges later.
__global__ void create_halfedge(const int nr_he, Triangles t_, Halfedges he_, HalfedgeHashTable het_)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= nr_he) {
        return;
    }
    const int offset = tid % 3;
    const int t = tid / 3;
    // get the owning triangle from global memory
    const int4 tri = t_.triangles[t];
    switch (offset) {
    case 0:
        // halfedge along edge (tri.x -> tri.y)
        he_.he_e[tid].x = tri.x;   // origin vertex
        he_.he_e[tid].y = t;       // face id
        he_.he_e[tid].z = tid + 1; // next halfedge
        he_.he_e[tid].w = -1;      // boundary edge at initialization
        he_.he_v[tri.x] = tid;     // vertex -> one outgoing halfedge
        addHalfedgeToHashTable(het_, tid, tri.x, tri.y);
        break;
    case 1:
        // halfedge along edge (tri.y -> tri.z)
        he_.he_e[tid].x = tri.y;   // origin vertex
        he_.he_e[tid].y = t;       // face id
        he_.he_e[tid].z = tid + 1; // next halfedge
        he_.he_e[tid].w = -1;      // boundary edge at initialization
        he_.he_v[tri.y] = tid;
        // BUG FIX: this halfedge must be keyed on (tri.y, tri.z); the previous
        // code inserted (tri.x, tri.y) for every offset, which registered the
        // x-y edge three times and broke twin matching for the other two edges
        // (cf. the intended per-edge insert_halfedge_id calls that were
        // commented out below).
        addHalfedgeToHashTable(het_, tid, tri.y, tri.z);
        break;
    case 2:
        // halfedge along edge (tri.z -> tri.x); `next` wraps back to the
        // triangle's first halfedge
        he_.he_e[tid].x = tri.z;   // origin vertex
        he_.he_e[tid].y = t;       // face id
        he_.he_e[tid].z = tid - 2; // next halfedge (wrap-around)
        he_.he_e[tid].w = -1;      // boundary edge at initialization
        he_.he_v[tri.z] = tid;
        addHalfedgeToHashTable(het_, tid, tri.z, tri.x);
        break;
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// map vertex index of src vertex in halfedge indices to final index in vertex array
// at this point we know the total nr. of half edges, and the total nr. of vertices
__global__ void map_halfedge_vertex(const int nr_he, int4* he_e, VertexHashTable ht_, int* he_v)
{
    // One thread per halfedge. Halfedge layout:
    //   x = origin vertex, y = face, z = next, w = twin
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= nr_he)
        return;
    // translate the origin vertex's global id into its final array position
    const int vtx = ht_.addr[he_e[idx].x];
    he_e[idx].x = vtx;
    // make the vertex point back at one of its outgoing halfedges
    he_v[vtx] = idx;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// map index to boundary vertex
__global__ void map_halfedge_bndvertex(const int nr_he, int4* he_e, int* he_v)
{
    // One thread per halfedge. w == -1 marks a boundary halfedge; for those,
    // let the origin vertex (he.x) point at this halfedge.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= nr_he)
        return;
    const int4 he = he_e[idx];
    if (he.w == -1)
        he_v[he.x] = idx;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// map halfedge twins
__global__ void map_halfedge_twins(Halfedges he_, HalfedgeHashTable het_)
{
    // One thread per hash-table slot. Occupied slots (key > 0) hold the ids
    // of a pair of opposite halfedges; link them as twins in both directions.
    const int slot = blockIdx.x * blockDim.x + threadIdx.x;
    if (slot >= het_.t_size) {
        return;
    }
    if (het_.key[slot] > 0) {
        const int first = het_.he_ids[slot].x;
        const int second = het_.he_ids[slot].y;
        he_.he_e[first].w = second;
        he_.he_e[second].w = first;
    }
}
__global__ void map_halfedge_twins_fast(Halfedges he_, HalfedgeHashTable het_)
{
    // Two threads per hash-table slot: the even thread writes the twin link
    // of the first halfedge, the odd thread that of the second.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int slot = tid >> 1;  // tid / 2
    if (slot >= het_.t_size) {
        return;
    }
    if (het_.key[slot] > 0) {
        const int he0 = het_.he_ids[slot].x;
        const int he1 = het_.he_ids[slot].y;
        if ((tid & 1) == 0) {
            he_.he_e[he0].w = he1;
        }
        else {
            he_.he_e[he1].w = he0;
        }
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// add halfedge to table
// Create and store the three halfedges of one triangle (v0, v1, v2) and
// register each under its ordered vertex pair for later twin matching.
__device__ void addHalfedges(Halfedges he_, HalfedgeHashTable het_, const int v0, const int v1, const int v2)
{
    // Reserve three consecutive halfedge slots for this triangle.
    // BUG FIX: the counter must advance by 3 (one slot per halfedge).
    // The previous atomicAdd(..., 1) let concurrent triangles overwrite each
    // other's slots and made f_ = a_/3 wrong whenever a_ was not a multiple
    // of 3.
    const int a_ = atomicAdd(he_.t_size, 3);
    const int f_ = a_ / 3; // face id: exactly three halfedges per face (triangle)
    // he 0: v0 -> v1
    he_.he_e[a_].x = v0;     // origin vertex
    he_.he_e[a_].y = f_;     // face
    he_.he_e[a_].z = a_ + 1; // next
    he_.he_e[a_].w = -1;     // default is boundary edge (twin set later)
    addHalfedgeToHashTable(het_, a_, v0, v1);
    // he 1: v1 -> v2
    he_.he_e[a_ + 1].x = v1;
    he_.he_e[a_ + 1].y = f_;
    he_.he_e[a_ + 1].z = a_ + 2;
    he_.he_e[a_ + 1].w = -1;
    addHalfedgeToHashTable(het_, a_ + 1, v1, v2);
    // he 2: v2 -> v0, `next` wraps back to the triangle's first halfedge
    he_.he_e[a_ + 2].x = v2;
    he_.he_e[a_ + 2].y = f_;
    he_.he_e[a_ + 2].z = a_;
    he_.he_e[a_ + 2].w = -1;
    addHalfedgeToHashTable(het_, a_ + 2, v2, v0);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Halfedge Marching cubes
// Halfedge Marching Cubes, first pass over the active cells in `cellid`
// (nr_cells entries, one thread per cell, 1D launch assumed).
// Non-ambiguous MC cases are triangulated directly into the halfedge
// structure; ambiguous cases (bit 16 set in the edge-table entry) are
// appended to ac_ and handled by a second pass (he_tSlice).
__global__ void he_mcSlice(const float i0, hipTextureObject_t v_data, UGrid ugrid, MC_lookup l_tables, int nr_cells, int* cellid, AmbiguousCells ac_, VertexHashTable ht_, Vertices v_, HalfedgeHashTable het_, Halfedges he_)
{
    // get thread id
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (nr_cells <= tid)
        return;
    // compute grid indices from global index
    const int gl_index = cellid[tid];
    const int i_index = ugrid.i_index(gl_index);
    const int j_index = ugrid.j_index(gl_index);
    const int k_index = ugrid.k_index(gl_index);
    // construct 8 cell vertices
    float3 v[8];
    cell_vertices(v, i_index, j_index, k_index, ugrid);
    // scalar values at the 8 cell corners, sampled from the 3D texture
    float u[8];
    u[0] = tex3D<float>(v_data, i_index, j_index, k_index);
    u[1] = tex3D<float>(v_data, i_index + 1, j_index, k_index);
    u[2] = tex3D<float>(v_data, i_index, j_index + 1, k_index);
    u[3] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index);
    u[4] = tex3D<float>(v_data, i_index, j_index, k_index + 1);
    u[5] = tex3D<float>(v_data, i_index + 1, j_index, k_index + 1);
    u[6] = tex3D<float>(v_data, i_index, j_index + 1, k_index + 1);
    u[7] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index + 1);
    // compute normals at vertices
    float3 n[8];
    gradient(n, v_data, ugrid, i_index, j_index, k_index);
    // compute MC case: one bit per corner, set when scalar >= iso value i0
    uint i_case{ 0 };
    i_case = i_case + ((uint)(u[0] >= i0));
    i_case = i_case + ((uint)(u[1] >= i0)) * 2;
    i_case = i_case + ((uint)(u[2] >= i0)) * 4;
    i_case = i_case + ((uint)(u[3] >= i0)) * 8;
    i_case = i_case + ((uint)(u[4] >= i0)) * 16;
    i_case = i_case + ((uint)(u[5] >= i0)) * 32;
    i_case = i_case + ((uint)(u[6] >= i0)) * 64;
    i_case = i_case + ((uint)(u[7] >= i0)) * 128;
    // ambiguous cases are processed in the next pass
    //if (105 == l_tables.t_ambig[i_case]) {
    ushort e_ = l_tables.e_[i_case];
    if (e_ & BIT_16) {
        // defer: append the cell to the ambiguous-cells worklist
        //ac_.add(gl_index);
        ac_.a_cells[atomicAdd(ac_.t_size, 1)] = gl_index;
        return; // don't process this cell with standard MC
    }
    // Compute intersection with edges
    // gei_pattern_ packs 12 five-bit records, one per cell edge: bits 0..2
    // are the (i,j,k) offsets of the grid node that owns the edge, bits 3..4
    // select one of that node's vertex slots (off_val below).
    const unsigned long long gei_pattern_ = 670526590282893600ull;
    // l_edges_[e] packs the two local corner ids of edge e into its two nibbles
    const unsigned char l_edges_[12]{ 16, 49, 50, 32, 84, 117, 118, 100, 64, 81, 115, 98 };
    // per-edge global vertex index; stays -1 for edges not cut by the isosurface
    int v_gindex[12]{};
    // one-bit mask walking over the 12 edge flags in e_
    ushort flag{ 1 };
    //const ushort e_pattern = l_tables.ePattern(i_case); // l_tables.e_pattern[i_case];
    for (int e = 0; e < 12; e++) {
        v_gindex[e] = -1;
        if (flag & e_) {
            // compute edge inersection
            // compute local coordinate along edge
            const int v0 = (l_edges_[e] & 0xF);
            const int v1 = (l_edges_[e] >> 4) & 0xF;
            const float l = (i0 - u[v0]) / (u[v1] - u[v0]);
            // interpolate position and normal along the edge
            float4 vp = make_float4(v[v0].x + l*(v[v1].x - v[v0].x), v[v0].y + l*(v[v1].y - v[v0].y), v[v0].z + l*(v[v1].z - v[v0].z), 1.f);
            float4 np = make_float4(n[v0].x + l*(n[v1].x - n[v0].x), n[v0].y + l*(n[v1].y - n[v0].y), n[v0].z + l*(n[v1].z - n[v0].z), 0.f);
            const float length = std::sqrt(np.x * np.x + np.y * np.y + np.z * np.z);
            np.x = np.x / length;
            np.y = np.y / length;
            np.z = np.z / length;
            // get unique vertex index
            // compute vertex global index
            const int ix = i_index + (int)((gei_pattern_ >> 5 * e) & 1); // global_edge_id[eg][0];
            const int iy = j_index + (int)((gei_pattern_ >> (5 * e + 1)) & 1); // global_edge_id[eg][1];
            const int iz = k_index + (int)((gei_pattern_ >> (5 * e + 2)) & 1); // global_edge_id[eg][2];
            const int off_val = (int)((gei_pattern_ >> (5 * e + 3)) & 3);
            // unique global vertex id: 9 slots per grid node; off_val (0..3)
            // selects the edge-vertex slot (slots >= 3 appear to be reserved
            // for inner vertices of ambiguous cells — confirm in he_tSlice)
            v_gindex[e] = insert_vertex(int(9 * ugrid.gl_index(ix, iy, iz) + off_val), ht_, v_, vp, np);
        }
        flag <<= 1;
    }
    // compute triangles
    // tl_ packs the case's triangle list as 16 nibbles (edge ids);
    // a nibble value of 0xF terminates the list
    //const unsigned char* t_ambig = l_tables.t_ambig;
    unsigned long long tl_ = l_tables.t_[i_case];
    for (int t = 0; t < 16; t += 3) {
        //const int t_index = i_case * 16 + t;
        //if (t_pattern[i_case * 16 + t] == -1) {
        if (((tl_ >> (4 * t)) & 0xFull) == 0xF) {
            // there are no more triangles
            break;
        }
        // save tirangle
        // local edge ids of the three triangle corners
        const int v0 = (int)((tl_ >> (4 * t)) & 0xFull);
        const int v1 = (int)((tl_ >> (4 * (t + 1))) & 0xFull);
        const int v2 = (int)((tl_ >> (4 * (t + 2))) & 0xFull);
        // create three halfedges
        addHalfedges(he_, het_, v_gindex[v0], v_gindex[v1], v_gindex[v2]);
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute ambiguous cells
__global__ void he_tSlice(const float i0, hipTextureObject_t v_data, UGrid ugrid, MC_lookup l_tables, const int nr_cells, AmbiguousCells ac_, VertexHashTable ht_,Vertices v_, HalfedgeHashTable het_, Halfedges he_)
{
// get cell id
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (nr_cells <= tid)
return;
// compute grid indices from global index
const int gl_index = ac_.a_cells[tid];
const int i_index = ugrid.i_index(gl_index);
const int j_index = ugrid.j_index(gl_index);
const int k_index = ugrid.k_index(gl_index);
// construct 8 cell vertices
float3 p[8];
cell_vertices(p, i_index, j_index, k_index, ugrid);
// scalar values at vertices
float F[8];
F[0] = tex3D<float>(v_data, i_index, j_index, k_index);
F[1] = tex3D<float>(v_data, i_index + 1, j_index, k_index);
F[2] = tex3D<float>(v_data, i_index, j_index + 1, k_index);
F[3] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index);
F[4] = tex3D<float>(v_data, i_index, j_index, k_index + 1);
F[5] = tex3D<float>(v_data, i_index + 1, j_index, k_index + 1);
F[6] = tex3D<float>(v_data, i_index, j_index + 1, k_index + 1);
F[7] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index + 1);
// compute normals at vertices
float3 n[8];
gradient(n, v_data, ugrid, i_index, j_index, k_index);
// compute case
uint i_case{ 0 };
i_case = i_case + ((uint)(F[0] >= i0));
i_case = i_case + ((uint)(F[1] >= i0)) * 2;
i_case = i_case + ((uint)(F[2] >= i0)) * 4;
i_case = i_case + ((uint)(F[3] >= i0)) * 8;
i_case = i_case + ((uint)(F[4] >= i0)) * 16;
i_case = i_case + ((uint)(F[5] >= i0)) * 32;
i_case = i_case + ((uint)(F[6] >= i0)) * 64;
i_case = i_case + ((uint)(F[7] >= i0)) * 128;
// Compute intersection with edges
const unsigned long long gei_pattern_ = 670526590282893600ull;
const unsigned char l_edges_[12]{ 16, 49, 50, 32, 84, 117, 118, 100, 64, 81, 115, 98 };
// compute intersection with cell edges
float ecoord[12]{};
int v_gindex[12]{};
ushort flag{ 1 };
ushort e_ = l_tables.e_[i_case];
//ushort e_pattern = l_tables.ePattern(i_case); // l_tables.e_pattern[i_case];
for (int e = 0; e < 12; e++) {
v_gindex[e] = -1;
//ecoord[e] = 0.f;
if (flag & e_) {
// compute edge inersection
// compute local coordinate along edge
const int v0 = (l_edges_[e] & 0xF);
const int v1 = (l_edges_[e] >> 4) & 0xF;
const float l = (i0 - F[v0]) / (F[v1] - F[v0]);
float4 vp = make_float4(p[v0].x + l*(p[v1].x - p[v0].x), p[v0].y + l*(p[v1].y - p[v0].y), p[v0].z + l*(p[v1].z - p[v0].z), 1.f);
float4 np = make_float4(n[v0].x + l*(n[v1].x - n[v0].x), n[v0].y + l*(n[v1].y - n[v0].y), n[v0].z + l*(n[v1].z - n[v0].z), 0.f);
const float length = std::sqrt(np.x * np.x + np.y * np.y + np.z * np.z);
np.x = np.x / length;
np.y = np.y / length;
np.z = np.z / length;
// get unique vertex index
// compute vertex global index
const int ix = i_index + (int)((gei_pattern_ >> 5 * e) & 1); // global_edge_id[eg][0];
const int iy = j_index + (int)((gei_pattern_ >> (5 * e + 1)) & 1); // global_edge_id[eg][1];
const int iz = k_index + (int)((gei_pattern_ >> (5 * e + 2)) & 1); // global_edge_id[eg][2];
const int off_val = (int)((gei_pattern_ >> (5 * e + 3)) & 3);
v_gindex[e] = insert_vertex(int(9 * ugrid.gl_index(ix, iy, iz) + off_val), ht_, v_, vp, np);
// remember local coordinate along edge
ecoord[e] = l;
}
flag <<= 1;
}
// compute oriented contours
// 1. build segments
// 2. connect segments
// build up segments
// set segments map
unsigned char segm_[12] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
auto set_segm = [](const int ei, const int eo, unsigned char segm_[12]) {
segm_[ei] &= 0xF0;
segm_[ei] |= ((unsigned char)eo) & 0xF;
segm_[eo] &= 0xF;
segm_[eo] |= ((unsigned char)ei) << 4;
};
auto get_segm = [](const int e, const int pos, unsigned char segm_[12]) {
if (pos == 0)
return (int)(segm_[e] & 0xF);
else
return (int)((segm_[e] >> 4) & 0xF);
};
auto is_segm_set = [](const int e, unsigned char segm_[12]) {
return (segm_[e] != 0xFF);
};
auto unset_segm = [](const int e, unsigned char segm_[12]) {
segm_[e] = 0xFF;
};
// In order to compute oriented segments, the hexahedron has to be flatten.
// The insides of the faces of the hexahedron have to be all at the same
// side of the flattend hexa. This requires changing the order of the
// edges when reading from the faces
// code edges at face
unsigned short e_face_[6]{ (ushort)291, (ushort)18277, (ushort)18696, (ushort)10859, (ushort)33719, (ushort)38305 };
// code vertices at face
unsigned short v_face_[6]{ (ushort)12576, (ushort)25717, (ushort)5380, (ushort)29538, (ushort)8292, (ushort)30001 };
// reading edge from face
auto get_face_e = [e_face_](const int f, const int e) { return ((e_face_[f] >> (4 * e)) & 0xF); };
auto get_face_v = [v_face_](const int f, const int e) { return ((v_face_[f] >> (4 * e)) & 0xF); };
// compute oriented segments using the isoline scheme at the faces
auto asymptotic_decider = [](const float f0, const float f1, const float f2, const float f3) {
return (f0*f3 - f1*f2) / (f0 + f3 - f1 - f2);
};
uchar f_flag{ 0 };
for (int f = 0; f < 6; f++) {
// classify face
unsigned int f_case{ 0 };
const int v0 = get_face_v(f, 0);
const int v1 = get_face_v(f, 1);
const int v2 = get_face_v(f, 2);
const int v3 = get_face_v(f, 3);
const int e0 = get_face_e(f, 0);
const int e1 = get_face_e(f, 1);
const int e2 = get_face_e(f, 2);
const int e3 = get_face_e(f, 3);
const float f0 = F[v0];
const float f1 = F[v1];
const float f2 = F[v2];
const float f3 = F[v3];
if (f0 >= i0)
f_case |= BIT_1;
if (f1 >= i0)
f_case |= BIT_2;
if (f2 >= i0)
f_case |= BIT_3;
if (f3 >= i0)
f_case |= BIT_4;
switch (f_case)
{
case 1:
set_segm(e0, e3, segm_);
break;
case 2:
set_segm(e1, e0, segm_);
break;
case 3:
set_segm(e1, e3, segm_);
break;
case 4:
set_segm(e3, e2, segm_);
break;
case 5:
set_segm(e0, e2, segm_);
break;
case 6:
{
const float val = asymptotic_decider(f0, f1, f2, f3);
if (val > i0) {
set_segm(e3, e0, segm_);
set_segm(e1, e2, segm_);
}
else if (val < i0) {
set_segm(e1, e0, segm_);
set_segm(e3, e2, segm_);
}
else {
// set flag for this face
f_flag |= (1 << f);
float ec0 = ecoord[e0];
float ec1 = ecoord[e1];
float ec2 = ecoord[e2];
float ec3 = ecoord[e3];
if ((0x218 >> (f * 2)) & BIT_1) {
ec0 = 1 - ec0;
ec2 = 1 - ec2;
}
if ((0x218 >> (f * 2)) & BIT_2) {
ec1 = 1 - ec1;
ec3 = 1 - ec3;
}
if (ec1 < ec3 && ec0 > ec2) {
set_segm(e1, e0, segm_);
set_segm(e3, e2, segm_);
}
else if (ec1 > ec3 && ec0 < ec2) {
set_segm(e3, e0, segm_);
set_segm(e1, e2, segm_);
}
else {
return;
}
}
}
break;
case 7:
set_segm(e1, e2, segm_);
break;
case 8:
set_segm(e2, e1, segm_);
break;
case 9:
{
const double val = asymptotic_decider(f0, f1, f2, f3);
if (val > i0) {
set_segm(e0, e1, segm_);
set_segm(e2, e3, segm_);
}
else if (val < i0) {
set_segm(e0, e3, segm_);
set_segm(e2, e1, segm_);
}
else {
f_flag = (1 << f);
// singular case val == i0, there are no asymptotes
// check if there is a reasonable triangulation of the face
float ec0 = ecoord[e0];
float ec1 = ecoord[e1];
float ec2 = ecoord[e2];
float ec3 = ecoord[e3];
if ((0x218 >> (f * 2)) & BIT_1) {
ec0 = 1 - ec0;
ec2 = 1 - ec2;
}
if ((0x218 >> (f * 2)) & BIT_2) {
ec1 = 1 - ec1;
ec3 = 1 - ec3;
}
if (ec1 < ec3 && ec0 > ec2) {
set_segm(e0, e1, segm_);
set_segm(e2, e3, segm_);
}
else if (ec1 > ec3 && ec0 < ec2) {
set_segm(e0, e3, segm_);
set_segm(e2, e1, segm_);
}
else {
return;
}
}
}
break;
case 10:
set_segm(e2, e0, segm_);
break;
case 11:
set_segm(e2, e3, segm_);
break;
case 12:
set_segm(e3, e1, segm_);
break;
case 13:
set_segm(e0, e1, segm_);
break;
case 14:
set_segm(e3, e0, segm_);
break;
default:
break;
}
}
// connect oriented segments into oriented contours
// closed contours are coded in 64 bit unsigned long long
// 1) Each entry has 4 bits
// 2) The first 4 entries are reserved for the size of the contours
// 3) The next 12 entries are the indices of the edges constituting the contorus
// The indices are numbers from 0 to 12
unsigned long long c_ = 0xFFFFFFFFFFFF0000;
// in the 4 first bits store size of contours
auto get_cnt_size = [](const int cnt, unsigned long long &c_) {
return (size_t)((c_ & (0xF << 4 * cnt)) >> 4 * cnt);
};
auto set_cnt_size = [](const int cnt, const int size, unsigned long long &c_) {
// unset contour size
c_ &= ~(0xF << 4 * cnt);
c_ |= (size << 4 * cnt);
};
// set corresponging edge
auto set_c = [](const int cnt, const int pos, const int val, unsigned long long &c_) {
const uint mask[4] = { 0x0, 0xF, 0xFF, 0xFFF };
const uint c_sz = c_ & mask[cnt];
const uint e = 16 + 4 * ((c_sz & 0xF) + ((c_sz & 0xF0) >> 4) + ((c_sz & 0xF00) >> 8) + pos);
c_ &= ~(((unsigned long long)0xF) << e);
c_ |= (((unsigned long long)val) << e);
};
// read edge from contour
auto get_c = [](const int cnt, const int pos, unsigned long long c_) {
const uint mask[4] = { 0x0, 0xF, 0xFF, 0xFFF };
const uint c_sz = (uint)(c_ & mask[cnt]);
const uint e = 16 + 4 * ((c_sz & 0xF) + ((c_sz & 0xF0) >> 4) + ((c_sz & 0xF00) >> 8) + pos);
return (int)((c_ >> e) & 0xF);
};
// connect oriented contours
uint cnt_{ 0 };
for (uint e = 0; e < 12; e++) {
if (is_segm_set(e, segm_)) {
uint eTo = get_segm(e, 0, segm_);
uint eIn = get_segm(e, 1, segm_);
uint eStart = e;
uint pos = 0;
set_c(cnt_, pos, eStart, c_);
while (eTo != eStart) {
pos = pos + 1;
set_c(cnt_, pos, eTo, c_);
eIn = eTo;
eTo = get_segm(eIn, 0, segm_);
unset_segm(eIn, segm_);
}
// set contour length
set_cnt_size(cnt_, pos + 1, c_);
// update number of contours
cnt_ = cnt_ + 1;
}
}
// compute intersection of opposite faces
float ui[2]{};
float vi[2]{};
float wi[2]{};
unsigned char q_sol{ 0 };
const float a = (F[0] - F[1])*(-F[6] + F[7] + F[4] - F[5]) - (F[4] - F[5])*(-F[2] + F[3] + F[0] - F[1]);
const float b = (i0 - F[0])*(-F[6] + F[7] + F[4] - F[5]) + (F[0] - F[1])*(F[6] - F[4]) - (i0 - F[4])*(-F[2] + F[3] + F[0] - F[1]) - (F[4] - F[5])*(F[2] - F[0]);
const float c = (i0 - F[0])*(F[6] - F[4]) - (i0 - F[4])*(F[2] - F[0]);;
float d = b*b - 4 * a*c;
if (d > 0) {
d = std::sqrt(d);
// compute u-coord of solutions
ui[0] = (-b - d) / (2 * a);
ui[1] = (-b + d) / (2 * a);
// compute v-coord of solutions
float g1 = F[0] * (1 - ui[0]) + F[1] * ui[0];
float g2 = F[2] * (1 - ui[0]) + F[3] * ui[0];
vi[0] = (i0 - g1) / (g2 - g1);
if (isnan(vi[0]) || isinf(vi[0])) {
vi[0] = -1.f;
}
g1 = F[0] * (1 - ui[1]) + F[1] * ui[1];
g2 = F[2] * (1 - ui[1]) + F[3] * ui[1];
vi[1] = (i0 - g1) / (g2 - g1);
if (isnan(vi[1]) || isinf(vi[1])) {
vi[1] = -1.f;
}
// compute w-coordinates of solutions
g1 = F[0] * (1 - ui[0]) + F[1] * ui[0];
g2 = F[4] * (1 - ui[0]) + F[5] * ui[0];
wi[0] = (i0 - g1) / (g2 - g1);
if (isnan(wi[0]) || isinf(wi[0])) {
wi[0] = -1.f;
}
g1 = F[0] * (1 - ui[1]) + F[1] * ui[1];
g2 = F[4] * (1 - ui[1]) + F[5] * ui[1];
wi[1] = (i0 - g1) / (g2 - g1);
if (isnan(wi[1]) || isinf(wi[1])) {
wi[1] = -1.f;
}
// correct values for roots of quadratic equations
// in case the asymptotic decider has failed
if (f_flag & BIT_1) { // face 1, w = 0;
if (wi[0] < wi[1]) wi[0] = 0;
else wi[1] = 0;
}
if (f_flag & BIT_2) { // face 2, w = 1
if (wi[0] > wi[1]) wi[1] = 1;
else wi[1] = 1;
}
if (f_flag & BIT_3) { // face 3, v = 0
if (vi[0] < vi[1]) vi[0] = 0;
else vi[1] = 0;
}
if (f_flag & BIT_4) { // face 4, v = 1
if (vi[0] > vi[1]) vi[0] = 1;
else vi[1] = 1;
}
if (f_flag & BIT_5) { // face 5, u = 0
if (ui[0] < ui[1]) ui[0] = 0;
else ui[1] = 0;
}
if (f_flag & BIT_6) { // face 6, u = 1
if (ui[0] > ui[1]) ui[0] = 1;
else ui[1] = 1;
}
// check solution intervals
if (0 < ui[0] && ui[0] < 1) {
q_sol |= 1;
}
if (0 < ui[1] && ui[1] < 1) {
q_sol |= 2;
}
if (0 < vi[0] && vi[0] < 1) {
q_sol |= 4;
}
if (0 < vi[1] && vi[1] < 1) {
q_sol |= 8;
}
if (0 < wi[0] && wi[0] < 1) {
q_sol |= 16;
}
if (0 < wi[1] && wi[1] < 1) {
q_sol |= 32;
}
}
// compute the number of solutions to the quadratic equation for a given face
auto nrQSolFace = [](const uint f, const unsigned char n) {
uint nr{ 0 };
switch (f) {
case 0:
if ((n & 0x5) == 0x5)
nr = nr + 1;
if ((n & 0xA) == 0xA)
nr = nr + 1;
break;
case 1:
if ((n & 0x11) == 0x11) nr = nr + 1;
if ((n & 0x22) == 0x22) nr = nr + 1;
break;
case 2:
if ((n & 0x18) == 0x18) nr = nr + 1;
if ((n & 0x24) == 0x24) nr = nr + 1;
break;
}
return nr;
};
// triangulate contours
// if all bits are set, then there are three pairs of nontrivial solutions
// to the quadratic equations. In this case, there is a tunnel or a contour
// with 12 vertices. If there are three contours, then there is a tunnel and
// one of the contorus with only three vertices is not part of it.
// Triangles are stored in global memory starting at offset
// count nr. of inner vertices to compute right global index
// first inner vertex has index cell_global_index + 3;
int v_count{ 3 };
if (numberOfSetBits<unsigned char>(q_sol) == 6) {
// there are at most three contours
// Possible cases:
// 1) a single contour with 12 vertices
// 2) two contours which build a tunnel
// 3) three contours, one has only 3 vertices and does not belong to the tunnel
// construct the six vertices of the inner hexagon
float3 hvt[6];
hvt[0].x = ui[0]; hvt[0].y = vi[0]; hvt[0].z = wi[0];
hvt[1].x = ui[0]; hvt[1].y = vi[0]; hvt[1].z = wi[1];
hvt[2].x = ui[1]; hvt[2].y = vi[0]; hvt[2].z = wi[1];
hvt[3].x = ui[1]; hvt[3].y = vi[1]; hvt[3].z = wi[1];
hvt[4].x = ui[1]; hvt[4].y = vi[1]; hvt[4].z = wi[0];
hvt[5].x = ui[0]; hvt[5].y = vi[1]; hvt[5].z = wi[0];
// construct vertices at intersections with the edges
auto e_vert = [&ecoord](const int e, const int i) {
const unsigned int l_coord[3]{ 1324855, 5299420, 16733440 };
unsigned char flag = (l_coord[i] >> (2 * e)) & 3;
if (flag == 3)
return ecoord[e];
else
return (float)(flag);
};
// if there are three contours, then there is a tunnel and one
// of the contours is not part of it.
unsigned char _not_tunnel = 0xF;
if (cnt_ == 3) {
// loop over the contorus
// triangulate the contour which is not part of
// the tunnel
const float uc_min = (ui[0] < ui[1]) ? ui[0] : ui[1];
const float uc_max = (ui[0] < ui[1]) ? ui[1] : ui[0];
for (int t = 0; t < (int)cnt_; t++) {
if (get_cnt_size(t, c_) == 3) {
float umin = 2;
float umax = -2;
uint e0 = get_c(t, 0, c_);
uint e1 = get_c(t, 1, c_);
uint e2 = get_c(t, 2, c_);
const float u_e0 = e_vert(e0, 0);
const float u_e1 = e_vert(e1, 0);
const float u_e2 = e_vert(e2, 0);
umin = (u_e0 < umin) ? u_e0 : umin;
umin = (u_e1 < umin) ? u_e1 : umin;
umin = (u_e2 < umin) ? u_e2 : umin;
umax = (u_e0 > umax) ? u_e0 : umax;
umax = (u_e1 > umax) ? u_e1 : umax;
umax = (u_e2 > umax) ? u_e1 : umax;
if (uc_min > umax || uc_max < umin) {
// this contour is not part of the tunnel
_not_tunnel = t;
// save triangle in global memory
addHalfedges(he_, het_, v_gindex[e0], v_gindex[e1], v_gindex[e2]);
//const int a_ = atomicAdd(he_cnt, 3);
//addHalfedges(nr_he, he_e, he_table, he_ids, a_, v_gindex[e0], v_gindex[e1], v_gindex[e2]);
//t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[e0], v_gindex[e1], v_gindex[e2], 0);
}
}
}
}
// compute vertices of inner hexagon, save new vertices in list and compute and keep
// global vertice index to build triangle connectivity later on.
int tg_idx[6];
for (int i = 0; i < 6; i++) {
float4 hp;
float4 hn;
// local coordinates for trilinear interpolation
const float u = hvt[i].x; const float v = hvt[i].y; const float w = hvt[i].z;
hp.x = (1 - w)*((1 - v)*(p[0].x + u*(p[1].x - p[0].x)) + v*(p[2].x + u*(p[3].x - p[2].x))) + w*((1 - v)*(p[4].x + u*(p[5].x - p[4].x)) + v*(p[6].x + u*(p[7].x - p[6].x)));
hp.y = (1 - w)*((1 - v)*(p[0].y + u*(p[1].y - p[0].y)) + v*(p[2].y + u*(p[3].y - p[2].y))) + w*((1 - v)*(p[4].y + u*(p[5].y - p[4].y)) + v*(p[6].y + u*(p[7].y - p[6].y)));
hp.z = (1 - w)*((1 - v)*(p[0].z + u*(p[1].z - p[0].z)) + v*(p[2].z + u*(p[3].z - p[2].z))) + w*((1 - v)*(p[4].z + u*(p[5].z - p[4].z)) + v*(p[6].z + u*(p[7].z - p[6].z)));
hn.x = (1 - w)*((1 - v)*(n[0].x + u*(n[1].x - n[0].x)) + v*(n[2].x + u*(n[3].x - n[2].x))) + w*((1 - v)*(n[4].x + u*(n[5].x - n[4].x)) + v*(n[6].x + u*(n[7].x - n[6].x)));
hn.y = (1 - w)*((1 - v)*(n[0].y + u*(n[1].y - n[0].y)) + v*(n[2].y + u*(n[3].y - n[2].y))) + w*((1 - v)*(n[4].y + u*(n[5].y - n[4].y)) + v*(n[6].y + u*(n[7].y - n[6].y)));
hn.z = (1 - w)*((1 - v)*(n[0].z + u*(n[1].z - n[0].z)) + v*(n[2].z + u*(n[3].z - n[2].z))) + w*((1 - v)*(n[4].z + u*(n[5].z - n[4].z)) + v*(n[6].z + u*(n[7].z - n[6].z)));
// normalize normal
const float factor = std::sqrt(hn.x * hn.x + hn.y * hn.y + hn.z * hn.z);
hn.x = hn.x / factor;
hn.y = hn.y / factor;
hn.z = hn.z / factor;
// the fourth coord.
hp.w = 1.f;
hn.w = 0.f;
// this vertices are inner vertices
tg_idx[i] = insert_vertex(int(9 * gl_index + v_count),ht_,v_,hp,hn);
//int v_addr{ -1 };
//if (insert_vertex_key(tg_idx[i], ht_, v_addr)) {
// //const int pos = atomicAdd(v_.t_size, 1);
// //v_.vertices[pos] = hp;
// //v_.normals[pos] = hn;
// //v_.v[pos].v = hp;
// //v_.v[pos].n = hn;
// //ht_.addr[v_addr] = pos;
// ht_.addr[v_addr] = v_.set(hp, hn);
//}
// update nr. of vertices
v_count++;
}
// triangulate contours with inner hexagon
unsigned char tcon_[12];
for (int i = 0; i < (int)cnt_; i++) {
if (_not_tunnel != i) { // contour belongs to tunnel
const int cnt_sz = (int)get_cnt_size(i, c_);
for (int r = 0; r < cnt_sz; r++) {
int index = -1;
double dist = 1000.;
uint ci = get_c(i, r, c_);
const double u_edge = e_vert(ci, 0);
const double v_edge = e_vert(ci, 1);
const double w_edge = e_vert(ci, 2);
for (int s = 0; s < 6; s++) {
const double uval = u_edge - hvt[s].x;
const double vval = v_edge - hvt[s].y;
const double wval = w_edge - hvt[s].z;
double val = uval*uval + vval*vval + wval*wval;
if (dist > val) {
index = s;
dist = val;
}
}
tcon_[ci] = (unsigned char)index;
}
// correspondence between vertices found
// create triangles
// needs some functions
auto distanceRingIntsModulo = [](const int d1, const int d2) {
const int r = (d1 - d2) < 0 ? d2 - d1 : d1 - d2;
return (r > 2 ? 6 - r : r);
};
auto midpointRingIntModulo = [](const int d1, const int d2) {
const int dmax = (d1 > d2) ? d1 : d2;
const int dmin = (d1 < d2) ? d1 : d2;
return ((dmax + 2) % 6 == dmin) ? (dmax + 1) % 6 : (dmax + dmin) / 2;
};
for (int r = 0; r < cnt_sz; r++) {
const uint tid1 = get_c(i, r, c_);
const uint tid2 = get_c(i, ((r + 1) % cnt_sz), c_);
const uint cid1 = tcon_[tid1];
const uint cid2 = tcon_[tid2];
// compute index distance
const int dst = distanceRingIntsModulo(cid1, cid2);
switch (dst)
{
case 0:
{
addHalfedges(he_, het_, v_gindex[tid1], v_gindex[tid2], tg_idx[cid1]);
//const int a_ = atomicAdd(he_cnt, 3);
//addHalfedges(nr_he, he_e, he_table, he_ids, a_, v_gindex[tid1], v_gindex[tid2], tg_idx[cid1]);
//t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[tid1], v_gindex[tid2], tg_idx[cid1], 0);
}
break;
case 1:
{
// measure diagonals
// triangulate along shortest diagonal
float u_edge = e_vert(tid1, 0);
float v_edge = e_vert(tid1, 1);
float w_edge = e_vert(tid1, 2);
const float l1 = (u_edge - hvt[cid2].x)*(u_edge - hvt[cid2].x) + (v_edge - hvt[cid2].y)*(v_edge - hvt[cid2].y) + (w_edge - hvt[cid2].z)*(w_edge - hvt[cid2].z);
u_edge = e_vert(tid2, 0);
v_edge = e_vert(tid2, 1);
w_edge = e_vert(tid2, 2);
const double l2 = (u_edge - hvt[cid1].x)*(u_edge - hvt[cid1].x) + (v_edge - hvt[cid1].y)*(v_edge - hvt[cid1].y) + (w_edge - hvt[cid1].z)*(w_edge - hvt[cid1].z);
if (l1 < l2) {
addHalfedges(he_, het_, v_gindex[tid1], v_gindex[tid2], tg_idx[cid2]);
addHalfedges(he_, het_, v_gindex[tid1], tg_idx[cid2], tg_idx[cid1]);
}
else {
addHalfedges(he_, het_, v_gindex[tid1], v_gindex[tid2], tg_idx[cid1]);
addHalfedges(he_, het_, v_gindex[tid2], tg_idx[cid2], tg_idx[cid1]);
}
}
break;
case 2:
{
const int cidm = midpointRingIntModulo(cid1, cid2);
addHalfedges(he_, het_, v_gindex[tid1], v_gindex[tid2], tg_idx[cidm]);
addHalfedges(he_, het_, v_gindex[tid1], tg_idx[cidm], tg_idx[cid1]);
addHalfedges(he_, het_, v_gindex[tid2], tg_idx[cid2], tg_idx[cidm]);
}
break;
} // switch
} // for loop over the vertices of the contour
} // if (_not_tunnel)
} // for loop over contours
if (cnt_ == 1) {
// there is a single contour
// triangulate and close inner hexagon
addHalfedges(he_, het_, tg_idx[0], tg_idx[2], tg_idx[1]);
addHalfedges(he_, het_, tg_idx[2], tg_idx[4], tg_idx[3]);
addHalfedges(he_, het_, tg_idx[0], tg_idx[5], tg_idx[4]);
addHalfedges(he_, het_, tg_idx[0], tg_idx[4], tg_idx[2]);
}
}
else {
// there is no tunnel
// handle case with no saddle point as simple polygons with 3, 4, 5 or six vertices
const unsigned char nr_u{ (unsigned char)nrQSolFace(0, q_sol) };
const unsigned char nr_v{ (unsigned char)nrQSolFace(1, q_sol) };
const unsigned char nr_w{ (unsigned char)nrQSolFace(2, q_sol) };
const unsigned char nr_t{ (unsigned char)(nr_u + nr_v + nr_w) };
if (nr_t == nr_u || nr_t == nr_v || nr_t == nr_w) {
// loop over all contours
for (int i = 0; i < (int)cnt_; i++) {
switch (get_cnt_size(i, c_)) {
case 3:
{
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)]);
}
break;
case 4:
{
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)]);
}
break;
case 5:
{
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 3, c_)], v_gindex[get_c(i, 4, c_)]);
}
break;
case 6:
{
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 3, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 3, c_)], v_gindex[get_c(i, 4, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 4, c_)], v_gindex[get_c(i, 5, c_)]);
}
break;
} // switch over size of contour
} // loop over contorus
} // thre are no saddle points
else {
// there are saddle points
//fc1 = fs(1, 1)*fs(2, 1) + fs(1, 2)*fs(2, 2);
//fc2 = fs(1, 1)*fs(3, 1) + fs(1, 2)*fs(3, 2);
//fc3 = fs(2, 1)*fs(3, 2) + fs(2, 2)*fs(3, 1);
unsigned char fs[3][2]{ { (unsigned char)(q_sol & 1), (unsigned char)((q_sol >> 1) & 1) },{ (unsigned char)((q_sol >> 2) & 1), (unsigned char)((q_sol >> 3) & 1) },{ (unsigned char)((q_sol >> 4) & 1), (unsigned char)((q_sol >> 5) & 1) } };
const unsigned char fc1 = fs[0][0] * fs[1][0] + fs[0][1] * fs[1][1];
const unsigned char fc2 = fs[0][0] * fs[2][0] + fs[0][1] * fs[2][1];
const unsigned char fc3 = fs[1][0] * fs[2][1] + fs[1][1] * fs[2][0];
const unsigned char c_faces = fc1 + fc2 + fc3;
float ucoord{};
float vcoord{};
float wcoord{};
switch (c_faces) {
case 2:
{
if (fc1 == 0) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][0] * wi[1] + fs[1][1] * wi[0];
}
else if (fc2 == 0) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[0][0] * vi[0] + fs[0][1] * vi[1];
wcoord = fs[0][0] * wi[1] + fs[0][1] * wi[0];
}
else if (fc3 == 0) {
ucoord = fs[1][0] * ui[0] + fs[1][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][0] * wi[0] + fs[1][1] * wi[1];
}
}
break;
case 3:
{
ucoord = (fs[0][0] * ui[0] + fs[0][1] * ui[1]) / (fs[0][0] + fs[0][1]);
vcoord = (fs[1][0] * vi[0] + fs[1][1] * vi[1]) / (fs[1][0] + fs[1][1]);
wcoord = (fs[2][0] * wi[0] + fs[2][1] * wi[1]) / (fs[2][0] + fs[2][1]);
}
break;
case 4:
{
const unsigned char nr_u = fs[0][0] + fs[0][1];
const unsigned char nr_v = fs[1][0] + fs[1][1];
const unsigned char nr_w = fs[2][0] + fs[2][1];
if (nr_w == 1) {
ucoord = fs[2][0] * ui[0] + fs[2][1] * ui[1];
vcoord = fs[2][1] * vi[0] + fs[2][0] * vi[1];
wcoord = fs[2][0] * wi[0] + fs[2][1] * wi[1];
}
else if (nr_v == 1) {
ucoord = fs[1][0] * ui[0] + fs[1][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][1] * wi[0] + fs[1][0] * wi[1];
}
else if (nr_u == 1) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[0][0] * vi[0] + fs[0][1] * vi[1];
wcoord = fs[0][0] * wi[0] + fs[0][1] * wi[1];
}
}
break;
} // switch(c_faces)
// create inner vertex
float4 ip;
float4 in;
ip.x = (1 - wcoord)*((1 - vcoord)*(p[0].x + ucoord*(p[1].x - p[0].x)) + vcoord*(p[2].x + ucoord*(p[3].x - p[2].x))) + wcoord*((1 - vcoord)*(p[4].x + ucoord*(p[5].x - p[4].x)) + vcoord*(p[6].x + ucoord*(p[7].x - p[6].x)));
ip.y = (1 - wcoord)*((1 - vcoord)*(p[0].y + ucoord*(p[1].y - p[0].y)) + vcoord*(p[2].y + ucoord*(p[3].y - p[2].y))) + wcoord*((1 - vcoord)*(p[4].y + ucoord*(p[5].y - p[4].y)) + vcoord*(p[6].y + ucoord*(p[7].y - p[6].y)));
ip.z = (1 - wcoord)*((1 - vcoord)*(p[0].z + ucoord*(p[1].z - p[0].z)) + vcoord*(p[2].z + ucoord*(p[3].z - p[2].z))) + wcoord*((1 - vcoord)*(p[4].z + ucoord*(p[5].z - p[4].z)) + vcoord*(p[6].z + ucoord*(p[7].z - p[6].z)));
in.x = (1 - wcoord)*((1 - vcoord)*(n[0].x + ucoord*(n[1].x - n[0].x)) + vcoord*(n[2].x + ucoord*(n[3].x - n[2].x))) + wcoord*((1 - vcoord)*(n[4].x + ucoord*(n[5].x - n[4].x)) + vcoord*(n[6].x + ucoord*(n[7].x - n[6].x)));
in.y = (1 - wcoord)*((1 - vcoord)*(n[0].y + ucoord*(n[1].y - n[0].y)) + vcoord*(n[2].y + ucoord*(n[3].y - n[2].y))) + wcoord*((1 - vcoord)*(n[4].y + ucoord*(n[5].y - n[4].y)) + vcoord*(n[6].y + ucoord*(n[7].y - n[6].y)));
in.z = (1 - wcoord)*((1 - vcoord)*(n[0].z + ucoord*(n[1].z - n[0].z)) + vcoord*(n[2].z + ucoord*(n[3].z - n[2].z))) + wcoord*((1 - vcoord)*(n[4].z + ucoord*(n[5].z - n[4].z)) + vcoord*(n[6].z + ucoord*(n[7].z - n[6].z)));
// normalize normal
const float factor = std::sqrt(in.x * in.x + in.y * in.y + in.z * in.z);
in.x = in.x / factor;
in.y = in.y / factor;
in.z = in.z / factor;
// the fourth coordinate
ip.w = 1.f;
in.w = 0.f;
// global index
int gidx = int(9 * gl_index + v_count);
// this point is only used if contours with more than three vertices
// are present
for (int i = 0; i < (int)cnt_; i++) {
if (get_cnt_size(i, c_) > 3) {
gidx = insert_vertex(gidx, ht_, v_, ip, in);
}
}
//bool pt_used{ false };
// loop over the contorus
for (int i = 0; i < (int)cnt_; i++) {
const unsigned char cnt_sz = (unsigned char)get_cnt_size(i, c_);
if (cnt_sz == 3) {
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)]);
}
else {
//pt_used = true;
for (int t = 0; t < cnt_sz; t++) {
// add triangle to list
addHalfedges(he_, het_, v_gindex[get_c(i, t, c_)], v_gindex[get_c(i, (t + 1) % cnt_sz, c_)], gidx);
}
}
}
} // else - there are saddle points
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// HOST CODE
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// read a volume data set from a binary file
// File layout: 3 x unsigned short grid dimensions, 3 x float grid spacings,
// followed by dim_x * dim_y * dim_z unsigned short scalar values.
// On any I/O error the function prints a message and exits.
void
p_mc::MarchingCubes::readDataFromFile(const std::string& i_file, std::array<int,3>& dim, std::array<float,3>& origin, std::array<float,3>& spacing, std::vector<float>& v_data)
{
    std::FILE* f{ nullptr };
    errno_t status = fopen_s(&f, i_file.c_str(), "rb");
    if (status != 0) {
        std::cerr << "ERROR: can't open file " << i_file.c_str() << std::endl;
        exit(1);
    }
    // header: grid dimensions (unsigned, matching the sizeof(unsigned short)
    // reads below and avoiding negative sizes for grids larger than 32767)
    unsigned short x_size{ 0 };
    unsigned short y_size{ 0 };
    unsigned short z_size{ 0 };
    size_t nread{ 0 };
    nread += std::fread(&x_size, sizeof(unsigned short), 1, f);
    nread += std::fread(&y_size, sizeof(unsigned short), 1, f);
    nread += std::fread(&z_size, sizeof(unsigned short), 1, f);
    // header: grid spacing
    float dx{ 0.f };
    float dy{ 0.f };
    float dz{ 0.f };
    nread += std::fread(&dx, sizeof(float), 1, f);
    nread += std::fread(&dy, sizeof(float), 1, f);
    nread += std::fread(&dz, sizeof(float), 1, f);
    if (nread != 6) {
        std::cerr << "ERROR: can't read header of file " << i_file.c_str() << std::endl;
        std::fclose(f);
        exit(1);
    }
    // scalar data: read as unsigned short, then convert to float;
    // std::vector avoids the raw new[]/delete[] of the previous version
    const size_t v_size = (size_t)x_size * y_size * z_size;
    std::vector<unsigned short> v_buff(v_size);
    if (std::fread(v_buff.data(), sizeof(unsigned short), v_size, f) != v_size) {
        std::cerr << "ERROR: can't read volume data from file " << i_file.c_str() << std::endl;
        std::fclose(f);
        exit(1);
    }
    std::fclose(f);
    v_data.assign(v_buff.begin(), v_buff.end());
    // set uniform grid data
    dim[0] = x_size;
    dim[1] = y_size;
    dim[2] = z_size;
    origin[0] = 0.f;
    origin[1] = 0.f;
    origin[2] = 0.f;
    spacing[0] = dx;
    spacing[1] = dy;
    spacing[2] = dz;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main function to process a volume data set
// Compute the iso-surface for iso-value i0 from the volume file i_file and
// build a halfedge mesh. Outputs (allocated here with new[], owned by caller):
//   vertices/normals : 3 floats per vertex (normals are flipped)
//   h_hee            : one int4 per halfedge (x=origin vertex, y=face, z=next, w=twin)
//   h_hev            : one incident halfedge index per vertex
//   h_hef            : one incident halfedge index per face
void
p_mc::MarchingCubes::mc_halfedge(const float i0,const std::string& i_file, int& nr_v, float** vertices, float** normals, int& nr_t, int4** h_hee, int** h_hev,int** h_hef)
{
    Halfedges he_;
    HalfedgeHashTable het_;
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // read data from file and configure the uniform grid
    std::cout << " ... read data from file\n";
    std::vector<float> h_data;
    UGrid ugrid;
    std::array<int, 3> dims;
    std::array<float, 3> origin;
    std::array<float, 3> spacing;
    readDataFromFile(i_file, dims, origin, spacing, h_data);
    ugrid.size(dims[0], dims[1], dims[2]);
    ugrid.dx = spacing[0];
    ugrid.dy = spacing[1];
    ugrid.dz = spacing[2];
    ugrid.x0 = origin[0];
    ugrid.y0 = origin[1];
    ugrid.z0 = origin[2];
    // measure processing time
    CTimer ctimer1;
    // allocate 3D texture
    std::cout << " ... allocate 3D texture\n";
    const size_t x_size = (size_t)ugrid.idim;
    const size_t y_size = (size_t)ugrid.jdim;
    const size_t z_size = (size_t)ugrid.kdim;
    const size_t t_size = x_size * y_size * z_size;
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // create texture buffer for 3D data and copy the volume into it
    hipArray* d_data;
    hipExtent extent = make_hipExtent(x_size, y_size, z_size);
    hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
    hipMalloc3DArray(&d_data, &desc, extent);
    hipMemcpy3DParms params{ 0 };
    params.srcPtr = make_hipPitchedPtr(&(h_data[0]), x_size * sizeof(float), x_size, y_size);
    params.dstArray = d_data;
    params.extent = extent;
    params.kind = hipMemcpyHostToDevice;
    hipMemcpy3D(&params); // BUGFIX: address-of operator was garbled ("¶ms")
    // create Texture object: element-type reads, no filtering
    hipTextureDesc texDesc{};
    memset(&texDesc, 0, sizeof(hipTextureDesc));
    texDesc.readMode = hipReadModeElementType;
    texDesc.filterMode = hipFilterModePoint;
    hipResourceDesc resDesc{};
    memset(&resDesc, 0, sizeof(hipResourceDesc));
    resDesc.resType = hipResourceTypeArray;
    resDesc.res.array.array = d_data;
    hipCreateTextureObject(&m_volume, &resDesc, &texDesc, nullptr);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // copy marching cubes lookup tables to the device
    MC_lookup l_tables;
    initMC_lookup(l_tables, e_pattern, t_pattern, t_ambig);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // global processing time
    std::cout << " ... compute isosurface\n";
    ctimer1.start();
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // atomic counters for the nr. of vertices in unambiguous/ambiguous cells
    CellsIds cells;
    initCells(cells,(int)(t_size / 3));
    AmbiguousCells acells;
    initACells(acells,(int)(t_size / 4));
    int* d_vcount{ nullptr };
    int* d_acount{ nullptr };
    hipMalloc(&d_vcount, sizeof(int));
    hipMemset(d_vcount, 0, sizeof(int));
    hipMalloc(&d_acount, sizeof(int));
    hipMemset(d_acount, 0, sizeof(int));
    uint b_size = 512;
    uint g_size{ ((uint)t_size + b_size - 1) / b_size };
    mc_count << < g_size, b_size >> >(cells, acells, d_vcount, d_acount, i0, m_volume, ugrid, l_tables);
    // each vertex is counted up to four times (except at the boundaries),
    // so (nr1 + nr2) / 2 overestimates; it is only used to size the buffers
    int nr1{ 0 };
    int nr2{ 0 };
    hipMemcpy(&nr1, d_vcount, sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(&nr2, d_acount, sizeof(int), hipMemcpyDeviceToHost);
    nr_v = (nr1 + nr2) / 2;
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // 1. alloc and init vertex hash table
    //    (the actual sizing factor is defined in initVertexHashTable)
    VertexHashTable ht_;
    initVertexHashTable(ht_, nr_v);
    b_size = 512;
    g_size = (ht_.t_size + b_size - 1) / b_size;
    init_hash_table << < g_size, b_size >> >(ht_);
    // 2. alloc and init vertices
    Vertices v_;
    initVertices(v_, nr_v);
    // 3. alloc and init triangles
    Triangles t_;
    initTriangles(t_, 2 * nr_v);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // compute iso-surface: standard cells first, then ambiguous cells
    const int nr_cells = size<CellsIds>(cells);
    const int nr_acells = size<AmbiguousCells>(acells);
    b_size = MC_BLOCKSIZE;
    g_size = (nr_cells + b_size - 1) / b_size;
    mc_slice << < g_size, b_size >> > (i0, m_volume, ugrid, l_tables, nr_cells, cells, ht_, v_, t_);
    b_size = AMB_BLOCKSIZE;
    g_size = (nr_acells + b_size - 1) / b_size;
    t_slice << < g_size, b_size >> >(i0, m_volume, ugrid, l_tables, nr_acells, acells, ht_, v_, t_);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // compute shared vertex list for the triangle mesh:
    // triangle indices have to be mapped to vertex indices in the vertex array
    nr_v = size<Vertices>(v_);
    nr_t = size<Triangles>(t_);
    b_size = 512;
    g_size = (3 * nr_t + b_size - 1) / b_size;
    hipLaunchKernelGGL(( map_triangles_fast) , dim3(g_size), dim3(b_size) , 0, 0, nr_t,ht_, t_);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // create the halfedge data structure:
    //   halfedge int4: he.x = origin vertex, he.y = face, he.z = next, he.w = twin
    //   vertex int: points to one incident halfedge
    //   face int: points to one halfedge of this face
    const int nr_he = 3 * nr_t;
    initHalfedges(he_, nr_he, nr_v, nr_t);
    initHalfedgeHashTable(het_, 2 * nr_he); // hash table sized 2x the total nr. of halfedges
    // for each triangle create three halfedges and register unique twin ids
    // in the hash table
    b_size = 256;
    g_size = (nr_he + b_size - 1) / b_size;
    create_halfedge << < g_size, b_size >> > (nr_he, t_, he_, het_);
    // connect each halfedge with its twin; process each entry in the table
    g_size = (het_.t_size + b_size - 1) / b_size;
    map_halfedge_twins_fast << < g_size, b_size >> > (he_, het_);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // compute processing time
    ctimer1.stop();
    hipDeviceSynchronize();
    ctimer1.print(std::string("tmc"));
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // copy data back to host: shared vertex list
    float4* v_array = new float4[nr_v];
    float4* n_array = new float4[nr_v];
    int4* t_array = new int4[nr_t];
    hipMemcpy(v_array, v_.vertices, nr_v * sizeof(float4), hipMemcpyDeviceToHost);
    hipMemcpy(n_array, v_.normals, nr_v * sizeof(float4), hipMemcpyDeviceToHost);
    hipMemcpy(t_array, t_.triangles, nr_t * sizeof(int4), hipMemcpyDeviceToHost);
    // halfedge data structure
    *h_hee = new int4[nr_he];
    *h_hev = new int[nr_v];
    *h_hef = new int[nr_t];
    hipMemcpy(*h_hee, he_.he_e, nr_he * sizeof(int4), hipMemcpyDeviceToHost);
    hipMemcpy(*h_hev, he_.he_v, nr_v * sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(*h_hef, he_.he_f, nr_t * sizeof(int), hipMemcpyDeviceToHost);
    std::cout << " ... total nr. of vertices " << nr_v << std::endl;
    std::cout << " ... total nr. of triangles " << nr_t << std::endl;
    std::cout << " ... total nr. of unambiguous cells " << nr1 << std::endl;
    std::cout << " ... total nr. of ambiguous cells " << nr2 << std::endl;
    *vertices = new float[3 * nr_v];
    *normals = new float[3 * nr_v];
    for (int id = 0; id < nr_v; id++) {
        // copy vertices
        (*vertices)[3 * id] = v_array[id].x;
        (*vertices)[3 * id + 1] = v_array[id].y;
        (*vertices)[3 * id + 2] = v_array[id].z;
        // copy normals (sign-flipped, as in the shared-vertex variant)
        (*normals)[3 * id] = -n_array[id].x;
        (*normals)[3 * id + 1] = -n_array[id].y;
        (*normals)[3 * id + 2] = -n_array[id].z;
    }
    std::cout << " ... done\n";
    // free host memory
    delete[] v_array;
    delete[] n_array;
    delete[] t_array;
    // free device memory
    hipFreeArray(d_data);
    freeMC_lookup(l_tables);
    freeVertices(v_);
    freeTriangles(t_);
    freeHalfedges(he_);
    freeHalfedgeHashTable(het_);
    freeCells(cells);
    freeACells(acells);
    // arrays for vertex count
    hipFree(d_acount);
    hipFree(d_vcount);
    // NOTE(review): the vertex hash table ht_ and the texture object m_volume
    // are not released here -- confirm whether matching free/destroy calls
    // exist (cf. freeHalfedgeHashTable / hipDestroyTextureObject) to avoid
    // device-memory leaks on repeated invocations.
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// HALFEDGE RECONSTRUCTION
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main function to process a volume data set
// Compute the iso-surface for iso-value i0 from the volume file i_file and
// return a shared-vertex triangle mesh. Outputs (allocated here with new[],
// owned by the caller):
//   vertices/normals : 3 floats per vertex (normals are flipped)
//   triangles        : 3 vertex indices per triangle
void
p_mc::MarchingCubes::mc_sharedvertex(const float i0, const std::string& i_file, int& nr_v, float** vertices, float** normals, int& nr_t, int** triangles)
{
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // read data from file and configure the uniform grid
    std::cout << " ... read data from file\n";
    std::vector<float> h_data;
    UGrid ugrid;
    std::array<int, 3> dims;
    std::array<float, 3> origin;
    std::array<float, 3> spacing;
    readDataFromFile(i_file, dims, origin, spacing, h_data);
    ugrid.size(dims[0], dims[1], dims[2]);
    ugrid.dx = spacing[0];
    ugrid.dy = spacing[1];
    ugrid.dz = spacing[2];
    ugrid.x0 = origin[0];
    ugrid.y0 = origin[1];
    ugrid.z0 = origin[2];
    // measure processing time
    CTimer ctimer1;
    // allocate 3D texture
    std::cout << " ... allocate 3D texture\n";
    const size_t x_size = (size_t)ugrid.idim;
    const size_t y_size = (size_t)ugrid.jdim;
    const size_t z_size = (size_t)ugrid.kdim;
    const size_t t_size = x_size * y_size * z_size;
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // create texture buffer for 3D data and copy the volume into it
    hipArray* d_data;
    hipExtent extent = make_hipExtent(x_size, y_size, z_size);
    hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
    hipMalloc3DArray(&d_data, &desc, extent);
    hipMemcpy3DParms params{ 0 };
    params.srcPtr = make_hipPitchedPtr(&(h_data[0]), x_size * sizeof(float), x_size, y_size);
    params.dstArray = d_data;
    params.extent = extent;
    params.kind = hipMemcpyHostToDevice;
    hipMemcpy3D(&params); // BUGFIX: address-of operator was garbled ("¶ms")
    // create Texture object: element-type reads, no filtering
    hipTextureDesc texDesc{};
    memset(&texDesc, 0, sizeof(hipTextureDesc));
    texDesc.readMode = hipReadModeElementType;
    texDesc.filterMode = hipFilterModePoint;
    hipResourceDesc resDesc{};
    memset(&resDesc, 0, sizeof(hipResourceDesc));
    resDesc.resType = hipResourceTypeArray;
    resDesc.res.array.array = d_data;
    hipCreateTextureObject(&m_volume, &resDesc, &texDesc, nullptr);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // copy marching cubes lookup tables to the device
    MC_lookup l_tables;
    initMC_lookup(l_tables, e_pattern, t_pattern, t_ambig);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // global processing time
    std::cout << " ... compute isosurface\n";
    ctimer1.start();
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // atomic counters for the nr. of vertices in unambiguous/ambiguous cells
    CellsIds cells;
    initCells(cells, (int)(t_size / 3));
    AmbiguousCells acells;
    initACells(acells, (int)(t_size / 4));
    int* d_vcount{ nullptr };
    int* d_acount{ nullptr };
    hipMalloc(&d_vcount, sizeof(int));
    hipMemset(d_vcount, 0, sizeof(int));
    hipMalloc(&d_acount, sizeof(int));
    hipMemset(d_acount, 0, sizeof(int));
    uint b_size = 512;
    uint g_size{ ((uint)t_size + b_size - 1) / b_size };
    mc_count << < g_size, b_size >> >(cells, acells, d_vcount, d_acount, i0, m_volume, ugrid, l_tables);
    // each vertex is counted up to four times (except at the boundaries),
    // so (nr1 + nr2) / 2 overestimates; it is only used to size the buffers
    int nr1{ 0 };
    int nr2{ 0 };
    hipMemcpy(&nr1, d_vcount, sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(&nr2, d_acount, sizeof(int), hipMemcpyDeviceToHost);
    nr_v = (nr1 + nr2) / 2;
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // 1. alloc and init vertex hash table
    //    (the actual sizing factor is defined in initVertexHashTable)
    VertexHashTable ht_;
    initVertexHashTable(ht_, nr_v);
    b_size = 512;
    g_size = (ht_.t_size + b_size - 1) / b_size;
    init_hash_table << < g_size, b_size >> >(ht_);
    // 2. alloc and init vertices
    Vertices v_;
    initVertices(v_, nr_v);
    // 3. alloc and init triangles
    Triangles t_;
    initTriangles(t_, 2 * nr_v);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // compute iso-surface: standard cells first, then ambiguous cells
    const int nr_cells = size<CellsIds>(cells);
    const int nr_acells = size<AmbiguousCells>(acells);
    b_size = MC_BLOCKSIZE;
    g_size = (nr_cells + b_size - 1) / b_size;
    mc_slice << < g_size, b_size >> > (i0, m_volume, ugrid, l_tables, nr_cells, cells, ht_, v_, t_);
    b_size = AMB_BLOCKSIZE;
    g_size = (nr_acells + b_size - 1) / b_size;
    t_slice << < g_size, b_size >> >(i0, m_volume, ugrid, l_tables, nr_acells, acells, ht_, v_, t_);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // compute shared vertex list for the triangle mesh:
    // triangle indices have to be mapped to vertex indices in the vertex array
    nr_v = size<Vertices>(v_);
    nr_t = size<Triangles>(t_);
    b_size = 512;
    g_size = (3 * nr_t + b_size - 1) / b_size;
    map_triangles_fast << < g_size, b_size >> >(nr_t, ht_, t_);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // compute processing time
    ctimer1.stop();
    hipDeviceSynchronize();
    ctimer1.print(std::string("tmc"));
    // copy shared vertex list back to host
    float4* v_array = new float4[nr_v];
    float4* n_array = new float4[nr_v];
    int4* t_array = new int4[nr_t];
    hipMemcpy(v_array, v_.vertices, nr_v * sizeof(float4), hipMemcpyDeviceToHost);
    hipMemcpy(n_array, v_.normals, nr_v * sizeof(float4), hipMemcpyDeviceToHost);
    hipMemcpy(t_array, t_.triangles, nr_t * sizeof(int4), hipMemcpyDeviceToHost);
    std::cout << " ... total nr. of vertices " << nr_v << std::endl;
    std::cout << " ... total nr. of triangles " << nr_t << std::endl;
    std::cout << " ... total nr. of unambiguous cells " << nr1 << std::endl;
    std::cout << " ... total nr. of ambiguous cells " << nr2 << std::endl;
    *vertices = new float[3 * nr_v];
    *normals = new float[3 * nr_v];
    *triangles = new int[3 * nr_t];
    for (int id = 0; id < nr_v; id++) {
        // copy vertices -- exactly 3 floats per vertex.
        // BUGFIX: the previous version also wrote index 3*id + 3 (a fourth
        // component), which clobbered the next vertex's x component and wrote
        // one element past the end of the 3 * nr_v buffer for the last vertex.
        (*vertices)[3 * id] = v_array[id].x;
        (*vertices)[3 * id + 1] = v_array[id].y;
        (*vertices)[3 * id + 2] = v_array[id].z;
        // copy normals (sign-flipped, as in the halfedge variant)
        (*normals)[3 * id] = -n_array[id].x;
        (*normals)[3 * id + 1] = -n_array[id].y;
        (*normals)[3 * id + 2] = -n_array[id].z;
    }
    for (int id = 0; id < nr_t; id++) {
        (*triangles)[3 * id] = t_array[id].x;
        (*triangles)[3 * id + 1] = t_array[id].y;
        (*triangles)[3 * id + 2] = t_array[id].z;
    }
    std::cout << " ... done\n";
    // free host memory
    delete[] v_array;
    delete[] n_array;
    delete[] t_array;
    // free device memory
    hipFreeArray(d_data);
    freeMC_lookup(l_tables);
    freeVertices(v_);
    freeTriangles(t_);
    freeCells(cells);
    freeACells(acells);
    // arrays for vertex count
    hipFree(d_acount);
    hipFree(d_vcount);
    // NOTE(review): the vertex hash table ht_ and the texture object m_volume
    // are not released here -- confirm whether matching free/destroy calls
    // exist to avoid device-memory leaks on repeated invocations.
}
| 02dad1383d747d013d6a3eb0bbc79d4fea4861db.cu | #include "MarchingCubes.h"
// Lauch bounds
#define THREADS_PER_BLOCK 256
#if __CUDA_ARCH__ >= 200
#define MY_KERNEL_MAX_THREADS (2 * THREADS_PER_BLOCK)
#define MY_KERNEL_MIN_BLOCKS 3
#else
#define MY_KERNEL_MAX_THREADS THREADS_PER_BLOCK
#define MY_KERNEL_MIN_BLOCKS 2
#endif
// defines
#define BIT_1 0x1
#define BIT_2 0x2
#define BIT_3 0x4
#define BIT_4 0x8
#define BIT_5 0x10
#define BIT_6 0x20
#define BIT_7 0x40
#define BIT_8 0x80
#define BIT_16 0x8000
// Empty Bucket
#define EMPTY_BUCKET_32 -1
#define EMPTY_BUCKET_64 0ull
// Shared memory experiments
#define AMB_BLOCKSIZE 64
#define MC_BLOCKSIZE 512
// type aliases
// Introduce convenient aliases here
using uint = unsigned int;
using uchar = unsigned char;
using ushort = unsigned short;
using ullong = unsigned long long;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// error handling
// Check for a pending CUDA error and abort with a diagnostic on failure.
// Fixes: the original called exit(0), reporting *success* to the shell on
// a CUDA failure; diagnostics now go to stderr; the do/while(0) wrapper
// makes the macro safe as a single statement inside un-braced if/else.
#define cudaCheckError() do { \
        cudaError_t e=cudaGetLastError(); \
        if(e!=cudaSuccess) { \
            fprintf(stderr,"Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
            exit(EXIT_FAILURE); \
        } \
    } while (0)
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convenience function
// Read back the device-side atomic counter of a container and return it.
// T must expose an int* member named t_size pointing to device memory.
template<typename T>
int size(T t) {
    int count = 0;
    cudaMemcpy(&count, t.t_size, sizeof(int), cudaMemcpyDeviceToHost);
    return count;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The hash table to obtain a unique vertex index
// Open-addressing hash table (device memory) mapping a unique global
// vertex key to the vertex's position in the shared vertex array.
struct VertexHashTable {
	int* key{ nullptr };  // bucket keys; EMPTY_BUCKET_32 (-1) marks a free bucket
	int* addr{ nullptr }; // vertex-array index stored alongside the key
	int t_size;           // number of buckets
};
// Allocate the vertex hash table on the device and mark every bucket empty.
// The insert/find routines probe with atomicCAS against EMPTY_BUCKET_32 (-1),
// so the key array must be initialized before first use; a byte fill of 0xFF
// produces the int value -1 in every bucket. (The original left this memset
// commented out — and with the wrong variable name — so the table started
// with indeterminate keys.)
void initVertexHashTable(VertexHashTable& ht, const int size) {
	ht.t_size = size;
	cudaMalloc(&ht.key, size * sizeof(int));
	cudaMalloc(&ht.addr, size * sizeof(int));
	cudaMemset(ht.key, 0xFF, size * sizeof(int)); // all bytes 0xFF -> every int == -1 == EMPTY_BUCKET_32
}
// Release the device buffers of a vertex hash table and reset it to an
// empty state so it can be re-initialized safely.
void freeVertexHashTable(VertexHashTable& h) {
	if (h.key) cudaFree(h.key);
	if (h.addr) cudaFree(h.addr);
	h.key = nullptr;
	h.addr = nullptr;
	h.t_size = 0;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Vertex array
// Shared vertex list in device memory: positions, normals, and an atomic
// counter holding the number of vertices written so far.
struct Vertices {
	float4* vertices{ nullptr }; // vertex positions
	float4* normals{ nullptr };  // per-vertex normals
	int* t_size{ nullptr }; // atomic counter to compute nr. of vertices
};
// Allocate device storage for `size` vertices (positions + normals) and
// zero the atomic vertex counter.
void initVertices(Vertices& v, const int size) {
	const size_t bytes = size * sizeof(float4);
	cudaMalloc(&v.vertices, bytes);
	cudaMalloc(&v.normals, bytes);
	cudaMalloc(&v.t_size, sizeof(int));
	cudaMemset(v.t_size, 0, sizeof(int));
}
// Release all device buffers of a vertex list and null the pointers.
void freeVertices(Vertices& v) {
	if (v.vertices) cudaFree(v.vertices);
	if (v.normals) cudaFree(v.normals);
	if (v.t_size) cudaFree(v.t_size);
	v.vertices = nullptr;
	v.normals = nullptr;
	v.t_size = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// a triangle consist of three indices
// Triangle index buffer in device memory; each triangle stores three
// vertex indices (int4, fourth component unused here).
struct Triangles {
	int a_size; // size of buffer
	int4* triangles{ nullptr }; // triangle index triples
	int* t_size{ nullptr }; // atomic counter to compute nr. of triangles
};
// Allocate a device triangle buffer of capacity `size` and zero the
// atomic triangle counter.
void initTriangles(Triangles& t, const int size) {
	t.a_size = size;
	const size_t bytes = size * sizeof(int4);
	cudaMalloc(&t.triangles, bytes);
	cudaMalloc(&t.t_size, sizeof(int));
	cudaMemset(t.t_size, 0, sizeof(int));
}
// Release the device buffers of a triangle list and reset its state.
void freeTriangles(Triangles& t) {
	if (t.triangles) cudaFree(t.triangles);
	if (t.t_size) cudaFree(t.t_size);
	t.a_size = 0;
	t.triangles = nullptr;
	t.t_size = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Track cell cases and ids
// Per-cell bookkeeping: records cell cases/ids for the MC pass.
struct CellsIds {
	int* cells_{ nullptr }; // cell case/id entries
	int* t_size{ nullptr }; // atomic counter to get address of ambiguous cell in a_cells array
};
// Allocate the per-cell case/id buffer and zero its atomic counter.
void initCells(CellsIds& c, const int size) {
	const size_t bytes = size * sizeof(int);
	cudaMalloc(&c.cells_, bytes);
	cudaMalloc(&c.t_size, sizeof(int));
	cudaMemset(c.t_size, 0, sizeof(int));
}
// Release the device buffers of the cell bookkeeping structure.
void freeCells(CellsIds& c) {
	if (c.cells_) cudaFree(c.cells_);
	if (c.t_size) cudaFree(c.t_size);
	c.cells_ = nullptr;
	c.t_size = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Track ambiguous cases
// List of cells with an ambiguous MC configuration; these get a second,
// more careful triangulation pass.
struct AmbiguousCells {
	int* a_cells{ nullptr }; // indices of ambiguous cells
	int* t_size{ nullptr }; // atomic counter to get address of ambiguous cell in a_cells array
};
// Allocate the ambiguous-cell list and zero its atomic counter.
void initACells(AmbiguousCells& ac, const int size) {
	const size_t bytes = size * sizeof(int);
	cudaMalloc(&ac.a_cells, bytes);
	cudaMalloc(&ac.t_size, sizeof(int));
	cudaMemset(ac.t_size, 0, sizeof(int));
}
// Release the device buffers of the ambiguous-cell list.
void freeACells(AmbiguousCells& ac) {
	if (ac.a_cells) cudaFree(ac.a_cells);
	if (ac.t_size) cudaFree(ac.t_size);
	ac.a_cells = nullptr;
	ac.t_size = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Hash table to map halfedge twins
// Hash table used to pair up halfedge twins: both orientations of an edge
// hash to the same 64-bit key; he_ids stores the two halfedge addresses.
struct HalfedgeHashTable {
	int t_size{ 0 };                     // number of buckets
	unsigned long long* key{ nullptr };  // bucket keys; EMPTY_BUCKET_64 (0) marks free
	int2* he_ids{ nullptr };             // .x = halfedge with v0<v1, .y = its twin
};
// Allocate the halfedge hash table and clear every key to the 64-bit
// empty-bucket marker (0).
void initHalfedgeHashTable(HalfedgeHashTable& t, const int size) {
	t.t_size = size;
	const size_t keyBytes = size * sizeof(unsigned long long);
	cudaMalloc(&t.key, keyBytes);
	cudaMemset(t.key, 0, keyBytes);
	cudaMalloc(&t.he_ids, size * sizeof(int2));
}
// Register halfedge `addr` (origin v0, destination v1) in the twin table.
// The key combines max(v0,v1) with the Cantor pairing term, so (v0,v1)
// and (v1,v0) produce the SAME key and land in the same bucket; the
// orientation (v0<v1 or not) decides which half of the int2 slot is used.
// Returns false if no bucket is found within 128 probes.
__device__ bool addHalfedgeToHashTable (HalfedgeHashTable t, const int addr, const int v0, const int v1) {
	unsigned long long x = (unsigned long long)v0;
	unsigned long long y = (unsigned long long)v1;
	unsigned long long key = (x < y) ? y : x;
	key = key + (x + y) * (x + y + 1) / 2ull;
	// 64-bit mix (same steps as hash64shift below) to spread the keys
	{
		key = (~key) + (key << 21); // key = (key << 21) - key - 1;
		key = key ^ (key >> 24);
		key = (key + (key << 3)) + (key << 8); // key * 265
		key = key ^ (key >> 14);
		key = (key + (key << 2)) + (key << 4); // key * 21
		key = key ^ (key >> 28);
		key = key + (key << 31);
	}
	// open hashing
	int h = int(key % (unsigned long long)t.t_size);
	int e = 1;
	for (int loop = 0; loop < 128; loop++) {
		unsigned long long old = atomicCAS(&t.key[h], EMPTY_BUCKET_64, key);
		if (old == EMPTY_BUCKET_64 || old == key) {
			// claimed a free bucket or found the twin's bucket
			if (v0 < v1) {
				t.he_ids[h].x = addr;
			}
			else {
				t.he_ids[h].y = addr;
			}
			return true;
		}
		else {
			// collision: step with quadratic probing (h += 1,4,9,...)
			h = (h + e*e) % t.t_size;
			e = e + 1;
		}
	}
	//printf("ERROR: can't add halfedge\n");
	return false;
}
// Release the device buffers of the halfedge twin table and reset it.
void freeHalfedgeHashTable(HalfedgeHashTable& t) {
	if (t.key) cudaFree(t.key);
	if (t.he_ids) cudaFree(t.he_ids);
	t.t_size = 0;
	t.key = nullptr;
	t.he_ids = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Halfedge data structure
// Halfedge mesh connectivity in device memory.
struct Halfedges {
	int* t_size{ nullptr }; // atomic counter: number of halfedges written
	int buffSize{ 0 };      // capacity of he_e
	// halfedge int4:
	// he.x = origin vertex
	// he.y = face
	// he.z = next
	// he.w = tween
	int4* he_e{ nullptr };
	int* he_v{ nullptr }; // helfedge id
	int* he_f{ nullptr };
};
// Allocate the halfedge connectivity buffers (nr_he halfedges, one entry
// per vertex and per face) and zero the atomic halfedge counter.
void initHalfedges(Halfedges& h, const int nr_he, const int nr_v, const int nr_t) {
	h.buffSize = nr_he;
	cudaMalloc(&h.he_e, nr_he * sizeof(int4));
	//cudaCheckError();
	cudaMalloc(&h.he_v, nr_v * sizeof(int));
	//cudaCheckError();
	cudaMalloc(&h.he_f, nr_t * sizeof(int));
	cudaMalloc(&h.t_size, sizeof(int));
	// BUG FIX: the original called cudaMemset(&h.t_size, ...), passing the
	// address of the host-side pointer variable instead of the device
	// pointer, so the device counter was never zeroed.
	cudaMemset(h.t_size, 0, sizeof(int));
}
//__device__ void add(HalfedgeHashTable het_, const int v0, const int v1, const int v2) {
// const int a_ = atomicAdd(t_size, 3);
// const int f_ = a_ / 3;
// // he 0
// he_e[a_].x = v0;
// he_e[a_].y = f_; // there are three halfedges for each face (triangle)
// he_e[a_].z = a_ + 1; // next
// he_e[a_].w = -1; // default is boundary edge
//
// // he 1
// he_e[a_ + 1].x = v1;
// he_e[a_ + 1].y = f_; // there are three halfedges for each face (triangle)
// he_e[a_ + 1].z = a_ + 2;
// he_e[a_ + 1].w = -1; // default is boundary edge
//
// // he 2
// he_e[a_ + 2].x = v2;
// he_e[a_ + 2].y = f_; // there are three halfedges for each face (triangle)
// he_e[a_ + 2].z = a_;
// he_e[a_ + 2].w = -1; // default is boundary edge
//
// // add halfedges ids to hash table
// het_.add(a_, v0, v1);
// het_.add(a_ + 1, v1, v2);
// het_.add(a_ + 2, v2, v0);
//}
// Release all halfedge device buffers and reset the structure.
void freeHalfedges(Halfedges& h) {
	if (h.t_size) cudaFree(h.t_size);
	if (h.he_e) cudaFree(h.he_e);
	if (h.he_v) cudaFree(h.he_v);
	if (h.he_f) cudaFree(h.he_f);
	h.buffSize = 0;
	h.t_size = nullptr;
	h.he_e = nullptr;
	h.he_v = nullptr;
	h.he_f = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// MC lookup tables
// Device-resident Marching Cubes lookup tables (see initMC_lookup).
struct MC_lookup {
	ushort* e_{ nullptr };             // per-case edge pattern; BIT_16 flags ambiguous cases
	unsigned long long* t_{ nullptr }; // per-case triangle list, 16 x 4-bit entries packed
};
// Build the device-resident MC lookup tables from the host tables.
// e_ : per-case edge intersection pattern; BIT_16 is additionally set for
//      the ambiguous configurations (ta_[case] == 105).
// t_ : per-case triangle list packed as 16 x 4-bit entries in one 64-bit
//      word; the host sentinel -1 becomes the nibble 0xF.
// Uses stack-local std::array staging buffers instead of raw new/delete,
// so no manual cleanup (and no leak on early exit) is needed.
void initMC_lookup(MC_lookup& l, const std::array<unsigned short, 256>& ep_, const std::array<int, 4096>& tp_, const std::array<unsigned char, 256>& ta_) {
	std::array<ushort, 256> le_;
	for (int i = 0; i < 256; i++) {
		le_[i] = ep_[i];
		le_[i] |= (ta_[i] == 105) ? BIT_16 : 0x0; // flag ambiguous MC case
	}
	cudaMalloc(&l.e_, 256 * sizeof(ushort));
	cudaMemcpy(l.e_, le_.data(), 256 * sizeof(ushort), cudaMemcpyHostToDevice);
	cudaCheckError();
	// create MC lookup table: pack 16 triangle-table entries of 4 bits each
	std::array<unsigned long long, 256> l_;
	const unsigned long long flg = 0xFull; // nibble sentinel for "no entry"
	for (int i = 0; i < 256; i++) {
		const int i_case = i * 16;
		unsigned long long f = 0ull;
		for (int t = 0; t < 16; t++) {
			const int mcval = tp_[i_case + t];
			if (mcval == -1) {
				f |= (flg << (t * 4));
			}
			else {
				f |= ((unsigned long long)mcval << (t * 4));
			}
		}
		l_[i] = f;
	}
	cudaMalloc(&l.t_, 256 * sizeof(unsigned long long));
	cudaMemcpy(l.t_, l_.data(), 256 * sizeof(unsigned long long), cudaMemcpyHostToDevice);
	cudaCheckError();
}
// Release the device-side MC lookup tables.
void freeMC_lookup(MC_lookup& l)
{
	if (l.e_) cudaFree(l.e_);
	if (l.t_) cudaFree(l.t_);
	l.e_ = nullptr;
	l.t_ = nullptr;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Uniform grid
// Axis-aligned uniform grid: sample counts, origin, and spacing, plus
// helpers to convert between (i,j,k) and a flat array index (i fastest).
struct UniformGrid {
	int idim{ 0 };  // number of samples in x
	int jdim{ 0 };  // number of samples in y
	int kdim{ 0 };  // number of samples in z
	float x0{ 0 };  // origin
	float y0{ 0 };
	float z0{ 0 };
	float dx{ 0 };  // spacing
	float dy{ 0 };
	float dz{ 0 };
	// flat index of sample (i,j,k)
	__device__ int gl_index(const int i, const int j, const int k) {
		return (k * jdim * idim + j * idim + i);
	}
	// recover i / j / k from a flat index
	__device__ int i_index(const int gl_index) {
		return (gl_index % idim);
	}
	__device__ int j_index(const int gl_index) {
		return ((gl_index / idim) % jdim);
	}
	__device__ int k_index(const int gl_index) {
		return (gl_index / (idim * jdim));
	}
	// host-side setters
	__host__ void size(const int x_size, const int y_size, const int z_size) {
		idim = x_size;
		jdim = y_size;
		kdim = z_size;
	}
	__host__ void origin(const float x, const float y, const float z) {
		x0 = x;
		y0 = y;
		z0 = z;
	}
	__host__ void spacing(const float x, const float y, const float z) {
		dx = x;
		dy = y;
		dz = z;
	}
};
using UGrid = UniformGrid;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// timer based on CUDA timen routines
// Simple GPU timer built on CUDA events.
// NOTE: the struct owns the two events; the destructor (added — the
// original leaked the events) releases them. Do not copy CTimer
// instances: a copy would destroy the shared events twice.
struct CTimer {
	float e_milliseconds;  // elapsed time of the last start()/stop() pair
	cudaEvent_t c_start;
	cudaEvent_t c_stop;
	CTimer() {
		cudaEventCreate(&c_start);
		cudaEventCreate(&c_stop);
	}
	~CTimer() {
		cudaEventDestroy(c_start);
		cudaEventDestroy(c_stop);
	}
	void __host__ start() {
		cudaEventRecord(c_start);
	}
	// record the stop event, wait for it, and cache the elapsed time
	void __host__ stop() {
		cudaEventRecord(c_stop);
		cudaEventSynchronize(c_stop);
		cudaEventElapsedTime(&e_milliseconds, c_start, c_stop);
	}
	void __host__ print() {
		std::cout << std::setprecision(7) << " ... time in ms: " << e_milliseconds << std::endl;
	}
	void __host__ print(std::string& m) {
		std::cout << std::setprecision(7) << " ... " << m << " time in ms: " << e_milliseconds << std::endl;
	}
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using namespace p_mc;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute number of vertices computed for this cell
// compute only the intersection of the iso-surface with
// the cell edges
// Population count: number of set bits in the low 32 bits of n
// (n is truncated to uint, as in the original SWAR implementation).
// Uses the hardware __popc intrinsic instead of the bit-twiddling trick.
template<typename T>
__device__ uint numberOfSetBits(T n) {
	return (uint)__popc((uint)n);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Reduce
// warp reduce based on __shfl_down
// Sum `val` across the 32 lanes of a warp via shuffle-down; after the
// loop, lane 0 holds the warp total (other lanes hold partials).
// Fixes: uses __shfl_down_sync with a full-warp mask — the legacy
// mask-less __shfl was removed for Volta+ — and returns T instead of int
// so floating-point sums are no longer truncated (identical behavior for
// integer T). Assumes all 32 lanes of the warp participate.
template<typename T>
__device__ T warpReduceSum(T val) {
	for (int offset = warpSize / 2; offset > 0; offset /= 2) {
		val += __shfl_down_sync(0xffffffff, val, offset);
	}
	return val;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// warp reduce kernel
// Sum in[0..N-1] into *out.
// Each thread accumulates a grid-stride partial sum, the warp reduces it
// with shuffles, and lane 0 of every warp adds its warp total to *out
// atomically. NOTE(review): assumes *out was zero-initialized by the
// caller and that blockDim.x is a multiple of warpSize — confirm at call
// sites (warpReduce below launches with 256 threads and memsets out).
template<typename T>
__global__ void warp_reduce_kernel(T *in, T* out, int N) {
	T sum = T(0);
	for (int i = blockIdx.x*blockDim.x + threadIdx.x; i<N; i += blockDim.x*gridDim.x) {
		sum += in[i];
	}
	sum = warpReduceSum(sum);
	if (threadIdx.x%warpSize == 0)
		atomicAdd(out, sum);
}
// host function for warp reduce
// Host wrapper: reduce (sum) the N-element device array i_data.
// Fixes: the kernel instantiation `warp_reduce_kernel<typename T>` was
// ill-formed (must be <T>), and the temporary accumulator was allocated,
// cleared, and copied with sizeof(int) even when T != int. The declared
// return type stays int to preserve the original interface.
template<typename T>
int warpReduce(T *i_data, const int N) {
	int threads = 256;
	int blocks = std::min((N + threads - 1) / threads, 2048);
	T* d_sum{ nullptr };
	cudaMalloc(&d_sum, sizeof(T));
	cudaMemsetAsync(d_sum, 0, sizeof(T));
	warp_reduce_kernel<T> << <blocks, threads >> >(i_data, d_sum, N);
	cudaCheckError();
	// return sum
	T h_sum{ 0 };
	cudaMemcpy(&h_sum, d_sum, sizeof(T), cudaMemcpyDeviceToHost);
	cudaFree(d_sum);
	return h_sum; // narrowed to int (original interface)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Index computations
//__host__ __device__ int global_index(const int i, const int j, const int k, const int idim, const int jdim, const int kdim)
//{
// return (k * jdim * idim + j * idim + i);
//}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// trilinear interpolation
// Trilinear interpolation of the 8 cell-corner positions p[0..7] at local
// coordinates (u,v,w) in [0,1]^3; writes po.x/y/z (po.w is not touched).
// Corner ordering matches cell_vertices: bit 0 of the corner index selects
// +u, bit 1 selects +v, bit 2 selects +w.
__device__ void trilinear(float4& po, const float3 p[8], const float u, const float v, const float w)
{
	po.x = (1 - w) * ((1 - v) * (p[0].x + u * (p[1].x - p[0].x)) + v * (p[2].x + u * (p[3].x - p[2].x))) + w * ((1 - v) * (p[4].x + u * (p[5].x - p[4].x)) + v * (p[6].x + u * (p[7].x - p[6].x)));
	po.y = (1 - w) * ((1 - v) * (p[0].y + u * (p[1].y - p[0].y)) + v * (p[2].y + u * (p[3].y - p[2].y))) + w * ((1 - v) * (p[4].y + u * (p[5].y - p[4].y)) + v * (p[6].y + u * (p[7].y - p[6].y)));
	po.z = (1 - w) * ((1 - v) * (p[0].z + u * (p[1].z - p[0].z)) + v * (p[2].z + u * (p[3].z - p[2].z))) + w * ((1 - v) * (p[4].z + u * (p[5].z - p[4].z)) + v * (p[6].z + u * (p[7].z - p[6].z)));
	//po.y = (1 - w) * ((1 - v) * (p[0].y * (1 - u) + p[1].y * u) + v * (p[2].y * (1 - u) + p[3].y * u)) + w * ((1 - v) * (p[4].y * (1 - u) + p[5].y * u) + v * (p[6].y * (1 - u) + p[7].y * u));
	//po.z = (1 - w) * ((1 - v) * (p[0].z * (1 - u) + p[1].z * u) + v * (p[2].z * (1 - u) + p[3].z * u)) + w * ((1 - v) * (p[4].z * (1 - u) + p[5].z * u) + v * (p[6].z * (1 - u) + p[7].z * u));
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Hash tables
// device hash function
// 32-bit integer mix (Wang-hash style) used to spread vertex keys
// uniformly over the hash-table buckets.
__device__ uint hash_function( uint key )
{
	key = (key ^ 61) ^ (key >> 16);
	key = key + (key << 3);
	key = key ^ (key >> 4);
	key = key * 0x27d4eb2d;
	key = key ^ (key >> 15);
	return key;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Insert a key assigned to a vertex index in the hash table
// return the position in the array, where the key was inserted
// if the key was already in the array, e.g. other kernel was processing
// the same edge in the uniform grid, return false. This way, the kernel will
// not generate a new vertex. If the vertex was not created jet, the address in
// the array is returned so that the calling kernel can save this position
// of the vertex in the hash table
// v_gindex key is a unique global index assigned to the vertex
// Hash table:
// struct HashTable {
// int* key;
// int* addr;
// int t_size;
// };
// v_addr contains the position in the key array, where the key = v_gindex was stored
// Try to claim the unique vertex key v_gindex in the hash table.
// Returns true and sets v_addr to the bucket index if THIS thread inserted
// the key (i.e. the calling kernel must create the vertex); returns false
// if another thread already owns the key, or if no free bucket is found
// within 128 probes.
__device__ bool insert_vertex_key(const int v_gindex, VertexHashTable ht_, int& v_addr)
{
	//const int start_address = int(hash_function((uint)v_gindex) % ht_.t_size);
	// open hashing
	//int h = start_address;
	int h = int(hash_function((uint)v_gindex) % ht_.t_size);
	int e = 1;
	for (int loop = 0; loop < 128; loop++) {
		// atomically claim the bucket if it is still empty
		int old = atomicCAS(&ht_.key[h], EMPTY_BUCKET_32, v_gindex);
		if (old == EMPTY_BUCKET_32) {
			v_addr = h;
			return true;
		}
		else if (v_gindex == old) {
			// vertex key already in table
			return false;
		}
		else {
			// collision: step with quadratic probing (h += 1,4,9,...)
			h = (h + e*e) % ht_.t_size;
			e = e + 1;
			//if (h == start_address) {
			//	printf("ERROR: can't find free bucket for %d\n", v_gindex);
			//	return false;
			//}
		}
	}
	return false;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// a probably faster strategy to reduce number of glabal memory access
// This function sets vertex and normal using an atomic counter.
// Keep the address where vertex was stored, therefore the hast table knows the address of vertices and normals in vertex and normal arrays
// It resturn the address in the hash table where the address of vertex and normal are stored
// Insert vertex (position vc, normal vn) under the unique key v_gindex.
// If this thread claims the key, the vertex/normal are appended to the
// vertex array via the atomic counter and the array index is recorded in
// the hash table. Returns the hash-table bucket of the key (whether this
// thread inserted it or another one did), or -1 on probe failure.
__device__ int insert_vertex(const int v_gindex, VertexHashTable ht_, Vertices v_, const float4 vc, const float4 vn)
{
	const int start_address = int(hash_function((uint)v_gindex) % ht_.t_size);
	// open hashing
	int h = start_address;
	int e = 1;
	for (int loop = 0; loop < 128; loop++) {
		int old = atomicCAS(&ht_.key[h], EMPTY_BUCKET_32, v_gindex);
		if (old == EMPTY_BUCKET_32) {
			// this thread owns the key: append the vertex
			const int a_ = atomicAdd(v_.t_size, 1);
			v_.vertices[a_] = vc;
			v_.normals[a_] = vn;
			ht_.addr[h] = a_;
			return h;
		}
		else if (v_gindex == old) {
			// vertex key already in table
			return h;
		}
		else {
			// collision: step with quadratic probing (h += 1,4,9,...)
			h = (h + e*e) % ht_.t_size;
			e = e + 1;
			if (h == start_address) {
				printf("ERROR: can't find free bucket for %d\n", v_gindex);
				return -1;
			}
		}
	}
	return -1;
}
// Like insert_vertex, but only reserves the slot: on first insertion the
// new vertex-array index is returned through `address` and the caller
// writes position/normal itself; if the key already exists, address is
// set to -1. Returns the hash bucket, or -1 on probe failure.
__device__ int insert_vertex_fast(const int v_gindex, VertexHashTable ht_, Vertices v_,int& address)
{
	const int start_address = int(hash_function((uint)v_gindex) % ht_.t_size);
	// open hashing
	int h = start_address;
	int e = 1;
	for (int loop = 0; loop < 128; loop++) {
		int old = atomicCAS(&ht_.key[h], EMPTY_BUCKET_32, v_gindex);
		if (old == EMPTY_BUCKET_32) {
			// this thread owns the key: reserve a vertex slot
			const int a_ = atomicAdd(v_.t_size, 1);
			//v_.vertices[a_] = vc;
			//v_.normals[a_] = vn;
			ht_.addr[h] = a_;
			address = a_;
			return h;
		}
		else if (v_gindex == old) {
			// vertex key already in table
			address = -1;
			return h;
		}
		else {
			// collision: step with quadratic probing (h += 1,4,9,...)
			h = (h + e*e) % ht_.t_size;
			e = e + 1;
			if (h == start_address) {
				printf("ERROR: can't find free bucket for %d\n", v_gindex);
				return -1;
			}
		}
	}
	return -1;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// find vertex global index in hash table
// Values were store at the hash address with open hashing and
// Look up the vertex-array index stored under key gl_index, following the
// same quadratic probe sequence as the insert routines. Returns -1 (and
// prints a diagnostic) if the key is not found within 128 probes.
__device__ int find_vertex(const int gl_index, VertexHashTable ht_)
{
	// compute hash for global index
	const int pos = int(hash_function((uint)gl_index) % ht_.t_size);
	// open hashing with quadratic probing
	int h = pos;
	int e = 1;
	for (int loop = 0; loop < 128; loop++) {
		if (ht_.key[h] == gl_index) {
			return ht_.addr[h];
		}
		else {
			// collision: step with quadratic probing (h += 1,4,9,...)
			h = (h + e*e) % ht_.t_size;
			e = e + 1;
		}
	}
	printf("ERROR: can't find gl_index in hash table: gl_index %d at %d\n",gl_index, pos);
	return -1;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// 64 bit hash table to use Cantor's pairing function
// 64 bit mix function
// 64-bit integer mix (Thomas Wang's 64-bit hash) used to spread the
// pairing-function keys of halfedges over the 64-bit hash table.
__device__ unsigned long long hash64shift(unsigned long long key)
{
	key = (~key) + (key << 21); // key = (key << 21) - key - 1;
	key = key ^ (key >> 24);
	key = (key + (key << 3)) + (key << 8); // key * 265
	key = key ^ (key >> 14);
	key = (key + (key << 2)) + (key << 4); // key * 21
	key = key ^ (key >> 28);
	key = key + (key << 31);
	return key;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Insert the unique id of a halfedge obtained using the bijective Cantor's pairing function
// into the hast table. At the same position in array, save actuall have edge address, which
// will be used later to connect twin edges.
// Register halfedge he_addr (origin v0, destination v1) in the twin hash
// table. The key combines max(v0,v1) with the Cantor pairing term so both
// orientations of an edge map to the same bucket; orientation (v0<v1 or
// not) decides which half of the int2 slot receives he_addr.
// Returns false if no bucket is found within 128 probes.
// Fix: the failure printf used %d for an unsigned long long argument
// (undefined behavior / garbage output); it now uses %llu.
__device__ bool insert_halfedge_id(const int t_size, unsigned long long *he_table, int2* he_ids, int he_addr, int v0, int v1)
{
	//const unsigned long long EMPTY_BUCKET = 0ull;
	// compute pairing function value
	unsigned long long x = (unsigned long long)v0;
	unsigned long long y = (unsigned long long)v1;
	unsigned long long he_id = (x < y) ? y : x;
	he_id = he_id + (x + y) * (x + y + 1) / 2ull;
	// evalue hash function
	unsigned long long l_size = (unsigned long long)t_size;
	//unsigned long long he_id = (v0 < v1) ? (unsigned long long)v0 | ((unsigned long long)v1 << 32) : (unsigned long long)v1 | ((unsigned long long)v0 << 32);
	//const int start_address = int(hash64shift(he_id) % l_size);
	const int start_address = int( he_id % l_size );
	// open hashing
	int h = start_address;
	int e = 1;
	for (int loop = 0; loop < 128; loop++) {
		unsigned long long old = atomicCAS(&he_table[h], EMPTY_BUCKET_64, he_id);
		if (old == EMPTY_BUCKET_64 || old == he_id) {
			// claimed a free bucket or found the twin's bucket
			if (v0 < v1) {
				he_ids[h].x = he_addr;
			}
			else {
				he_ids[h].y = he_addr;
			}
			return true;
		}
		else {
			// collision: step with quadratic probing (h += 1,4,9,...)
			h = (h + e*e) % t_size;
			e = e + 1;
			if (h == start_address) {
				printf("ERROR: he can't find free bucket for %llu\n", he_id);
				return false;
			}
		}
	}
	return false;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute the cell vertices from uniform grid and cell indices
// Use spacing to compute vertex position
// Fill v[0..7] with the world-space corner positions of grid cell (i,j,k).
// Corner numbering: bit 0 of the corner index selects +dx, bit 1 selects
// +dy, bit 2 selects +dz — identical to the unrolled original (v[0] is the
// base corner, each offset is added to the base coordinate).
__device__ void cell_vertices(float3 v[8], const int i, const int j, const int k, UGrid ugrid)
{
	const float bx = ugrid.x0 + i * ugrid.dx;
	const float by = ugrid.y0 + j * ugrid.dy;
	const float bz = ugrid.z0 + k * ugrid.dz;
	for (int c = 0; c < 8; c++) {
		v[c].x = (c & BIT_1) ? (bx + ugrid.dx) : bx;
		v[c].y = (c & BIT_2) ? (by + ugrid.dy) : by;
		v[c].z = (c & BIT_3) ? (bz + ugrid.dz) : bz;
	}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute gradient of scalar field at the vertices
// Use central differences, at the boundaries use forward
// or backward differences correspondigly
// Central-difference gradient of the scalar field at the 8 corners of
// cell (i_index, j_index, k_index), sampled from the 3D texture u.
// At volume boundaries the clamped one-sided difference is used: u_index
// clamps the neighbor index into [0, dim-1] and sets the divisor factor
// f to 1 (instead of 2) for the one-sided case.
// Fix: removed the stray ", f" in the v2/z neighbor computation
// (`v1 = k_index + 1, f;`) — a no-op comma expression left over from an
// edit; behavior is unchanged.
__device__ void gradient(float3 n[8], cudaTextureObject_t u, UGrid ugrid, const int i_index, const int j_index, const int k_index)
{
	const int idim = ugrid.idim;
	const int jdim = ugrid.jdim;
	const int kdim = ugrid.kdim;
	const float dx = ugrid.dx;
	const float dy = ugrid.dy;
	const float dz = ugrid.dz;
	int v0, v1;
	float f = 2.f;
	// clamp neighbor index into the volume; f = 1 flags a one-sided difference
	auto u_index = [](const int dim, int i, float& f) {
		f = (i<0) || (i >= dim) ? 1 : 2;
		i = (i<0) ? 0 : i;
		i = (i >= dim) ? dim - 1 : i;
		return i;
	};
	// 8 vertices
	// v0, x
	v0 = u_index(idim, i_index - 1, f);
	v1 = i_index + 1;
	n[0].x = (tex3D<float>(u, v1, j_index, k_index) - tex3D<float>(u, v0, j_index, k_index)) / (f * dx);
	// v0, y
	v0 = u_index(jdim, j_index - 1, f);
	v1 = j_index + 1;
	n[0].y = (tex3D<float>(u, i_index, v1, k_index) - tex3D<float>(u, i_index, v0, k_index)) / (f * dy);
	// v0, z
	v0 = u_index(kdim, k_index - 1, f);
	v1 = k_index + 1;
	n[0].z = (tex3D<float>(u, i_index, j_index, v1) - tex3D<float>(u, i_index, j_index, v0)) / (f * dz);
	// v1, x
	v0 = i_index;
	v1 = u_index(idim, i_index + 2, f);
	n[1].x = (tex3D<float>(u, v1, j_index, k_index) - tex3D<float>(u, v0, j_index, k_index)) / (f * dx);
	// v1, y
	v0 = u_index(jdim, j_index - 1, f);
	v1 = j_index + 1;
	n[1].y = (tex3D<float>(u, i_index+1, v1, k_index) - tex3D<float>(u, i_index+1, v0, k_index)) / (f * dy);
	// v1, z
	v0 = u_index(kdim, k_index - 1, f);
	v1 = k_index + 1;
	n[1].z = (tex3D<float>(u, i_index+1, j_index, v1) - tex3D<float>(u, i_index+1, j_index, v0)) / (f * dz);
	// v2, x
	v0 = u_index(idim, i_index - 1, f);
	v1 = i_index + 1;
	n[2].x = (tex3D<float>(u, v1, j_index+1, k_index) - tex3D<float>(u, v0, j_index+1, k_index)) / (f * dx);
	// v2, y
	v0 = j_index;
	v1 = u_index(jdim,j_index + 2, f);
	n[2].y = (tex3D<float>(u, i_index, v1, k_index) - tex3D<float>(u, i_index, v0, k_index)) / (f * dy);
	// v2, z
	v0 = u_index(kdim, k_index - 1, f);
	v1 = k_index + 1;
	n[2].z = (tex3D<float>(u, i_index, j_index+1, v1) - tex3D<float>(u, i_index, j_index+1, v0)) / (f * dz);
	// v3, x
	v0 = i_index;
	v1 = u_index(idim, i_index + 2, f);
	n[3].x = (tex3D<float>(u, v1, j_index+1, k_index) - tex3D<float>(u, v0, j_index+1, k_index)) / (f * dx);
	// v3, y
	v0 = j_index;
	v1 = u_index(jdim, j_index + 2, f);
	n[3].y = (tex3D<float>(u, i_index+1, v1, k_index) - tex3D<float>(u, i_index+1, v0, k_index)) / (f * dy);
	// v3, z
	v0 = u_index(kdim, k_index - 1, f);
	v1 = k_index + 1;
	n[3].z = (tex3D<float>(u, i_index+1, j_index+1, v1) - tex3D<float>(u, i_index+1, j_index+1, v0)) / (f * dz);
	// v4, x
	v0 = u_index(idim, i_index - 1, f);
	v1 = i_index + 1;
	n[4].x = (tex3D<float>(u, v1, j_index, k_index+1) - tex3D<float>(u, v0, j_index, k_index+1)) / (f * dx);
	// v4, y
	v0 = u_index(jdim, j_index - 1, f);
	v1 = j_index + 1;
	n[4].y = (tex3D<float>(u, i_index, v1, k_index+1) - tex3D<float>(u, i_index, v0, k_index+1)) / (f * dy);
	// v4, z
	v0 = k_index;
	v1 = u_index(kdim, k_index + 2, f);
	n[4].z = (tex3D<float>(u, i_index, j_index, v1) - tex3D<float>(u, i_index, j_index, v0)) / (f * dz);
	// v5, x
	v0 = i_index;
	v1 = u_index(idim, i_index + 2, f);
	n[5].x = (tex3D<float>(u, v1, j_index, k_index+1) - tex3D<float>(u, v0, j_index, k_index+1)) / (f * dx);
	// v5, y
	v0 = u_index(jdim, j_index - 1, f);
	v1 = j_index + 1;
	n[5].y = (tex3D<float>(u, i_index+1, v1, k_index+1) - tex3D<float>(u, i_index+1, v0, k_index+1)) / (f * dy);
	// v5, z
	v0 = k_index;
	v1 = u_index(kdim, k_index + 2, f);
	n[5].z = (tex3D<float>(u, i_index+1, j_index, v1) - tex3D<float>(u, i_index+1, j_index, v0)) / (f * dz);
	// v6, x
	v0 = u_index(idim, i_index - 1, f);
	v1 = i_index + 1;
	n[6].x = (tex3D<float>(u, v1, j_index+1, k_index+1) - tex3D<float>(u, v0, j_index+1, k_index+1)) / (f * dx);
	// v6, y
	v0 = j_index;
	v1 = u_index(jdim, j_index + 2, f);
	n[6].y = (tex3D<float>(u, i_index, v1, k_index+1) - tex3D<float>(u, i_index, v0, k_index+1)) / (f * dy);
	// v6, z
	v0 = k_index;
	v1 = u_index(kdim, k_index + 2, f);
	n[6].z = (tex3D<float>(u, i_index, j_index+1, v1) - tex3D<float>(u, i_index, j_index+1, v0)) / (f * dz);
	// v7, x
	v0 = i_index;
	v1 = u_index(idim, i_index + 2, f);
	n[7].x = (tex3D<float>(u, v1, j_index + 1, k_index + 1) - tex3D<float>(u, v0, j_index + 1, k_index + 1)) / (f * dx);
	// v7, y
	v0 = j_index;
	v1 = u_index(jdim, j_index + 2, f);
	n[7].y = (tex3D<float>(u, i_index + 1, v1, k_index + 1) - tex3D<float>(u, i_index + 1, v0, k_index + 1)) / (f * dy);
	// v7, z
	v0 = k_index;
	v1 = u_index(kdim, k_index + 2, f);
	n[7].z = (tex3D<float>(u, i_index+1, j_index+1, v1) - tex3D<float>(u, i_index + 1, j_index + 1, v0)) / (f * dz);
}
__device__ void gradientShared(const int tr, float3 n[8], cudaTextureObject_t u, UGrid ugrid, const int i_index, const int j_index, const int k_index)
{
const int idim = ugrid.idim;
const int jdim = ugrid.jdim;
const int kdim = ugrid.kdim;
const float dx = ugrid.dx;
const float dy = ugrid.dy;
const float dz = ugrid.dz;
int v0, v1;
float f = 2.f;
auto u_index = [](const int dim, int i, float& f) {
f = (i<0) || (i >= dim) ? 1 : 2;
i = (i<0) ? 0 : i;
i = (i >= dim) ? dim - 1 : i;
return i;
};
// 8 vertices
// v0, x
v0 = u_index(idim, i_index - 1, f);
v1 = i_index + 1;
n[tr].x = (tex3D<float>(u, v1, j_index, k_index) - tex3D<float>(u, v0, j_index, k_index)) / (f * dx);
// v0, y
v0 = u_index(jdim, j_index - 1, f);
v1 = j_index + 1;
n[tr].y = (tex3D<float>(u, i_index, v1, k_index) - tex3D<float>(u, i_index, v0, k_index)) / (f * dy);
// v0, z
v0 = u_index(kdim, k_index - 1, f);
v1 = k_index + 1;
n[tr].z = (tex3D<float>(u, i_index, j_index, v1) - tex3D<float>(u, i_index, j_index, v0)) / (f * dz);
// v1, x
v0 = i_index;
v1 = u_index(idim, i_index + 2, f);
n[tr+1].x = (tex3D<float>(u, v1, j_index, k_index) - tex3D<float>(u, v0, j_index, k_index)) / (f * dx);
// v1, y
v0 = u_index(jdim, j_index - 1, f);
v1 = j_index + 1;
n[tr + 1].y = (tex3D<float>(u, i_index + 1, v1, k_index) - tex3D<float>(u, i_index + 1, v0, k_index)) / (f * dy);
// v1, z
v0 = u_index(kdim, k_index - 1, f);
v1 = k_index + 1;
n[tr + 1].z = (tex3D<float>(u, i_index + 1, j_index, v1) - tex3D<float>(u, i_index + 1, j_index, v0)) / (f * dz);
// v2, x
v0 = u_index(idim, i_index - 1, f);
v1 = i_index + 1;
n[tr + 2].x = (tex3D<float>(u, v1, j_index + 1, k_index) - tex3D<float>(u, v0, j_index + 1, k_index)) / (f * dx);
// v2, y
v0 = j_index;
v1 = u_index(jdim, j_index + 2, f);
n[tr + 2].y = (tex3D<float>(u, i_index, v1, k_index) - tex3D<float>(u, i_index, v0, k_index)) / (f * dy);
// v2, z
v0 = u_index(kdim, k_index - 1, f);
v1 = k_index + 1, f;
n[tr + 2].z = (tex3D<float>(u, i_index, j_index + 1, v1) - tex3D<float>(u, i_index, j_index + 1, v0)) / (f * dz);
// v3, x
v0 = i_index;
v1 = u_index(idim, i_index + 2, f);
n[tr + 3].x = (tex3D<float>(u, v1, j_index + 1, k_index) - tex3D<float>(u, v0, j_index + 1, k_index)) / (f * dx);
// v3, y
v0 = j_index;
v1 = u_index(jdim, j_index + 2, f);
n[tr + 3].y = (tex3D<float>(u, i_index + 1, v1, k_index) - tex3D<float>(u, i_index + 1, v0, k_index)) / (f * dy);
// v3, z
v0 = u_index(kdim, k_index - 1, f);
v1 = k_index + 1;
n[tr + 3].z = (tex3D<float>(u, i_index + 1, j_index + 1, v1) - tex3D<float>(u, i_index + 1, j_index + 1, v0)) / (f * dz);
// v4, x
v0 = u_index(idim, i_index - 1, f);
v1 = i_index + 1;
n[tr + 4].x = (tex3D<float>(u, v1, j_index, k_index + 1) - tex3D<float>(u, v0, j_index, k_index + 1)) / (f * dx);
// v4, y
v0 = u_index(jdim, j_index - 1, f);
v1 = j_index + 1;
n[tr + 4].y = (tex3D<float>(u, i_index, v1, k_index + 1) - tex3D<float>(u, i_index, v0, k_index + 1)) / (f * dy);
// v4, z
v0 = k_index;
v1 = u_index(kdim, k_index + 2, f);
n[tr + 4].z = (tex3D<float>(u, i_index, j_index, v1) - tex3D<float>(u, i_index, j_index, v0)) / (f * dz);
// v5, x
v0 = i_index;
v1 = u_index(idim, i_index + 2, f);
n[tr + 5].x = (tex3D<float>(u, v1, j_index, k_index + 1) - tex3D<float>(u, v0, j_index, k_index + 1)) / (f * dx);
// v5, y
v0 = u_index(jdim, j_index - 1, f);
v1 = j_index + 1;
n[tr + 5].y = (tex3D<float>(u, i_index + 1, v1, k_index + 1) - tex3D<float>(u, i_index + 1, v0, k_index + 1)) / (f * dy);
// v5, z
v0 = k_index;
v1 = u_index(kdim, k_index + 2, f);
n[tr + 5].z = (tex3D<float>(u, i_index + 1, j_index, v1) - tex3D<float>(u, i_index + 1, j_index, v0)) / (f * dz);
// v6, x
v0 = u_index(idim, i_index - 1, f);
v1 = i_index + 1;
n[tr + 6].x = (tex3D<float>(u, v1, j_index + 1, k_index + 1) - tex3D<float>(u, v0, j_index + 1, k_index + 1)) / (f * dx);
// v6, y
v0 = j_index;
v1 = u_index(jdim, j_index + 2, f);
n[tr + 6].y = (tex3D<float>(u, i_index, v1, k_index + 1) - tex3D<float>(u, i_index, v0, k_index + 1)) / (f * dy);
// v6, z
v0 = k_index;
v1 = u_index(kdim, k_index + 2, f);
n[tr + 6].z = (tex3D<float>(u, i_index, j_index + 1, v1) - tex3D<float>(u, i_index, j_index + 1, v0)) / (f * dz);
// v7, x
v0 = i_index;
v1 = u_index(idim, i_index + 2, f);
n[tr + 7].x = (tex3D<float>(u, v1, j_index + 1, k_index + 1) - tex3D<float>(u, v0, j_index + 1, k_index + 1)) / (f * dx);
// v7, y
v0 = j_index;
v1 = u_index(jdim, j_index + 2, f);
n[tr + 7].y = (tex3D<float>(u, i_index + 1, v1, k_index + 1) - tex3D<float>(u, i_index + 1, v0, k_index + 1)) / (f * dy);
// v7, z
v0 = k_index;
v1 = u_index(kdim, k_index + 2, f);
n[tr + 7].z = (tex3D<float>(u, i_index + 1, j_index + 1, v1) - tex3D<float>(u, i_index + 1, j_index + 1, v0)) / (f * dz);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// old-fashioned way to compute the gradient from a uniform grid
__device__ void gradient2(float3 n[8], cudaTextureObject_t u, const UGrid ugrid, const int i_index, const int j_index, const int k_index)
{
    // Gradient of the scalar field at the 8 corners of cell (i_index, j_index, k_index),
    // estimated by finite differences on the uniform grid:
    //   - one-sided difference (spacing h, factor 1) at the grid border,
    //   - central difference (spacing 2h, factor 2) in the interior.
    // stencil: compute the two sample indices and the difference factor along one axis
    auto stencil = [](const int c, const int dim, int& lo, int& hi, float& fac) {
        if (c == 0) {
            lo = c;
            hi = c + 1;
            fac = 1.f;
        }
        else if (c == dim - 1) {
            lo = c - 1;
            hi = c;
            fac = 1.f;
        }
        else {
            lo = c - 1;
            hi = c + 1;
            fac = 2.f;
        }
    };
    for (int dk = 0; dk <= 1; dk++) {
        const int k = k_index + dk;
        for (int dj = 0; dj <= 1; dj++) {
            const int j = j_index + dj;
            for (int di = 0; di <= 1; di++) {
                const int i = i_index + di;
                // local corner index: bit 0 = x-offset, bit 1 = y-offset, bit 2 = z-offset
                const unsigned int v_index = (unsigned int)(di | (dj << 1) | (dk << 2));
                int lo{ 0 };
                int hi{ 0 };
                float fac{ 1.f };
                // x-component
                stencil(i, ugrid.idim, lo, hi, fac);
                n[v_index].x = (tex3D<float>(u, hi, j, k) - tex3D<float>(u, lo, j, k)) / (fac * ugrid.dx);
                // y-component
                stencil(j, ugrid.jdim, lo, hi, fac);
                n[v_index].y = (tex3D<float>(u, i, hi, k) - tex3D<float>(u, i, lo, k)) / (fac * ugrid.dy);
                // z-component
                stencil(k, ugrid.kdim, lo, hi, fac);
                n[v_index].z = (tex3D<float>(u, i, j, hi) - tex3D<float>(u, i, j, lo)) / (fac * ugrid.dz);
            }
        }
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// CUDA GLOBAL FUNCTIONS
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// init hash table
// Initialize the vertex hash table: one thread per bucket, each bucket flagged as unused.
__global__ void init_hash_table(VertexHashTable ht_)
{
    const int bucket = blockIdx.x * blockDim.x + threadIdx.x;
    if (bucket < ht_.t_size) {
        ht_.key[bucket] = EMPTY_BUCKET_32;
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// TOPOLOGICALLY CORRECT MARCHING CUBES
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Marching Cubes
// Count cells intersected by the isosurface
// Compute the number of vertices
// Counting pass: one thread per grid cell. Classifies the cell against the iso-value,
// accumulates the number of vertices the cell will generate, and records the cell's
// global id either in the regular-cell list or (for MC cases flagged as ambiguous)
// in the ambiguous-cell list for later processing.
__global__ void mc_count(CellsIds cells, AmbiguousCells acells, int* v_count, int* a_count, float i0, cudaTextureObject_t v_data, UGrid ugrid, MC_lookup l_tables)
{
    const int gl_index = blockIdx.x * blockDim.x + threadIdx.x;
    const int i = ugrid.i_index(gl_index);
    const int j = ugrid.j_index(gl_index);
    const int k = ugrid.k_index(gl_index);
    // skip threads outside the cell range (cells need a +1 neighbor in every axis)
    if (i >= ugrid.idim - 1 || j >= ugrid.jdim - 1 || k >= ugrid.kdim - 1) {
        return;
    }
    // MC case number: bit c is set iff the scalar at corner c is >= iso-value;
    // corner c has offsets (c & 1, (c >> 1) & 1, (c >> 2) & 1) from (i, j, k)
    uint i_case{ 0 };
    for (int c = 0; c < 8; c++) {
        const float s = tex3D<float>(v_data, i + (c & 1), j + ((c >> 1) & 1), k + ((c >> 2) & 1));
        i_case |= ((uint)(s >= i0)) << c;
    }
    // edge pattern for this case: one bit per intersected edge owned by this cell
    const ushort e_ = l_tables.e_[i_case];
    const int nr_vertices = numberOfSetBits<ushort>(e_);
    if (nr_vertices == 0) {
        return;
    }
    // register the cell: BIT_16 in the edge pattern flags an ambiguous MC case
    if (e_ & BIT_16) {
        atomicAdd(a_count, nr_vertices);
        acells.a_cells[atomicAdd(acells.t_size, 1)] = gl_index;
    }
    else {
        atomicAdd(v_count, nr_vertices);
        cells.cells_[atomicAdd(cells.t_size, 1)] = gl_index;
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Based on standard Marching Cubes compute cell triangulation for non ambiguous cases
// Save cell global id to process ambiguous cases later
// Parameters
// @param i0 is the iso-value
// @param v_data is a 3D texture with the volume data
// @param ugrid contains information describing the uniform grid
// @param l_tables is a structure with pointers to the lookup table for MC
// struct MC_lookup {
// unsigned short* e_pattern;
// int* t_pattern;
// uint* t_ambig;
// };
// @param nr_cells is the total nr. of cells intersected by the iso-surface
// @param cellid is a field with the global id of the cells in the uniform grid
// @param c_addr is an atomic counter to compute the index in the array a_cells with ids of the cells which have an ambiguous case
// @param a_cell a pointer to a field containing the ids of the ambiguous cases to be processed later
// @param ht_ hash table to compute unique vertex index
// @param v_ a structure containing all pointer required for vertex processing
// struct Vertices {
// float4* vertices{ nullptr };
// float4* normals{ nullptr };
// int* t_size{ nullptr };
// };
// @param t_ a structure containing all pointer required for triangle processing
// struct Triangles {
// int3* triangles{ nullptr };
// int* t_size{ nullptr };
// };
__global__ void mc_slice(const float i0, cudaTextureObject_t v_data, UGrid ugrid, MC_lookup l_tables, const int nr_cells, CellsIds cells, VertexHashTable ht_, Vertices v_, Triangles t_)
{
    // per-block triangle staging: up to 5 triangles per cell, slot layout is
    // interleaved by thread id, so thread tr owns slots tr, tr+bz, ..., tr+4*bz
    __shared__ int4 tris[5 * MC_BLOCKSIZE];
    // get thread id
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int bz = blockDim.x;
    const int tr = threadIdx.x;
    // FIX: the original code returned early for tid >= nr_cells, which let the
    // tail threads of the last block exit before the block-wide __syncthreads()
    // below -- a divergent barrier, which is undefined behavior. Inactive threads
    // now stay resident: they only initialize their (empty) staging slots and
    // then take part in the barrier and the flush loop.
    const bool active = (tid < nr_cells);
    // mark all of this thread's staging slots as empty
    for (int i = tr; i < 5 * bz; i += bz) {
        tris[i].x = -1;
    }
    if (active) {
        // compute grid indices from global index
        const int gl_index = cells.cells_[tid];
        const int i_index = ugrid.i_index(gl_index);
        const int j_index = ugrid.j_index(gl_index);
        const int k_index = ugrid.k_index(gl_index);
        // construct 8 cell vertices
        float3 v[8];
        cell_vertices(v, i_index, j_index, k_index, ugrid);
        // scalar values at the 8 cell corners
        float u[8];
        u[0] = tex3D<float>(v_data, i_index, j_index, k_index);
        u[1] = tex3D<float>(v_data, i_index + 1, j_index, k_index);
        u[2] = tex3D<float>(v_data, i_index, j_index + 1, k_index);
        u[3] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index);
        u[4] = tex3D<float>(v_data, i_index, j_index, k_index + 1);
        u[5] = tex3D<float>(v_data, i_index + 1, j_index, k_index + 1);
        u[6] = tex3D<float>(v_data, i_index, j_index + 1, k_index + 1);
        u[7] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index + 1);
        // normals (scalar-field gradient) at the 8 cell corners
        float3 n[8];
        gradient(n, v_data, ugrid, i_index, j_index, k_index);
        // MC case number: bit c set iff corner c is >= iso-value
        uint i_case{ 0 };
        i_case = i_case + ((uint)(u[0] >= i0));
        i_case = i_case + ((uint)(u[1] >= i0)) * 2;
        i_case = i_case + ((uint)(u[2] >= i0)) * 4;
        i_case = i_case + ((uint)(u[3] >= i0)) * 8;
        i_case = i_case + ((uint)(u[4] >= i0)) * 16;
        i_case = i_case + ((uint)(u[5] >= i0)) * 32;
        i_case = i_case + ((uint)(u[6] >= i0)) * 64;
        i_case = i_case + ((uint)(u[7] >= i0)) * 128;
        // ambiguous cases are processed in the next pass (t_slice)
        const ushort e_ = l_tables.e_[i_case];
        // Compute intersection with edges.
        // gei_pattern_ packs, per edge (5 bits each), the grid offsets and the
        // slot used to build a unique global edge/vertex id.
        const unsigned long long gei_pattern_ = 670526590282893600ull;
        // l_edges_ packs the two cell-corner indices of each edge, 4 bits each
        const unsigned char l_edges_[12]{ 16, 49, 50, 32, 84, 117, 118, 100, 64, 81, 115, 98 };
        int v_gindex[12];
        ushort flag{ 1 };
        for (int e = 0; e < 12; e++) {
            v_gindex[e] = -1;
            if (flag & e_) {
                // compute the global id of the vertex on this edge
                const int ix = i_index + (int)((gei_pattern_ >> 5 * e) & 1); // global_edge_id[eg][0]
                const int iy = j_index + (int)((gei_pattern_ >> (5 * e + 1)) & 1); // global_edge_id[eg][1]
                const int iz = k_index + (int)((gei_pattern_ >> (5 * e + 2)) & 1); // global_edge_id[eg][2]
                const int off_val = (int)((gei_pattern_ >> (5 * e + 3)) & 3);
                int address{ -1 };
                // hash-insert; address > -1 means this thread owns the vertex
                // and is responsible for writing its position and normal
                v_gindex[e] = insert_vertex_fast(int(9 * ugrid.gl_index(ix, iy, iz) + off_val), ht_, v_, address);
                if (address > -1) {
                    // interpolate position and normal along the edge at the
                    // local coordinate where the iso-value is crossed
                    const int v0 = (l_edges_[e] & 0xF);
                    const int v1 = (l_edges_[e] >> 4) & 0xF;
                    const float l = (i0 - u[v0]) / (u[v1] - u[v0]);
                    float4 vp = make_float4(v[v0].x + l*(v[v1].x - v[v0].x), v[v0].y + l*(v[v1].y - v[v0].y), v[v0].z + l*(v[v1].z - v[v0].z), 1.f);
                    float4 np = make_float4(n[v0].x + l*(n[v1].x - n[v0].x), n[v0].y + l*(n[v1].y - n[v0].y), n[v0].z + l*(n[v1].z - n[v0].z), 0.f);
                    const float length = std::sqrt(np.x * np.x + np.y * np.y + np.z * np.z);
                    np.x = np.x / length;
                    np.y = np.y / length;
                    np.z = np.z / length;
                    v_.vertices[address] = vp;
                    v_.normals[address] = np;
                }
            }
            flag <<= 1;
        }
        // stage the triangles of this cell: t_ lookup packs up to 5 triangles
        // (three 4-bit local edge indices each), 0xF terminates the list
        unsigned long long tl_ = l_tables.t_[i_case];
        for (int t = 0; t < 16; t += 3) {
            const int v0 = (int)((tl_ >> (4 * t)) & 0xFull);
            if (v0 == 0xF) {
                // there are no more triangles
                break;
            }
            const int v1 = (int)((tl_ >> (4 * (t + 1))) & 0xFull);
            const int v2 = (int)((tl_ >> (4 * (t + 2))) & 0xFull);
            tris[tr + (t / 3) * bz] = make_int4(v_gindex[v0], v_gindex[v1], v_gindex[v2], 0);
        }
    }
    __syncthreads();
    // flush the staged triangles of this thread to global memory
    for (int i = tr; i < 5 * bz; i += bz) {
        if (tris[i].x > -1) {
            t_.triangles[atomicAdd(t_.t_size, 1)] = tris[i];
        }
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute the triangulation of a cell with an ambiguous case
__global__ void t_slice(const float i0, cudaTextureObject_t v_data, UGrid ugrid, MC_lookup l_tables, const int nr_cells, AmbiguousCells acells, VertexHashTable ht_, Vertices v_, Triangles t_)
{
__shared__ float3 n[8 * AMB_BLOCKSIZE];
__shared__ float3 p[8 * AMB_BLOCKSIZE];
// get cell id
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (nr_cells <= tid)
return;
// compute grid indices from global index
const int gl_index = acells.a_cells[tid];
const int i_index = ugrid.i_index(gl_index);
const int j_index = ugrid.j_index(gl_index);
const int k_index = ugrid.k_index(gl_index);
const int tr = threadIdx.x * 8;
// construct 8 cell vertices
//float3 p[8];
//cell_vertices(p, i_index, j_index, k_index, ugrid);
p[tr].x = ugrid.x0 + i_index * ugrid.dx;
p[tr].y = ugrid.y0 + j_index * ugrid.dy;
p[tr].z = ugrid.z0 + k_index * ugrid.dz;
p[tr + 1].x = p[tr].x + ugrid.dx;
p[tr + 1].y = p[tr].y;
p[tr + 1].z = p[tr].z;
p[tr + 2].x = p[tr].x;
p[tr + 2].y = p[tr].y + ugrid.dy;
p[tr + 2].z = p[tr].z;
p[tr + 3].x = p[tr].x + ugrid.dx;
p[tr + 3].y = p[tr].y + ugrid.dy;
p[tr + 3].z = p[tr].z;
p[tr + 4].x = p[tr].x;
p[tr + 4].y = p[tr].y;
p[tr + 4].z = p[tr].z + ugrid.dz;
p[tr + 5].x = p[tr].x + ugrid.dx;
p[tr + 5].y = p[tr].y;
p[tr + 5].z = p[tr].z + ugrid.dz;
p[tr + 6].x = p[tr].x;
p[tr + 6].y = p[tr].y + ugrid.dy;
p[tr + 6].z = p[tr].z + ugrid.dz;
p[tr + 7].x = p[tr].x + ugrid.dx;
p[tr + 7].y = p[tr].y + ugrid.dy;
p[tr + 7].z = p[tr].z + ugrid.dz;
// scalar values at vertices
float F[8];
F[0] = tex3D<float>(v_data, i_index, j_index, k_index);
F[1] = tex3D<float>(v_data, i_index + 1, j_index, k_index);
F[2] = tex3D<float>(v_data, i_index, j_index + 1, k_index);
F[3] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index);
F[4] = tex3D<float>(v_data, i_index, j_index, k_index + 1);
F[5] = tex3D<float>(v_data, i_index + 1, j_index, k_index + 1);
F[6] = tex3D<float>(v_data, i_index, j_index + 1, k_index + 1);
F[7] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index + 1);
// compute normals at vertices
//float3 n[8];
gradientShared(tr, n, v_data, ugrid, i_index, j_index, k_index);
__syncthreads();
// compute case
uint i_case{ 0 };
i_case = i_case + ((uint)(F[0] >= i0));
i_case = i_case + ((uint)(F[1] >= i0)) * 2;
i_case = i_case + ((uint)(F[2] >= i0)) * 4;
i_case = i_case + ((uint)(F[3] >= i0)) * 8;
i_case = i_case + ((uint)(F[4] >= i0)) * 16;
i_case = i_case + ((uint)(F[5] >= i0)) * 32;
i_case = i_case + ((uint)(F[6] >= i0)) * 64;
i_case = i_case + ((uint)(F[7] >= i0)) * 128;
// Compute intersection with edges
const unsigned long long gei_pattern_ = 670526590282893600ull;
const unsigned char l_edges_[12]{ 16, 49, 50, 32, 84, 117, 118, 100, 64, 81, 115, 98 };
// compute intersection with cell edges
float ecoord[12]{};
int v_gindex[12]{};
ushort flag{ 1 };
ushort e_ = l_tables.e_[i_case];
for (int e = 0; e < 12; e++) {
v_gindex[e] = -1;
//ecoord[e] = 0.f;
if (flag & e_) {
// get unique vertex index
// compute vertex global index
const int ix = i_index + (int)((gei_pattern_ >> 5 * e) & 1); // global_edge_id[eg][0];
const int iy = j_index + (int)((gei_pattern_ >> (5 * e + 1)) & 1); // global_edge_id[eg][1];
const int iz = k_index + (int)((gei_pattern_ >> (5 * e + 2)) & 1); // global_edge_id[eg][2];
const int off_val = (int)((gei_pattern_ >> (5 * e + 3)) & 3);
int address{ -1 };
v_gindex[e] = insert_vertex_fast(int(9 * ugrid.gl_index(ix, iy, iz) + off_val), ht_, v_,address);
// compute edge inersection
// compute local coordinate along edge
const int v0 = (l_edges_[e] & 0xF);
const int v1 = (l_edges_[e] >> 4) & 0xF;
const float l = (i0 - F[v0]) / (F[v1] - F[v0]);
if (address > -1) {
v_.vertices[address] = make_float4(p[tr + v0].x + l*(p[tr + v1].x - p[tr + v0].x), p[tr + v0].y + l*(p[tr + v1].y - p[tr + v0].y), p[tr + v0].z + l*(p[tr + v1].z - p[tr + v0].z), 1.f);
float4 np = make_float4(n[tr + v0].x + l*(n[tr + v1].x - n[tr + v0].x), n[tr + v0].y + l*(n[tr + v1].y - n[tr + v0].y), n[tr + v0].z + l*(n[tr + v1].z - n[v0].z), 0.f);
const float length = std::sqrt(np.x * np.x + np.y * np.y + np.z * np.z);
np.x = np.x / length;
np.y = np.y / length;
np.z = np.z / length;
v_.normals[address] = np;
}
//v_gindex[e] = v_.add(ht_, int(9 * ugrid.gl_index(ix, iy, iz) + off_val), vp, np);
// remember local coordinate along edge
ecoord[e] = l;
}
flag <<= 1;
}
// compute oriented contours
// 1. build segments
// 2. connect segments
// build up segments
// set segments map
unsigned char segm_[12] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
auto set_segm = [](const int ei, const int eo, unsigned char segm_[12]) {
segm_[ei] &= 0xF0;
segm_[ei] |= ((unsigned char)eo) & 0xF;
segm_[eo] &= 0xF;
segm_[eo] |= ((unsigned char)ei) << 4;
};
auto get_segm = [](const int e, const int pos, unsigned char segm_[12]) {
if (pos == 0)
return (int)(segm_[e] & 0xF);
else
return (int)((segm_[e] >> 4) & 0xF);
};
auto is_segm_set = [](const int e, unsigned char segm_[12]) {
return (segm_[e] != 0xFF);
};
auto unset_segm = [](const int e, unsigned char segm_[12]) {
segm_[e] = 0xFF;
};
// In order to compute oriented segments, the hexahedron has to be flatten.
// The insides of the faces of the hexahedron have to be all at the same
// side of the flattend hexa. This requires changing the order of the
// edges when reading from the faces
// code edges at face
unsigned short e_face_[6]{ (ushort)291, (ushort)18277, (ushort)18696, (ushort)10859, (ushort)33719, (ushort)38305 };
// code vertices at face
unsigned short v_face_[6]{ (ushort)12576, (ushort)25717, (ushort)5380, (ushort)29538, (ushort)8292, (ushort)30001 };
// reading edge from face
auto get_face_e = [e_face_](const int f, const int e) { return ((e_face_[f] >> (4 * e)) & 0xF); };
auto get_face_v = [v_face_](const int f, const int e) { return ((v_face_[f] >> (4 * e)) & 0xF); };
// compute oriented segments using the isoline scheme at the faces
auto asymptotic_decider = [](const float f0, const float f1, const float f2, const float f3) {
return (f0*f3 - f1*f2) / (f0 + f3 - f1 - f2);
};
uchar f_flag{ 0 };
for (int f = 0; f < 6; f++) {
// classify face
unsigned int f_case{ 0 };
const int v0 = get_face_v(f, 0);
const int v1 = get_face_v(f, 1);
const int v2 = get_face_v(f, 2);
const int v3 = get_face_v(f, 3);
const int e0 = get_face_e(f, 0);
const int e1 = get_face_e(f, 1);
const int e2 = get_face_e(f, 2);
const int e3 = get_face_e(f, 3);
const float f0 = F[v0];
const float f1 = F[v1];
const float f2 = F[v2];
const float f3 = F[v3];
if (f0 >= i0)
f_case |= BIT_1;
if (f1 >= i0)
f_case |= BIT_2;
if (f2 >= i0)
f_case |= BIT_3;
if (f3 >= i0)
f_case |= BIT_4;
switch (f_case)
{
case 1:
set_segm(e0, e3, segm_);
break;
case 2:
set_segm(e1, e0, segm_);
break;
case 3:
set_segm(e1, e3, segm_);
break;
case 4:
set_segm(e3, e2, segm_);
break;
case 5:
set_segm(e0, e2, segm_);
break;
case 6:
{
const float val = asymptotic_decider(f0, f1, f2, f3);
if (val > i0) {
set_segm(e3, e0, segm_);
set_segm(e1, e2, segm_);
}
else if (val < i0) {
set_segm(e1, e0, segm_);
set_segm(e3, e2, segm_);
}
else {
// set flag for this face
f_flag |= (1 << f);
float ec0 = ecoord[e0];
float ec1 = ecoord[e1];
float ec2 = ecoord[e2];
float ec3 = ecoord[e3];
if ((0x218 >> (f * 2)) & BIT_1) {
ec0 = 1 - ec0;
ec2 = 1 - ec2;
}
if ((0x218 >> (f * 2)) & BIT_2) {
ec1 = 1 - ec1;
ec3 = 1 - ec3;
}
if (ec1 < ec3 && ec0 > ec2) {
set_segm(e1, e0, segm_);
set_segm(e3, e2, segm_);
}
else if (ec1 > ec3 && ec0 < ec2) {
set_segm(e3, e0, segm_);
set_segm(e1, e2, segm_);
}
else {
return;
}
}
}
break;
case 7:
set_segm(e1, e2, segm_);
break;
case 8:
set_segm(e2, e1, segm_);
break;
case 9:
{
const double val = asymptotic_decider(f0, f1, f2, f3);
if (val > i0) {
set_segm(e0, e1, segm_);
set_segm(e2, e3, segm_);
}
else if (val < i0) {
set_segm(e0, e3, segm_);
set_segm(e2, e1, segm_);
}
else {
f_flag = (1 << f);
// singular case val == i0, there are no asymptotes
// check if there is a reasonable triangulation of the face
float ec0 = ecoord[e0];
float ec1 = ecoord[e1];
float ec2 = ecoord[e2];
float ec3 = ecoord[e3];
if ((0x218 >> (f * 2)) & BIT_1) {
ec0 = 1 - ec0;
ec2 = 1 - ec2;
}
if ((0x218 >> (f * 2)) & BIT_2) {
ec1 = 1 - ec1;
ec3 = 1 - ec3;
}
if (ec1 < ec3 && ec0 > ec2) {
set_segm(e0, e1, segm_);
set_segm(e2, e3, segm_);
}
else if (ec1 > ec3 && ec0 < ec2) {
set_segm(e0, e3, segm_);
set_segm(e2, e1, segm_);
}
else {
return;
}
}
}
break;
case 10:
set_segm(e2, e0, segm_);
break;
case 11:
set_segm(e2, e3, segm_);
break;
case 12:
set_segm(e3, e1, segm_);
break;
case 13:
set_segm(e0, e1, segm_);
break;
case 14:
set_segm(e3, e0, segm_);
break;
default:
break;
}
}
// connect oriented segments into oriented contours
// closed contours are coded in 64 bit unsigned long long
// 1) Each entry has 4 bits
// 2) The first 4 entries are reserved for the size of the contours
// 3) The next 12 entries are the indices of the edges constituting the contorus
// The indices are numbers from 0 to 12
unsigned long long c_ = 0xFFFFFFFFFFFF0000;
// in the 4 first bits store size of contours
auto get_cnt_size = [](const int cnt, unsigned long long &c_) {
return (size_t)((c_ & (0xF << 4 * cnt)) >> 4 * cnt);
};
auto set_cnt_size = [](const int cnt, const int size, unsigned long long &c_) {
// unset contour size
c_ &= ~(0xF << 4 * cnt);
c_ |= (size << 4 * cnt);
};
// set corresponging edge
auto set_c = [](const int cnt, const int pos, const int val, unsigned long long &c_) {
const uint mask[4] = { 0x0, 0xF, 0xFF, 0xFFF };
const uint c_sz = c_ & mask[cnt];
const uint e = 16 + 4 * ((c_sz & 0xF) + ((c_sz & 0xF0) >> 4) + ((c_sz & 0xF00) >> 8) + pos);
c_ &= ~(((unsigned long long)0xF) << e);
c_ |= (((unsigned long long)val) << e);
};
// read edge from contour
auto get_c = [](const int cnt, const int pos, unsigned long long c_) {
const uint mask[4] = { 0x0, 0xF, 0xFF, 0xFFF };
const uint c_sz = (uint)(c_ & mask[cnt]);
const uint e = 16 + 4 * ((c_sz & 0xF) + ((c_sz & 0xF0) >> 4) + ((c_sz & 0xF00) >> 8) + pos);
return (int)((c_ >> e) & 0xF);
};
// connect oriented contours
uint cnt_{ 0 };
for (uint e = 0; e < 12; e++) {
if (is_segm_set(e, segm_)) {
uint eTo = get_segm(e, 0, segm_);
uint eIn = get_segm(e, 1, segm_);
uint eStart = e;
uint pos = 0;
set_c(cnt_, pos, eStart, c_);
while (eTo != eStart) {
pos = pos + 1;
set_c(cnt_, pos, eTo, c_);
eIn = eTo;
eTo = get_segm(eIn, 0, segm_);
unset_segm(eIn, segm_);
}
// set contour length
set_cnt_size(cnt_, pos + 1, c_);
// update number of contours
cnt_ = cnt_ + 1;
}
}
// compute intersection of opposite faces
float ui[2]{};
float vi[2]{};
float wi[2]{};
unsigned char q_sol{ 0 };
const float a = (F[0] - F[1])*(-F[6] + F[7] + F[4] - F[5]) - (F[4] - F[5])*(-F[2] + F[3] + F[0] - F[1]);
const float b = (i0 - F[0])*(-F[6] + F[7] + F[4] - F[5]) + (F[0] - F[1])*(F[6] - F[4]) - (i0 - F[4])*(-F[2] + F[3] + F[0] - F[1]) - (F[4] - F[5])*(F[2] - F[0]);
const float c = (i0 - F[0])*(F[6] - F[4]) - (i0 - F[4])*(F[2] - F[0]);;
float d = b*b - 4 * a*c;
if (d > 0) {
d = std::sqrt(d);
// compute u-coord of solutions
ui[0] = (-b - d) / (2 * a);
ui[1] = (-b + d) / (2 * a);
// compute v-coord of solutions
float g1 = F[0] * (1 - ui[0]) + F[1] * ui[0];
float g2 = F[2] * (1 - ui[0]) + F[3] * ui[0];
vi[0] = (i0 - g1) / (g2 - g1);
if (isnan(vi[0]) || isinf(vi[0])) {
vi[0] = -1.f;
}
g1 = F[0] * (1 - ui[1]) + F[1] * ui[1];
g2 = F[2] * (1 - ui[1]) + F[3] * ui[1];
vi[1] = (i0 - g1) / (g2 - g1);
if (isnan(vi[1]) || isinf(vi[1])) {
vi[1] = -1.f;
}
// compute w-coordinates of solutions
g1 = F[0] * (1 - ui[0]) + F[1] * ui[0];
g2 = F[4] * (1 - ui[0]) + F[5] * ui[0];
wi[0] = (i0 - g1) / (g2 - g1);
if (isnan(wi[0]) || isinf(wi[0])) {
wi[0] = -1.f;
}
g1 = F[0] * (1 - ui[1]) + F[1] * ui[1];
g2 = F[4] * (1 - ui[1]) + F[5] * ui[1];
wi[1] = (i0 - g1) / (g2 - g1);
if (isnan(wi[1]) || isinf(wi[1])) {
wi[1] = -1.f;
}
// correct values for roots of quadratic equations
// in case the asymptotic decider has failed
if (f_flag & BIT_1) { // face 1, w = 0;
if (wi[0] < wi[1]) wi[0] = 0;
else wi[1] = 0;
}
if (f_flag & BIT_2) { // face 2, w = 1
if (wi[0] > wi[1]) wi[1] = 1;
else wi[1] = 1;
}
if (f_flag & BIT_3) { // face 3, v = 0
if (vi[0] < vi[1]) vi[0] = 0;
else vi[1] = 0;
}
if (f_flag & BIT_4) { // face 4, v = 1
if (vi[0] > vi[1]) vi[0] = 1;
else vi[1] = 1;
}
if (f_flag & BIT_5) { // face 5, u = 0
if (ui[0] < ui[1]) ui[0] = 0;
else ui[1] = 0;
}
if (f_flag & BIT_6) { // face 6, u = 1
if (ui[0] > ui[1]) ui[0] = 1;
else ui[1] = 1;
}
// check solution intervals
if (0 < ui[0] && ui[0] < 1) {
q_sol |= 1;
}
if (0 < ui[1] && ui[1] < 1) {
q_sol |= 2;
}
if (0 < vi[0] && vi[0] < 1) {
q_sol |= 4;
}
if (0 < vi[1] && vi[1] < 1) {
q_sol |= 8;
}
if (0 < wi[0] && wi[0] < 1) {
q_sol |= 16;
}
if (0 < wi[1] && wi[1] < 1) {
q_sol |= 32;
}
}
// compute the number of solutions to the quadratic equation for a given face
auto nrQSolFace = [](const uint f, const unsigned char n) {
uint nr{ 0 };
switch (f) {
case 0:
if ((n & 0x5) == 0x5)
nr = nr + 1;
if ((n & 0xA) == 0xA)
nr = nr + 1;
break;
case 1:
if ((n & 0x11) == 0x11) nr = nr + 1;
if ((n & 0x22) == 0x22) nr = nr + 1;
break;
case 2:
if ((n & 0x18) == 0x18) nr = nr + 1;
if ((n & 0x24) == 0x24) nr = nr + 1;
break;
}
return nr;
};
// triangulate contours
// if all bits are set, then there are three pairs of nontrivial solutions
// to the quadratic equations. In this case, there is a tunnel or a contour
// with 12 vertices. If there are three contours, then there is a tunnel and
// one of the contorus with only three vertices is not part of it.
// Triangles are stored in global memory starting at offset
// count nr. of inner vertices to compute right global index
// first inner vertex has index cell_global_index + 3;
int v_count{ 3 };
if (numberOfSetBits<unsigned char>(q_sol) == 6) {
// there are at most three contours
// Possible cases:
// 1) a single contour with 12 vertices
// 2) two contours which build a tunnel
// 3) three contours, one has only 3 vertices and does not belong to the tunnel
// construct the six vertices of the inner hexagon
float3 hvt[6];
hvt[0].x = ui[0]; hvt[0].y = vi[0]; hvt[0].z = wi[0];
hvt[1].x = ui[0]; hvt[1].y = vi[0]; hvt[1].z = wi[1];
hvt[2].x = ui[1]; hvt[2].y = vi[0]; hvt[2].z = wi[1];
hvt[3].x = ui[1]; hvt[3].y = vi[1]; hvt[3].z = wi[1];
hvt[4].x = ui[1]; hvt[4].y = vi[1]; hvt[4].z = wi[0];
hvt[5].x = ui[0]; hvt[5].y = vi[1]; hvt[5].z = wi[0];
// construct vertices at intersections with the edges
auto e_vert = [&ecoord](const int e, const int i) {
const unsigned int l_coord[3]{ 1324855, 5299420, 16733440 };
unsigned char flag = (l_coord[i] >> (2 * e)) & 3;
if (flag == 3)
return ecoord[e];
else
return (float)(flag);
};
// if there are three contours, then there is a tunnel and one
// of the contours is not part of it.
unsigned char _not_tunnel = 0xF;
if (cnt_ == 3) {
// loop over the contorus
// triangulate the contour which is not part of
// the tunnel
const float uc_min = (ui[0] < ui[1]) ? ui[0] : ui[1];
const float uc_max = (ui[0] < ui[1]) ? ui[1] : ui[0];
for (int t = 0; t < (int)cnt_; t++) {
if (get_cnt_size(t, c_) == 3) {
float umin = 2;
float umax = -2;
uint e0 = get_c(t, 0, c_);
uint e1 = get_c(t, 1, c_);
uint e2 = get_c(t, 2, c_);
const float u_e0 = e_vert(e0, 0);
const float u_e1 = e_vert(e1, 0);
const float u_e2 = e_vert(e2, 0);
umin = (u_e0 < umin) ? u_e0 : umin;
umin = (u_e1 < umin) ? u_e1 : umin;
umin = (u_e2 < umin) ? u_e2 : umin;
umax = (u_e0 > umax) ? u_e0 : umax;
umax = (u_e1 > umax) ? u_e1 : umax;
umax = (u_e2 > umax) ? u_e1 : umax;
if (uc_min > umax || uc_max < umin) {
// this contour is not part of the tunnel
_not_tunnel = t;
// save triangle in global memory
t_.triangles[atomicAdd(t_.t_size,1)] = make_int4(v_gindex[e0], v_gindex[e1], v_gindex[e2], 0);
}
}
}
}
// compute vertices of inner hexagon, save new vertices in list and compute and keep
// global vertice index to build triangle connectivity later on.
int tg_idx[6];
float4 po;
for (int i = 0; i < 6; i++) {
int address{ -1 };
tg_idx[i] = insert_vertex_fast(int(9 * gl_index + v_count), ht_, v_, address);
// update nr. of vertices
v_count++;
// create a store vertex and normal
//float4 po;
//float4 hn;
// local coordinates for trilinear interpolation
const float u = hvt[i].x; const float v = hvt[i].y; const float w = hvt[i].z;
po.x = (1 - w)*((1 - v)*(p[tr].x + u*(p[tr + 1].x - p[tr].x)) + v*(p[tr + 2].x + u*(p[tr + 3].x - p[tr + 2].x))) + w*((1 - v)*(p[tr + 4].x + u*(p[tr + 5].x - p[tr + 4].x)) + v*(p[tr + 6].x + u*(p[tr + 7].x - p[tr + 6].x)));
po.y = (1 - w)*((1 - v)*(p[tr].y + u*(p[tr + 1].y - p[tr].y)) + v*(p[tr + 2].y + u*(p[tr + 3].y - p[tr + 2].y))) + w*((1 - v)*(p[tr + 4].y + u*(p[tr + 5].y - p[tr + 4].y)) + v*(p[tr + 6].y + u*(p[tr + 7].y - p[tr + 6].y)));
po.z = (1 - w)*((1 - v)*(p[tr].z + u*(p[tr + 1].z - p[tr].z)) + v*(p[tr + 2].z + u*(p[tr + 3].z - p[tr + 2].z))) + w*((1 - v)*(p[tr + 4].z + u*(p[tr + 5].z - p[tr + 4].z)) + v*(p[tr + 6].z + u*(p[tr + 7].z - p[tr + 6].z)));
//trilinear(po, p, hvt[i].x, hvt[i].y, hvt[i].z);
po.w = 1.f;
v_.vertices[address] = po;
//trilinear(po, n, hvt[i].x, hvt[i].y, hvt[i].z);
po.x = (1 - w)*((1 - v)*(n[tr].x + u*(n[tr + 1].x - n[tr].x)) + v*(n[tr + 2].x + u*(n[tr + 3].x - n[tr + 2].x))) + w*((1 - v)*(n[tr + 4].x + u*(n[tr + 5].x - n[tr + 4].x)) + v*(n[tr + 6].x + u*(n[tr + 7].x - n[tr + 6].x)));
po.y = (1 - w)*((1 - v)*(n[tr].y + u*(n[tr + 1].y - n[tr].y)) + v*(n[tr + 2].y + u*(n[tr + 3].y - n[tr + 2].y))) + w*((1 - v)*(n[tr + 4].y + u*(n[tr + 5].y - n[tr + 4].y)) + v*(n[tr + 6].y + u*(n[tr + 7].y - n[tr + 6].y)));
po.z = (1 - w)*((1 - v)*(n[tr].z + u*(n[tr + 1].z - n[tr].z)) + v*(n[tr + 2].z + u*(n[tr + 3].z - n[tr + 2].z))) + w*((1 - v)*(n[tr + 4].z + u*(n[tr + 5].z - n[tr + 4].z)) + v*(n[tr + 6].z + u*(n[tr + 7].z - n[tr + 6].z)));
// normalize normal
const float factor = std::sqrt(po.x * po.x + po.y * po.y + po.z * po.z);
po.x = po.x / factor;
po.y = po.y / factor;
po.z = po.z / factor;
po.w = 0.f;
v_.normals[address] = po;
}
// triangulate contours with inner hexagon
unsigned char tcon_[12];
for (int i = 0; i < (int)cnt_; i++) {
if (_not_tunnel != i) { // contour belongs to tunnel
const int cnt_sz = (int)get_cnt_size(i, c_);
for (int r = 0; r < cnt_sz; r++) {
int index = -1;
double dist = 1000.;
uint ci = get_c(i, r, c_);
const float u_edge = e_vert(ci, 0);
const float v_edge = e_vert(ci, 1);
const float w_edge = e_vert(ci, 2);
for (int s = 0; s < 6; s++) {
const float uval = u_edge - hvt[s].x;
const float vval = v_edge - hvt[s].y;
const float wval = w_edge - hvt[s].z;
const float val = uval*uval + vval*vval + wval*wval;
if (dist > val) {
index = s;
dist = val;
}
}
tcon_[ci] = (unsigned char)index;
}
// correspondence between vertices found
// create triangles
// needs some functions
auto distanceRingIntsModulo = [](const int d1, const int d2) {
const int r = (d1 - d2) < 0 ? d2 - d1 : d1 - d2;
return (r > 2 ? 6 - r : r);
};
auto midpointRingIntModulo = [](const int d1, const int d2) {
const int dmax = (d1 > d2) ? d1 : d2;
const int dmin = (d1 < d2) ? d1 : d2;
return ((dmax + 2) % 6 == dmin) ? (dmax + 1) % 6 : (dmax + dmin) / 2;
};
for (int r = 0; r < cnt_sz; r++) {
const uint tid1 = get_c(i, r, c_);
const uint tid2 = get_c(i, ((r + 1) % cnt_sz), c_);
const uint cid1 = tcon_[tid1];
const uint cid2 = tcon_[tid2];
// compute index distance
const int dst = distanceRingIntsModulo(cid1, cid2);
switch (dst)
{
case 0:
{
t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[tid1], v_gindex[tid2], tg_idx[cid1],0);
}
break;
case 1:
{
// measure diagonals
// triangulate along shortest diagonal
float u_edge = e_vert(tid1, 0);
float v_edge = e_vert(tid1, 1);
float w_edge = e_vert(tid1, 2);
const float l1 = (u_edge - hvt[cid2].x)*(u_edge - hvt[cid2].x) + (v_edge - hvt[cid2].y)*(v_edge - hvt[cid2].y) + (w_edge - hvt[cid2].z)*(w_edge - hvt[cid2].z);
u_edge = e_vert(tid2, 0);
v_edge = e_vert(tid2, 1);
w_edge = e_vert(tid2, 2);
const double l2 = (u_edge - hvt[cid1].x)*(u_edge - hvt[cid1].x) + (v_edge - hvt[cid1].y)*(v_edge - hvt[cid1].y) + (w_edge - hvt[cid1].z)*(w_edge - hvt[cid1].z);
const int a_ = atomicAdd(t_.t_size, 2);
if (l1 < l2) {
t_.triangles[a_] = make_int4(v_gindex[tid1], v_gindex[tid2], tg_idx[cid2], 0);
t_.triangles[a_+1] = make_int4(v_gindex[tid1], tg_idx[cid2], tg_idx[cid1], 0);
}
else {
t_.triangles[a_] = make_int4(v_gindex[tid1], v_gindex[tid2], tg_idx[cid1], 0);
t_.triangles[a_ + 1] = make_int4(v_gindex[tid2], tg_idx[cid2], tg_idx[cid1], 0);
}
}
break;
case 2:
{
const int cidm = midpointRingIntModulo(cid1, cid2);
const int a_ = atomicAdd(t_.t_size, 3);
t_.triangles[a_] = make_int4(v_gindex[tid1], v_gindex[tid2], tg_idx[cidm], 0);
t_.triangles[a_+1] = make_int4(v_gindex[tid1], tg_idx[cidm], tg_idx[cid1], 0);
t_.triangles[a_+2] = make_int4(v_gindex[tid2], tg_idx[cid2], tg_idx[cidm], 0);
}
break;
} // switch
} // for loop over the vertices of the contour
} // if (_not_tunnel)
} // for loop over contours
if (cnt_ == 1) {
// there is a single contour
// triangulate and close inner hexagon
const int a_ = atomicAdd(t_.t_size, 4);
const bool s_ = asymptotic_decider(F[0], F[1], F[2], F[3]);
const bool of_ = (wi[1] < wi[0]) ? s_ : !s_;
if (!of_) {
t_.triangles[a_] = make_int4(tg_idx[0], tg_idx[2], tg_idx[1], 0);
t_.triangles[a_ + 1] = make_int4(tg_idx[2], tg_idx[4], tg_idx[3], 0);
t_.triangles[a_ + 2] = make_int4(tg_idx[0], tg_idx[5], tg_idx[4], 0);
t_.triangles[a_ + 3] = make_int4(tg_idx[0], tg_idx[4], tg_idx[2], 0);
}
else {
t_.triangles[a_] = make_int4(tg_idx[0], tg_idx[1], tg_idx[2], 0);
t_.triangles[a_ + 1] = make_int4(tg_idx[2], tg_idx[3], tg_idx[4], 0);
t_.triangles[a_ + 2] = make_int4(tg_idx[0], tg_idx[4], tg_idx[5], 0);
t_.triangles[a_ + 3] = make_int4(tg_idx[0], tg_idx[2], tg_idx[4], 0);
}
}
}
else {
// there is no tunnel
// handle case with no saddle point as simple polygons with 3, 4, 5 or six vertices
const unsigned char nr_u{ (unsigned char)nrQSolFace(0, q_sol) };
const unsigned char nr_v{ (unsigned char)nrQSolFace(1, q_sol) };
const unsigned char nr_w{ (unsigned char)nrQSolFace(2, q_sol) };
const unsigned char nr_t{ (unsigned char)(nr_u + nr_v + nr_w) };
if (nr_t == nr_u || nr_t == nr_v || nr_t == nr_w) {
// loop over all contours
for (int i = 0; i < (int)cnt_; i++) {
switch (get_cnt_size(i, c_)) {
case 3:
{
//const int a_ = atomicAdd(t_.t_size, 1);
t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], 0);
}
break;
case 4:
{
const int a_ = atomicAdd(t_.t_size, 2);
t_.triangles[a_] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], 0);
t_.triangles[a_+1] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)], 0);
}
break;
case 5:
{
const int a_ = atomicAdd(t_.t_size, 3);
t_.triangles[a_] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], 0);
t_.triangles[a_+1] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)], 0);
t_.triangles[a_+2] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 3, c_)], v_gindex[get_c(i, 4, c_)], 0);
}
break;
case 6:
{
const int a_ = atomicAdd(t_.t_size, 4);
t_.triangles[a_] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 3, c_)], 0);
t_.triangles[a_+1] = make_int4(v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)], 0);
t_.triangles[a_+2] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 3, c_)], v_gindex[get_c(i, 4, c_)], 0);
t_.triangles[a_+3] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 4, c_)], v_gindex[get_c(i, 5, c_)], 0);
}
break;
} // switch over size of contour
} // loop over contorus
} // thre are no saddle points
else {
// there are saddle points
//fc1 = fs(1, 1)*fs(2, 1) + fs(1, 2)*fs(2, 2);
//fc2 = fs(1, 1)*fs(3, 1) + fs(1, 2)*fs(3, 2);
//fc3 = fs(2, 1)*fs(3, 2) + fs(2, 2)*fs(3, 1);
unsigned char fs[3][2]{{(unsigned char)(q_sol & 1), (unsigned char)((q_sol >> 1) & 1)}, { (unsigned char)((q_sol >> 2) & 1), (unsigned char)((q_sol >> 3) & 1) }, { (unsigned char)((q_sol >> 4) & 1), (unsigned char)((q_sol >> 5) & 1) }};
const unsigned char fc1 = fs[0][0] * fs[1][0] + fs[0][1] * fs[1][1];
const unsigned char fc2 = fs[0][0] * fs[2][0] + fs[0][1] * fs[2][1];
const unsigned char fc3 = fs[1][0] * fs[2][1] + fs[1][1] * fs[2][0];
const unsigned char c_faces = fc1 + fc2 + fc3;
float ucoord{};
float vcoord{};
float wcoord{};
switch (c_faces) {
case 2:
{
if (fc1 == 0) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][0] * wi[1] + fs[1][1] * wi[0];
}
else if (fc2 == 0) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[0][0] * vi[0] + fs[0][1] * vi[1];
wcoord = fs[0][0] * wi[1] + fs[0][1] * wi[0];
}
else if (fc3 == 0) {
ucoord = fs[1][0] * ui[0] + fs[1][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][0] * wi[0] + fs[1][1] * wi[1];
}
}
break;
case 3:
{
ucoord = (fs[0][0] * ui[0] + fs[0][1] * ui[1]) / (fs[0][0] + fs[0][1]);
vcoord = (fs[1][0] * vi[0] + fs[1][1] * vi[1]) / (fs[1][0] + fs[1][1]);
wcoord = (fs[2][0] * wi[0] + fs[2][1] * wi[1]) / (fs[2][0] + fs[2][1]);
}
break;
case 4:
{
const unsigned char nr_u = fs[0][0] + fs[0][1];
const unsigned char nr_v = fs[1][0] + fs[1][1];
const unsigned char nr_w = fs[2][0] + fs[2][1];
if (nr_w == 1) {
ucoord = fs[2][0] * ui[0] + fs[2][1] * ui[1];
vcoord = fs[2][1] * vi[0] + fs[2][0] * vi[1];
wcoord = fs[2][0] * wi[0] + fs[2][1] * wi[1];
}
else if (nr_v == 1) {
ucoord = fs[1][0] * ui[0] + fs[1][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][1] * wi[0] + fs[1][0] * wi[1];
}
else if (nr_u == 1) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[0][0] * vi[0] + fs[0][1] * vi[1];
wcoord = fs[0][0] * wi[0] + fs[0][1] * wi[1];
}
}
break;
} // switch(c_faces)
// create inner vertex
float4 ip;
float4 in;
//ip.x = (1 - wcoord)*((1 - vcoord)*(p[0].x + ucoord*(p[1].x - p[0].x)) + vcoord*(p[2].x + ucoord*(p[3].x - p[2].x))) + wcoord*((1 - vcoord)*(p[4].x + ucoord*(p[5].x - p[4].x)) + vcoord*(p[6].x + ucoord*(p[7].x - p[6].x)));
//ip.y = (1 - wcoord)*((1 - vcoord)*(p[0].y + ucoord*(p[1].y - p[0].y)) + vcoord*(p[2].y + ucoord*(p[3].y - p[2].y))) + wcoord*((1 - vcoord)*(p[4].y + ucoord*(p[5].y - p[4].y)) + vcoord*(p[6].y + ucoord*(p[7].y - p[6].y)));
//ip.z = (1 - wcoord)*((1 - vcoord)*(p[0].z + ucoord*(p[1].z - p[0].z)) + vcoord*(p[2].z + ucoord*(p[3].z - p[2].z))) + wcoord*((1 - vcoord)*(p[4].z + ucoord*(p[5].z - p[4].z)) + vcoord*(p[6].z + ucoord*(p[7].z - p[6].z)));
//in.x = (1 - wcoord)*((1 - vcoord)*(n[0].x + ucoord*(n[1].x - n[0].x)) + vcoord*(n[2].x + ucoord*(n[3].x - n[2].x))) + wcoord*((1 - vcoord)*(n[4].x + ucoord*(n[5].x - n[4].x)) + vcoord*(n[6].x + ucoord*(n[7].x - n[6].x)));
//in.y = (1 - wcoord)*((1 - vcoord)*(n[0].y + ucoord*(n[1].y - n[0].y)) + vcoord*(n[2].y + ucoord*(n[3].y - n[2].y))) + wcoord*((1 - vcoord)*(n[4].y + ucoord*(n[5].y - n[4].y)) + vcoord*(n[6].y + ucoord*(n[7].y - n[6].y)));
//in.z = (1 - wcoord)*((1 - vcoord)*(n[0].z + ucoord*(n[1].z - n[0].z)) + vcoord*(n[2].z + ucoord*(n[3].z - n[2].z))) + wcoord*((1 - vcoord)*(n[4].z + ucoord*(n[5].z - n[4].z)) + vcoord*(n[6].z + ucoord*(n[7].z - n[6].z)));
trilinear(ip, p, ucoord, vcoord, wcoord);
trilinear(in, n, ucoord, vcoord, wcoord);
// normalize normal
const float factor = std::sqrt(in.x * in.x + in.y * in.y + in.z * in.z);
in.x = in.x / factor;
in.y = in.y / factor;
in.z = in.z / factor;
// the fourth coordinate
ip.w = 1.f;
in.w = 0.f;
// global index
//const int gidx = int(9 * gl_index + v_count);
int gidx = int(9 * gl_index + v_count);
// this point is only used if contours with more than three vertices
// are present
//bool pt_used{ false };
// check if the vertex will be used, this happens
// if there are contours with more than three edges
for (int i = 0; i < (int)cnt_; i++) {
if (get_cnt_size(i, c_) > 3) {
int address{ -1 };
gidx = insert_vertex_fast(gidx, ht_, v_, address);
//gidx = v_.add(ht_, gidx, ip, in);
v_count++;
float4 ip;
//trilinear(ip, p, ucoord, vcoord, wcoord);
ip.x = (1 - wcoord)*((1 - vcoord)*(p[tr].x + ucoord*(p[tr + 1].x - p[tr].x)) + vcoord*(p[tr + 2].x + ucoord*(p[tr + 3].x - p[tr + 2].x))) + wcoord*((1 - vcoord)*(p[tr + 4].x + ucoord*(p[tr + 5].x - p[tr + 4].x)) + vcoord*(p[tr + 6].x + ucoord*(p[tr + 7].x - p[tr + 6].x)));
ip.y = (1 - wcoord)*((1 - vcoord)*(p[tr].y + ucoord*(p[tr + 1].y - p[tr].y)) + vcoord*(p[tr + 2].y + ucoord*(p[tr + 3].y - p[tr + 2].y))) + wcoord*((1 - vcoord)*(p[tr + 4].y + ucoord*(p[tr + 5].y - p[tr + 4].y)) + vcoord*(p[tr + 6].y + ucoord*(p[tr + 7].y - p[tr + 6].y)));
ip.z = (1 - wcoord)*((1 - vcoord)*(p[tr].z + ucoord*(p[tr + 1].z - p[tr].z)) + vcoord*(p[tr + 2].z + ucoord*(p[tr + 3].z - p[tr + 2].z))) + wcoord*((1 - vcoord)*(p[tr + 4].z + ucoord*(p[tr + 5].z - p[tr + 4].z)) + vcoord*(p[tr + 6].z + ucoord*(p[tr + 7].z - p[tr + 6].z)));
ip.w = 1.f;
v_.vertices[address] = ip;
//trilinear(ip, n, ucoord, vcoord, wcoord);
ip.x = (1 - wcoord)*((1 - vcoord)*(n[tr].x + ucoord*(n[tr + 1].x - n[tr].x)) + vcoord*(n[tr + 2].x + ucoord*(n[tr + 3].x - n[tr + 2].x))) + wcoord*((1 - vcoord)*(n[tr + 4].x + ucoord*(n[tr + 5].x - n[tr + 4].x)) + vcoord*(n[tr + 6].x + ucoord*(n[tr + 7].x - n[tr + 6].x)));
ip.y = (1 - wcoord)*((1 - vcoord)*(n[tr].y + ucoord*(n[tr + 1].y - n[tr].y)) + vcoord*(n[tr + 2].y + ucoord*(n[tr + 3].y - n[tr + 2].y))) + wcoord*((1 - vcoord)*(n[tr + 4].y + ucoord*(n[tr + 5].y - n[tr + 4].y)) + vcoord*(n[tr + 6].y + ucoord*(n[tr + 7].y - n[tr + 6].y)));
ip.z = (1 - wcoord)*((1 - vcoord)*(n[tr].z + ucoord*(n[tr + 1].z - n[tr].z)) + vcoord*(n[tr + 2].z + ucoord*(n[tr + 3].z - n[tr + 2].z))) + wcoord*((1 - vcoord)*(n[tr + 4].z + ucoord*(n[tr + 5].z - n[tr + 4].z)) + vcoord*(n[tr + 6].z + ucoord*(n[tr + 7].z - n[tr + 6].z)));
// normalize normal
const float factor = std::sqrt(ip.x * ip.x + ip.y * ip.y + ip.z * ip.z);
ip.x = ip.x / factor;
ip.y = ip.y / factor;
ip.z = ip.z / factor;
ip.w = 0.f;
v_.normals[address] = ip;
break;
}
}
// loop over the contorus
for (int i = 0; i < (int)cnt_; i++) {
const unsigned char cnt_sz = (unsigned char)get_cnt_size(i, c_);
if (cnt_sz == 3) {
//const int a_ = atomicAdd(t_.t_size, 1);
t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], 0);
}
else {
//pt_used = true;
for (int t = 0; t < cnt_sz; t++) {
// add triangle to list
t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[get_c(i, t, c_)], v_gindex[get_c(i, (t + 1) % cnt_sz, c_)], gidx, 0);
}
}
}
} // else - there are saddle points
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Map vertex global id to position in vertex array
// to construct shared vertex list
__global__ void map_triangles(const int nr_t, VertexHashTable ht_, Triangles t_)
{
    // One thread per triangle: translate each of the three global vertex ids
    // into its final position in the vertex array (shared vertex list).
    const int t = blockIdx.x * blockDim.x + threadIdx.x;
    if (t < nr_t) {
        int4 tri = t_.triangles[t];
        tri.x = ht_.addr[tri.x];
        tri.y = ht_.addr[tri.y];
        tri.z = ht_.addr[tri.z];
        t_.triangles[t] = tri;
    }
}
__global__ void map_triangles_fast(const int nr_t, VertexHashTable ht_, Triangles t_)
{
    // Three threads cooperate on one triangle: each thread remaps exactly one
    // vertex id (component 0 -> x, 1 -> y, 2 -> z) via the vertex hash table.
    const int tid = (blockIdx.x * blockDim.x + threadIdx.x);
    const int t = tid / 3;        // triangle index
    const int c = tid % 3;        // which component this thread handles
    if (t >= nr_t)
        return;
    if (c == 0) {
        t_.triangles[t].x = ht_.addr[t_.triangles[t].x];
    }
    else if (c == 1) {
        t_.triangles[t].y = ht_.addr[t_.triangles[t].y];
    }
    else {
        t_.triangles[t].z = ht_.addr[t_.triangles[t].z];
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// create three halfedges from triangle
// for the triangle store a corresponding halfe edge
// for each vertex store the starting halfedge
__global__ void create_halfedge(const int nr_he, Triangles t_, Halfedges he_, HalfedgeHashTable het_)
{
    // Build one halfedge per thread from the triangle list
    // (nr_he == 3 * number of triangles; launch 1D grid covering nr_he threads).
    // he_e layout: x = origin vertex, y = face id, z = next halfedge,
    //              w = twin (-1 = boundary until twins are matched).
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= nr_he) {
        return;
    }
    const int offset = tid % 3;  // which edge of the triangle this thread builds
    const int t = tid / 3;       // triangle (face) index
    // get triangle data from global memory
    const int4 tri = t_.triangles[t];
    // The three directed edges of the triangle are (x->y), (y->z), (z->x)
    // for offsets 0, 1, 2 respectively.
    int v0;
    int v1;
    switch (offset) {
    case 0:
        v0 = tri.x;
        v1 = tri.y;
        break;
    case 1:
        v0 = tri.y;
        v1 = tri.z;
        break;
    default: // offset == 2
        v0 = tri.z;
        v1 = tri.x;
        break;
    }
    he_.he_e[tid].x = v0;                                // origin vertex
    he_.he_e[tid].y = t;                                 // face id
    he_.he_e[tid].z = (offset == 2) ? tid - 2 : tid + 1; // next halfedge wraps within the triangle
    he_.he_e[tid].w = -1;                                // boundary edge at initialization
    he_.he_v[v0] = tid;                                  // vertex points at one outgoing halfedge
    // Register this halfedge in the hash table under its own (origin, target)
    // pair so twins can be matched later.
    // BUG FIX: the previous code registered (tri.x, tri.y) for all three
    // offsets, so halfedges 1 and 2 of every triangle were stored under the
    // wrong edge and their twins could never be connected.
    addHalfedgeToHashTable(het_, tid, v0, v1);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// map vertex index of src vertex in halfedge indices to final index in vertex array
// at this point we know the total nr. of half edges, and the total nr. of vertices
__global__ void map_halfedge_vertex(const int nr_he, int4* he_e, VertexHashTable ht_, int* he_v)
{
    // One thread per halfedge.
    // he layout: x = origin vertex, y = face, z = next, w = twin.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= nr_he)
        return;
    // Translate the origin vertex's global key into its final index in the
    // vertex array, then let that vertex reference this outgoing halfedge.
    //const int v = find_vertex(he_e[tid].x, ht_);
    const int vaddr = ht_.addr[he_e[tid].x];
    he_e[tid].x = vaddr;
    he_v[vaddr] = tid; // vertex points to this halfedge
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// map index to boundary vertex
__global__ void map_halfedge_bndvertex(const int nr_he, int4* he_e, int* he_v)
{
    // One thread per halfedge.
    // he layout: x = origin vertex, y = face, z = next, w = twin (-1 on boundary).
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= nr_he)
        return;
    const int4 he = he_e[tid];
    // For boundary halfedges (no twin), make the origin vertex point at this
    // halfedge so boundary traversal can start from the vertex.
    if (he.w == -1) {
        he_v[he.x] = tid;
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// map halfedge twins
__global__ void map_halfedge_twins(Halfedges he_, HalfedgeHashTable het_)
{
    // One thread per hash-table bucket: a used bucket (key > 0) stores the pair
    // of halfedge ids that share one undirected edge; link them as twins.
    const int b = blockIdx.x * blockDim.x + threadIdx.x;
    if (b >= het_.t_size) {
        return;
    }
    if (het_.key[b] > 0) {
        const int2 pair = het_.he_ids[b];
        he_.he_e[pair.x].w = pair.y;
        he_.he_e[pair.y].w = pair.x;
    }
}
__global__ void map_halfedge_twins_fast(Halfedges he_, HalfedgeHashTable het_)
{
    // Two threads per hash-table bucket: each thread writes one direction of
    // the twin link stored in the bucket's halfedge-id pair.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int b = tid / 2;
    if (b >= het_.t_size) {
        return;
    }
    if (het_.key[b] > 0) {
        const int2 pair = het_.he_ids[b];
        if ((tid % 2) == 0) {
            he_.he_e[pair.x].w = pair.y;
        }
        else {
            he_.he_e[pair.y].w = pair.x;
        }
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// add halfedge to table
__device__ void addHalfedges(Halfedges he_, HalfedgeHashTable het_, const int v0, const int v1, const int v2)
{
    // Append the three halfedges of triangle (v0, v1, v2) to the halfedge
    // array and register each directed edge in the hash table so twins can be
    // connected later.
    // BUG FIX: reserve all three slots in one atomic operation. The previous
    // atomicAdd(he_.t_size, 1) reserved a single slot while writing three
    // entries, so concurrent triangles overwrote each other's halfedges and
    // the face id a_/3 was computed from a non-multiple of 3.
    // NOTE(review): after this fix he_.t_size counts halfedges (array
    // elements, like ac_.t_size); confirm host code does not assume it counts
    // triangles.
    const int a_ = atomicAdd(he_.t_size, 3);
    const int f_ = a_ / 3; // face id: three halfedges per face (triangle)
    // he 0: v0 -> v1
    he_.he_e[a_].x = v0;     // origin vertex
    he_.he_e[a_].y = f_;     // face id
    he_.he_e[a_].z = a_ + 1; // next
    he_.he_e[a_].w = -1;     // default is boundary edge
    addHalfedgeToHashTable(het_, a_, v0, v1);
    // he 1: v1 -> v2
    he_.he_e[a_ + 1].x = v1;
    he_.he_e[a_ + 1].y = f_;
    he_.he_e[a_ + 1].z = a_ + 2;
    he_.he_e[a_ + 1].w = -1;
    addHalfedgeToHashTable(het_, a_ + 1, v1, v2);
    // he 2: v2 -> v0 (next wraps back to he 0)
    he_.he_e[a_ + 2].x = v2;
    he_.he_e[a_ + 2].y = f_;
    he_.he_e[a_ + 2].z = a_;
    he_.he_e[a_ + 2].w = -1;
    addHalfedgeToHashTable(het_, a_ + 2, v2, v0);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Halfedge Marching cubes
__global__ void he_mcSlice(const float i0, cudaTextureObject_t v_data, UGrid ugrid, MC_lookup l_tables, int nr_cells, int* cellid, AmbiguousCells ac_, VertexHashTable ht_, Vertices v_, HalfedgeHashTable het_, Halfedges he_)
{
    // Halfedge Marching Cubes over the active cells listed in cellid.
    // For each cell: classify against iso-value i0, intersect the flagged cell
    // edges, insert unique vertices via the vertex hash table, and emit the
    // case's triangles as halfedge triples (addHalfedges). Cells whose edge-
    // table entry has BIT_16 set are appended to ac_ and left for a later pass
    // (he_tSlice) instead of being triangulated here.
    // Launch: 1D grid, one thread per entry of cellid (nr_cells threads).
    // get thread id
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (nr_cells <= tid)
        return;
    // compute grid indices (i,j,k) of this cell from its global index
    const int gl_index = cellid[tid];
    const int i_index = ugrid.i_index(gl_index);
    const int j_index = ugrid.j_index(gl_index);
    const int k_index = ugrid.k_index(gl_index);
    // construct the 8 cell corner positions
    float3 v[8];
    cell_vertices(v, i_index, j_index, k_index, ugrid);
    // sample the scalar field at the 8 corners from the 3D texture
    float u[8];
    u[0] = tex3D<float>(v_data, i_index, j_index, k_index);
    u[1] = tex3D<float>(v_data, i_index + 1, j_index, k_index);
    u[2] = tex3D<float>(v_data, i_index, j_index + 1, k_index);
    u[3] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index);
    u[4] = tex3D<float>(v_data, i_index, j_index, k_index + 1);
    u[5] = tex3D<float>(v_data, i_index + 1, j_index, k_index + 1);
    u[6] = tex3D<float>(v_data, i_index, j_index + 1, k_index + 1);
    u[7] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index + 1);
    // compute normals (field gradients) at the corners
    float3 n[8];
    gradient(n, v_data, ugrid, i_index, j_index, k_index);
    // compute the MC case: one bit per corner with u >= i0 (256 cases)
    uint i_case{ 0 };
    i_case = i_case + ((uint)(u[0] >= i0));
    i_case = i_case + ((uint)(u[1] >= i0)) * 2;
    i_case = i_case + ((uint)(u[2] >= i0)) * 4;
    i_case = i_case + ((uint)(u[3] >= i0)) * 8;
    i_case = i_case + ((uint)(u[4] >= i0)) * 16;
    i_case = i_case + ((uint)(u[5] >= i0)) * 32;
    i_case = i_case + ((uint)(u[6] >= i0)) * 64;
    i_case = i_case + ((uint)(u[7] >= i0)) * 128;
    // ambiguous cases are deferred and processed in the next pass
    //if (105 == l_tables.t_ambig[i_case]) {
    ushort e_ = l_tables.e_[i_case];
    if (e_ & BIT_16) {
        // append this cell to the ambiguous-cell list
        //ac_.add(gl_index);
        ac_.a_cells[atomicAdd(ac_.t_size, 1)] = gl_index;
        return; // don't process this cell with standard MC
    }
    // Compute intersection with edges.
    // gei_pattern_ packs 5 bits per edge, decoded below as three 1-bit i/j/k
    // offsets of the owning grid vertex plus a 2-bit local slot (off_val).
    const unsigned long long gei_pattern_ = 670526590282893600ull;
    // l_edges_ packs the two endpoint corner indices of each cell edge into
    // the low and high nibble of one byte
    const unsigned char l_edges_[12]{ 16, 49, 50, 32, 84, 117, 118, 100, 64, 81, 115, 98 };
    int v_gindex[12]{};  // global vertex index per cell edge, -1 if edge is not cut
    ushort flag{ 1 };    // walking bit selecting edge e in the edge mask e_
    //const ushort e_pattern = l_tables.ePattern(i_case); // l_tables.e_pattern[i_case];
    for (int e = 0; e < 12; e++) {
        v_gindex[e] = -1;
        if (flag & e_) {
            // compute edge intersection:
            // local coordinate l along the edge where the field crosses i0
            const int v0 = (l_edges_[e] & 0xF);
            const int v1 = (l_edges_[e] >> 4) & 0xF;
            const float l = (i0 - u[v0]) / (u[v1] - u[v0]);
            // linearly interpolate position and normal along the edge
            float4 vp = make_float4(v[v0].x + l*(v[v1].x - v[v0].x), v[v0].y + l*(v[v1].y - v[v0].y), v[v0].z + l*(v[v1].z - v[v0].z), 1.f);
            float4 np = make_float4(n[v0].x + l*(n[v1].x - n[v0].x), n[v0].y + l*(n[v1].y - n[v0].y), n[v0].z + l*(n[v1].z - n[v0].z), 0.f);
            const float length = std::sqrt(np.x * np.x + np.y * np.y + np.z * np.z);
            np.x = np.x / length;
            np.y = np.y / length;
            np.z = np.z / length;
            // get unique vertex index:
            // build the edge's global key from the owning grid vertex
            // (9 key slots per grid vertex, selected by off_val)
            const int ix = i_index + (int)((gei_pattern_ >> 5 * e) & 1); // global_edge_id[eg][0];
            const int iy = j_index + (int)((gei_pattern_ >> (5 * e + 1)) & 1); // global_edge_id[eg][1];
            const int iz = k_index + (int)((gei_pattern_ >> (5 * e + 2)) & 1); // global_edge_id[eg][2];
            const int off_val = (int)((gei_pattern_ >> (5 * e + 3)) & 3);
            v_gindex[e] = insert_vertex(int(9 * ugrid.gl_index(ix, iy, iz) + off_val), ht_, v_, vp, np);
        }
        flag <<= 1;
    }
    // compute triangles: the case's triangle list packs local edge indices
    // 4 bits each (up to 5 triangles), with 0xF as the terminator
    //const unsigned char* t_ambig = l_tables.t_ambig;
    unsigned long long tl_ = l_tables.t_[i_case];
    for (int t = 0; t < 16; t += 3) {
        //const int t_index = i_case * 16 + t;
        //if (t_pattern[i_case * 16 + t] == -1) {
        if (((tl_ >> (4 * t)) & 0xFull) == 0xF) {
            // there are no more triangles
            break;
        }
        // save triangle: decode its three edge-local vertex indices
        const int v0 = (int)((tl_ >> (4 * t)) & 0xFull);
        const int v1 = (int)((tl_ >> (4 * (t + 1))) & 0xFull);
        const int v2 = (int)((tl_ >> (4 * (t + 2))) & 0xFull);
        // create the three halfedges of this triangle
        addHalfedges(he_, het_, v_gindex[v0], v_gindex[v1], v_gindex[v2]);
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute ambiguous cells
__global__ void he_tSlice(const float i0, cudaTextureObject_t v_data, UGrid ugrid, MC_lookup l_tables, const int nr_cells, AmbiguousCells ac_, VertexHashTable ht_,Vertices v_, HalfedgeHashTable het_, Halfedges he_)
{
// get cell id
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (nr_cells <= tid)
return;
// compute grid indices from global index
const int gl_index = ac_.a_cells[tid];
const int i_index = ugrid.i_index(gl_index);
const int j_index = ugrid.j_index(gl_index);
const int k_index = ugrid.k_index(gl_index);
// construct 8 cell vertices
float3 p[8];
cell_vertices(p, i_index, j_index, k_index, ugrid);
// scalar values at vertices
float F[8];
F[0] = tex3D<float>(v_data, i_index, j_index, k_index);
F[1] = tex3D<float>(v_data, i_index + 1, j_index, k_index);
F[2] = tex3D<float>(v_data, i_index, j_index + 1, k_index);
F[3] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index);
F[4] = tex3D<float>(v_data, i_index, j_index, k_index + 1);
F[5] = tex3D<float>(v_data, i_index + 1, j_index, k_index + 1);
F[6] = tex3D<float>(v_data, i_index, j_index + 1, k_index + 1);
F[7] = tex3D<float>(v_data, i_index + 1, j_index + 1, k_index + 1);
// compute normals at vertices
float3 n[8];
gradient(n, v_data, ugrid, i_index, j_index, k_index);
// compute case
uint i_case{ 0 };
i_case = i_case + ((uint)(F[0] >= i0));
i_case = i_case + ((uint)(F[1] >= i0)) * 2;
i_case = i_case + ((uint)(F[2] >= i0)) * 4;
i_case = i_case + ((uint)(F[3] >= i0)) * 8;
i_case = i_case + ((uint)(F[4] >= i0)) * 16;
i_case = i_case + ((uint)(F[5] >= i0)) * 32;
i_case = i_case + ((uint)(F[6] >= i0)) * 64;
i_case = i_case + ((uint)(F[7] >= i0)) * 128;
// Compute intersection with edges
const unsigned long long gei_pattern_ = 670526590282893600ull;
const unsigned char l_edges_[12]{ 16, 49, 50, 32, 84, 117, 118, 100, 64, 81, 115, 98 };
// compute intersection with cell edges
float ecoord[12]{};
int v_gindex[12]{};
ushort flag{ 1 };
ushort e_ = l_tables.e_[i_case];
//ushort e_pattern = l_tables.ePattern(i_case); // l_tables.e_pattern[i_case];
for (int e = 0; e < 12; e++) {
v_gindex[e] = -1;
//ecoord[e] = 0.f;
if (flag & e_) {
// compute edge inersection
// compute local coordinate along edge
const int v0 = (l_edges_[e] & 0xF);
const int v1 = (l_edges_[e] >> 4) & 0xF;
const float l = (i0 - F[v0]) / (F[v1] - F[v0]);
float4 vp = make_float4(p[v0].x + l*(p[v1].x - p[v0].x), p[v0].y + l*(p[v1].y - p[v0].y), p[v0].z + l*(p[v1].z - p[v0].z), 1.f);
float4 np = make_float4(n[v0].x + l*(n[v1].x - n[v0].x), n[v0].y + l*(n[v1].y - n[v0].y), n[v0].z + l*(n[v1].z - n[v0].z), 0.f);
const float length = std::sqrt(np.x * np.x + np.y * np.y + np.z * np.z);
np.x = np.x / length;
np.y = np.y / length;
np.z = np.z / length;
// get unique vertex index
// compute vertex global index
const int ix = i_index + (int)((gei_pattern_ >> 5 * e) & 1); // global_edge_id[eg][0];
const int iy = j_index + (int)((gei_pattern_ >> (5 * e + 1)) & 1); // global_edge_id[eg][1];
const int iz = k_index + (int)((gei_pattern_ >> (5 * e + 2)) & 1); // global_edge_id[eg][2];
const int off_val = (int)((gei_pattern_ >> (5 * e + 3)) & 3);
v_gindex[e] = insert_vertex(int(9 * ugrid.gl_index(ix, iy, iz) + off_val), ht_, v_, vp, np);
// remember local coordinate along edge
ecoord[e] = l;
}
flag <<= 1;
}
// compute oriented contours
// 1. build segments
// 2. connect segments
// build up segments
// set segments map
unsigned char segm_[12] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
auto set_segm = [](const int ei, const int eo, unsigned char segm_[12]) {
segm_[ei] &= 0xF0;
segm_[ei] |= ((unsigned char)eo) & 0xF;
segm_[eo] &= 0xF;
segm_[eo] |= ((unsigned char)ei) << 4;
};
auto get_segm = [](const int e, const int pos, unsigned char segm_[12]) {
if (pos == 0)
return (int)(segm_[e] & 0xF);
else
return (int)((segm_[e] >> 4) & 0xF);
};
auto is_segm_set = [](const int e, unsigned char segm_[12]) {
return (segm_[e] != 0xFF);
};
auto unset_segm = [](const int e, unsigned char segm_[12]) {
segm_[e] = 0xFF;
};
// In order to compute oriented segments, the hexahedron has to be flatten.
// The insides of the faces of the hexahedron have to be all at the same
// side of the flattend hexa. This requires changing the order of the
// edges when reading from the faces
// code edges at face
unsigned short e_face_[6]{ (ushort)291, (ushort)18277, (ushort)18696, (ushort)10859, (ushort)33719, (ushort)38305 };
// code vertices at face
unsigned short v_face_[6]{ (ushort)12576, (ushort)25717, (ushort)5380, (ushort)29538, (ushort)8292, (ushort)30001 };
// reading edge from face
auto get_face_e = [e_face_](const int f, const int e) { return ((e_face_[f] >> (4 * e)) & 0xF); };
auto get_face_v = [v_face_](const int f, const int e) { return ((v_face_[f] >> (4 * e)) & 0xF); };
// compute oriented segments using the isoline scheme at the faces
auto asymptotic_decider = [](const float f0, const float f1, const float f2, const float f3) {
return (f0*f3 - f1*f2) / (f0 + f3 - f1 - f2);
};
uchar f_flag{ 0 };
for (int f = 0; f < 6; f++) {
// classify face
unsigned int f_case{ 0 };
const int v0 = get_face_v(f, 0);
const int v1 = get_face_v(f, 1);
const int v2 = get_face_v(f, 2);
const int v3 = get_face_v(f, 3);
const int e0 = get_face_e(f, 0);
const int e1 = get_face_e(f, 1);
const int e2 = get_face_e(f, 2);
const int e3 = get_face_e(f, 3);
const float f0 = F[v0];
const float f1 = F[v1];
const float f2 = F[v2];
const float f3 = F[v3];
if (f0 >= i0)
f_case |= BIT_1;
if (f1 >= i0)
f_case |= BIT_2;
if (f2 >= i0)
f_case |= BIT_3;
if (f3 >= i0)
f_case |= BIT_4;
switch (f_case)
{
case 1:
set_segm(e0, e3, segm_);
break;
case 2:
set_segm(e1, e0, segm_);
break;
case 3:
set_segm(e1, e3, segm_);
break;
case 4:
set_segm(e3, e2, segm_);
break;
case 5:
set_segm(e0, e2, segm_);
break;
case 6:
{
const float val = asymptotic_decider(f0, f1, f2, f3);
if (val > i0) {
set_segm(e3, e0, segm_);
set_segm(e1, e2, segm_);
}
else if (val < i0) {
set_segm(e1, e0, segm_);
set_segm(e3, e2, segm_);
}
else {
// set flag for this face
f_flag |= (1 << f);
float ec0 = ecoord[e0];
float ec1 = ecoord[e1];
float ec2 = ecoord[e2];
float ec3 = ecoord[e3];
if ((0x218 >> (f * 2)) & BIT_1) {
ec0 = 1 - ec0;
ec2 = 1 - ec2;
}
if ((0x218 >> (f * 2)) & BIT_2) {
ec1 = 1 - ec1;
ec3 = 1 - ec3;
}
if (ec1 < ec3 && ec0 > ec2) {
set_segm(e1, e0, segm_);
set_segm(e3, e2, segm_);
}
else if (ec1 > ec3 && ec0 < ec2) {
set_segm(e3, e0, segm_);
set_segm(e1, e2, segm_);
}
else {
return;
}
}
}
break;
case 7:
set_segm(e1, e2, segm_);
break;
case 8:
set_segm(e2, e1, segm_);
break;
case 9:
{
const double val = asymptotic_decider(f0, f1, f2, f3);
if (val > i0) {
set_segm(e0, e1, segm_);
set_segm(e2, e3, segm_);
}
else if (val < i0) {
set_segm(e0, e3, segm_);
set_segm(e2, e1, segm_);
}
else {
f_flag = (1 << f);
// singular case val == i0, there are no asymptotes
// check if there is a reasonable triangulation of the face
float ec0 = ecoord[e0];
float ec1 = ecoord[e1];
float ec2 = ecoord[e2];
float ec3 = ecoord[e3];
if ((0x218 >> (f * 2)) & BIT_1) {
ec0 = 1 - ec0;
ec2 = 1 - ec2;
}
if ((0x218 >> (f * 2)) & BIT_2) {
ec1 = 1 - ec1;
ec3 = 1 - ec3;
}
if (ec1 < ec3 && ec0 > ec2) {
set_segm(e0, e1, segm_);
set_segm(e2, e3, segm_);
}
else if (ec1 > ec3 && ec0 < ec2) {
set_segm(e0, e3, segm_);
set_segm(e2, e1, segm_);
}
else {
return;
}
}
}
break;
case 10:
set_segm(e2, e0, segm_);
break;
case 11:
set_segm(e2, e3, segm_);
break;
case 12:
set_segm(e3, e1, segm_);
break;
case 13:
set_segm(e0, e1, segm_);
break;
case 14:
set_segm(e3, e0, segm_);
break;
default:
break;
}
}
// connect oriented segments into oriented contours
// closed contours are coded in 64 bit unsigned long long
// 1) Each entry has 4 bits
// 2) The first 4 entries are reserved for the size of the contours
// 3) The next 12 entries are the indices of the edges constituting the contorus
// The indices are numbers from 0 to 12
unsigned long long c_ = 0xFFFFFFFFFFFF0000;
// in the 4 first bits store size of contours
auto get_cnt_size = [](const int cnt, unsigned long long &c_) {
return (size_t)((c_ & (0xF << 4 * cnt)) >> 4 * cnt);
};
auto set_cnt_size = [](const int cnt, const int size, unsigned long long &c_) {
// unset contour size
c_ &= ~(0xF << 4 * cnt);
c_ |= (size << 4 * cnt);
};
// set corresponging edge
auto set_c = [](const int cnt, const int pos, const int val, unsigned long long &c_) {
const uint mask[4] = { 0x0, 0xF, 0xFF, 0xFFF };
const uint c_sz = c_ & mask[cnt];
const uint e = 16 + 4 * ((c_sz & 0xF) + ((c_sz & 0xF0) >> 4) + ((c_sz & 0xF00) >> 8) + pos);
c_ &= ~(((unsigned long long)0xF) << e);
c_ |= (((unsigned long long)val) << e);
};
// read edge from contour
auto get_c = [](const int cnt, const int pos, unsigned long long c_) {
const uint mask[4] = { 0x0, 0xF, 0xFF, 0xFFF };
const uint c_sz = (uint)(c_ & mask[cnt]);
const uint e = 16 + 4 * ((c_sz & 0xF) + ((c_sz & 0xF0) >> 4) + ((c_sz & 0xF00) >> 8) + pos);
return (int)((c_ >> e) & 0xF);
};
// connect oriented contours
uint cnt_{ 0 };
for (uint e = 0; e < 12; e++) {
if (is_segm_set(e, segm_)) {
uint eTo = get_segm(e, 0, segm_);
uint eIn = get_segm(e, 1, segm_);
uint eStart = e;
uint pos = 0;
set_c(cnt_, pos, eStart, c_);
while (eTo != eStart) {
pos = pos + 1;
set_c(cnt_, pos, eTo, c_);
eIn = eTo;
eTo = get_segm(eIn, 0, segm_);
unset_segm(eIn, segm_);
}
// set contour length
set_cnt_size(cnt_, pos + 1, c_);
// update number of contours
cnt_ = cnt_ + 1;
}
}
// compute intersection of opposite faces
float ui[2]{};
float vi[2]{};
float wi[2]{};
unsigned char q_sol{ 0 };
const float a = (F[0] - F[1])*(-F[6] + F[7] + F[4] - F[5]) - (F[4] - F[5])*(-F[2] + F[3] + F[0] - F[1]);
const float b = (i0 - F[0])*(-F[6] + F[7] + F[4] - F[5]) + (F[0] - F[1])*(F[6] - F[4]) - (i0 - F[4])*(-F[2] + F[3] + F[0] - F[1]) - (F[4] - F[5])*(F[2] - F[0]);
const float c = (i0 - F[0])*(F[6] - F[4]) - (i0 - F[4])*(F[2] - F[0]);;
float d = b*b - 4 * a*c;
if (d > 0) {
d = std::sqrt(d);
// compute u-coord of solutions
ui[0] = (-b - d) / (2 * a);
ui[1] = (-b + d) / (2 * a);
// compute v-coord of solutions
float g1 = F[0] * (1 - ui[0]) + F[1] * ui[0];
float g2 = F[2] * (1 - ui[0]) + F[3] * ui[0];
vi[0] = (i0 - g1) / (g2 - g1);
if (isnan(vi[0]) || isinf(vi[0])) {
vi[0] = -1.f;
}
g1 = F[0] * (1 - ui[1]) + F[1] * ui[1];
g2 = F[2] * (1 - ui[1]) + F[3] * ui[1];
vi[1] = (i0 - g1) / (g2 - g1);
if (isnan(vi[1]) || isinf(vi[1])) {
vi[1] = -1.f;
}
// compute w-coordinates of solutions
g1 = F[0] * (1 - ui[0]) + F[1] * ui[0];
g2 = F[4] * (1 - ui[0]) + F[5] * ui[0];
wi[0] = (i0 - g1) / (g2 - g1);
if (isnan(wi[0]) || isinf(wi[0])) {
wi[0] = -1.f;
}
g1 = F[0] * (1 - ui[1]) + F[1] * ui[1];
g2 = F[4] * (1 - ui[1]) + F[5] * ui[1];
wi[1] = (i0 - g1) / (g2 - g1);
if (isnan(wi[1]) || isinf(wi[1])) {
wi[1] = -1.f;
}
// correct values for roots of quadratic equations
// in case the asymptotic decider has failed
if (f_flag & BIT_1) { // face 1, w = 0;
if (wi[0] < wi[1]) wi[0] = 0;
else wi[1] = 0;
}
if (f_flag & BIT_2) { // face 2, w = 1
if (wi[0] > wi[1]) wi[1] = 1;
else wi[1] = 1;
}
if (f_flag & BIT_3) { // face 3, v = 0
if (vi[0] < vi[1]) vi[0] = 0;
else vi[1] = 0;
}
if (f_flag & BIT_4) { // face 4, v = 1
if (vi[0] > vi[1]) vi[0] = 1;
else vi[1] = 1;
}
if (f_flag & BIT_5) { // face 5, u = 0
if (ui[0] < ui[1]) ui[0] = 0;
else ui[1] = 0;
}
if (f_flag & BIT_6) { // face 6, u = 1
if (ui[0] > ui[1]) ui[0] = 1;
else ui[1] = 1;
}
// check solution intervals
if (0 < ui[0] && ui[0] < 1) {
q_sol |= 1;
}
if (0 < ui[1] && ui[1] < 1) {
q_sol |= 2;
}
if (0 < vi[0] && vi[0] < 1) {
q_sol |= 4;
}
if (0 < vi[1] && vi[1] < 1) {
q_sol |= 8;
}
if (0 < wi[0] && wi[0] < 1) {
q_sol |= 16;
}
if (0 < wi[1] && wi[1] < 1) {
q_sol |= 32;
}
}
// compute the number of solutions to the quadratic equation for a given face
auto nrQSolFace = [](const uint f, const unsigned char n) {
uint nr{ 0 };
switch (f) {
case 0:
if ((n & 0x5) == 0x5)
nr = nr + 1;
if ((n & 0xA) == 0xA)
nr = nr + 1;
break;
case 1:
if ((n & 0x11) == 0x11) nr = nr + 1;
if ((n & 0x22) == 0x22) nr = nr + 1;
break;
case 2:
if ((n & 0x18) == 0x18) nr = nr + 1;
if ((n & 0x24) == 0x24) nr = nr + 1;
break;
}
return nr;
};
// triangulate contours
// if all bits are set, then there are three pairs of nontrivial solutions
// to the quadratic equations. In this case, there is a tunnel or a contour
// with 12 vertices. If there are three contours, then there is a tunnel and
// one of the contorus with only three vertices is not part of it.
// Triangles are stored in global memory starting at offset
// count nr. of inner vertices to compute right global index
// first inner vertex has index cell_global_index + 3;
int v_count{ 3 };
if (numberOfSetBits<unsigned char>(q_sol) == 6) {
// there are at most three contours
// Possible cases:
// 1) a single contour with 12 vertices
// 2) two contours which build a tunnel
// 3) three contours, one has only 3 vertices and does not belong to the tunnel
// construct the six vertices of the inner hexagon
float3 hvt[6];
hvt[0].x = ui[0]; hvt[0].y = vi[0]; hvt[0].z = wi[0];
hvt[1].x = ui[0]; hvt[1].y = vi[0]; hvt[1].z = wi[1];
hvt[2].x = ui[1]; hvt[2].y = vi[0]; hvt[2].z = wi[1];
hvt[3].x = ui[1]; hvt[3].y = vi[1]; hvt[3].z = wi[1];
hvt[4].x = ui[1]; hvt[4].y = vi[1]; hvt[4].z = wi[0];
hvt[5].x = ui[0]; hvt[5].y = vi[1]; hvt[5].z = wi[0];
// construct vertices at intersections with the edges
auto e_vert = [&ecoord](const int e, const int i) {
const unsigned int l_coord[3]{ 1324855, 5299420, 16733440 };
unsigned char flag = (l_coord[i] >> (2 * e)) & 3;
if (flag == 3)
return ecoord[e];
else
return (float)(flag);
};
// if there are three contours, then there is a tunnel and one
// of the contours is not part of it.
unsigned char _not_tunnel = 0xF;
if (cnt_ == 3) {
// loop over the contorus
// triangulate the contour which is not part of
// the tunnel
const float uc_min = (ui[0] < ui[1]) ? ui[0] : ui[1];
const float uc_max = (ui[0] < ui[1]) ? ui[1] : ui[0];
for (int t = 0; t < (int)cnt_; t++) {
if (get_cnt_size(t, c_) == 3) {
float umin = 2;
float umax = -2;
uint e0 = get_c(t, 0, c_);
uint e1 = get_c(t, 1, c_);
uint e2 = get_c(t, 2, c_);
const float u_e0 = e_vert(e0, 0);
const float u_e1 = e_vert(e1, 0);
const float u_e2 = e_vert(e2, 0);
umin = (u_e0 < umin) ? u_e0 : umin;
umin = (u_e1 < umin) ? u_e1 : umin;
umin = (u_e2 < umin) ? u_e2 : umin;
umax = (u_e0 > umax) ? u_e0 : umax;
umax = (u_e1 > umax) ? u_e1 : umax;
umax = (u_e2 > umax) ? u_e1 : umax;
if (uc_min > umax || uc_max < umin) {
// this contour is not part of the tunnel
_not_tunnel = t;
// save triangle in global memory
addHalfedges(he_, het_, v_gindex[e0], v_gindex[e1], v_gindex[e2]);
//const int a_ = atomicAdd(he_cnt, 3);
//addHalfedges(nr_he, he_e, he_table, he_ids, a_, v_gindex[e0], v_gindex[e1], v_gindex[e2]);
//t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[e0], v_gindex[e1], v_gindex[e2], 0);
}
}
}
}
// compute vertices of inner hexagon, save new vertices in list and compute and keep
// global vertice index to build triangle connectivity later on.
int tg_idx[6];
for (int i = 0; i < 6; i++) {
float4 hp;
float4 hn;
// local coordinates for trilinear interpolation
const float u = hvt[i].x; const float v = hvt[i].y; const float w = hvt[i].z;
hp.x = (1 - w)*((1 - v)*(p[0].x + u*(p[1].x - p[0].x)) + v*(p[2].x + u*(p[3].x - p[2].x))) + w*((1 - v)*(p[4].x + u*(p[5].x - p[4].x)) + v*(p[6].x + u*(p[7].x - p[6].x)));
hp.y = (1 - w)*((1 - v)*(p[0].y + u*(p[1].y - p[0].y)) + v*(p[2].y + u*(p[3].y - p[2].y))) + w*((1 - v)*(p[4].y + u*(p[5].y - p[4].y)) + v*(p[6].y + u*(p[7].y - p[6].y)));
hp.z = (1 - w)*((1 - v)*(p[0].z + u*(p[1].z - p[0].z)) + v*(p[2].z + u*(p[3].z - p[2].z))) + w*((1 - v)*(p[4].z + u*(p[5].z - p[4].z)) + v*(p[6].z + u*(p[7].z - p[6].z)));
hn.x = (1 - w)*((1 - v)*(n[0].x + u*(n[1].x - n[0].x)) + v*(n[2].x + u*(n[3].x - n[2].x))) + w*((1 - v)*(n[4].x + u*(n[5].x - n[4].x)) + v*(n[6].x + u*(n[7].x - n[6].x)));
hn.y = (1 - w)*((1 - v)*(n[0].y + u*(n[1].y - n[0].y)) + v*(n[2].y + u*(n[3].y - n[2].y))) + w*((1 - v)*(n[4].y + u*(n[5].y - n[4].y)) + v*(n[6].y + u*(n[7].y - n[6].y)));
hn.z = (1 - w)*((1 - v)*(n[0].z + u*(n[1].z - n[0].z)) + v*(n[2].z + u*(n[3].z - n[2].z))) + w*((1 - v)*(n[4].z + u*(n[5].z - n[4].z)) + v*(n[6].z + u*(n[7].z - n[6].z)));
// normalize normal
const float factor = std::sqrt(hn.x * hn.x + hn.y * hn.y + hn.z * hn.z);
hn.x = hn.x / factor;
hn.y = hn.y / factor;
hn.z = hn.z / factor;
// the fourth coord.
hp.w = 1.f;
hn.w = 0.f;
// this vertices are inner vertices
tg_idx[i] = insert_vertex(int(9 * gl_index + v_count),ht_,v_,hp,hn);
//int v_addr{ -1 };
//if (insert_vertex_key(tg_idx[i], ht_, v_addr)) {
// //const int pos = atomicAdd(v_.t_size, 1);
// //v_.vertices[pos] = hp;
// //v_.normals[pos] = hn;
// //v_.v[pos].v = hp;
// //v_.v[pos].n = hn;
// //ht_.addr[v_addr] = pos;
// ht_.addr[v_addr] = v_.set(hp, hn);
//}
// update nr. of vertices
v_count++;
}
// triangulate contours with inner hexagon
unsigned char tcon_[12];
for (int i = 0; i < (int)cnt_; i++) {
if (_not_tunnel != i) { // contour belongs to tunnel
const int cnt_sz = (int)get_cnt_size(i, c_);
for (int r = 0; r < cnt_sz; r++) {
int index = -1;
double dist = 1000.;
uint ci = get_c(i, r, c_);
const double u_edge = e_vert(ci, 0);
const double v_edge = e_vert(ci, 1);
const double w_edge = e_vert(ci, 2);
for (int s = 0; s < 6; s++) {
const double uval = u_edge - hvt[s].x;
const double vval = v_edge - hvt[s].y;
const double wval = w_edge - hvt[s].z;
double val = uval*uval + vval*vval + wval*wval;
if (dist > val) {
index = s;
dist = val;
}
}
tcon_[ci] = (unsigned char)index;
}
// correspondence between vertices found
// create triangles
// needs some functions
auto distanceRingIntsModulo = [](const int d1, const int d2) {
const int r = (d1 - d2) < 0 ? d2 - d1 : d1 - d2;
return (r > 2 ? 6 - r : r);
};
auto midpointRingIntModulo = [](const int d1, const int d2) {
const int dmax = (d1 > d2) ? d1 : d2;
const int dmin = (d1 < d2) ? d1 : d2;
return ((dmax + 2) % 6 == dmin) ? (dmax + 1) % 6 : (dmax + dmin) / 2;
};
for (int r = 0; r < cnt_sz; r++) {
const uint tid1 = get_c(i, r, c_);
const uint tid2 = get_c(i, ((r + 1) % cnt_sz), c_);
const uint cid1 = tcon_[tid1];
const uint cid2 = tcon_[tid2];
// compute index distance
const int dst = distanceRingIntsModulo(cid1, cid2);
switch (dst)
{
case 0:
{
addHalfedges(he_, het_, v_gindex[tid1], v_gindex[tid2], tg_idx[cid1]);
//const int a_ = atomicAdd(he_cnt, 3);
//addHalfedges(nr_he, he_e, he_table, he_ids, a_, v_gindex[tid1], v_gindex[tid2], tg_idx[cid1]);
//t_.triangles[atomicAdd(t_.t_size, 1)] = make_int4(v_gindex[tid1], v_gindex[tid2], tg_idx[cid1], 0);
}
break;
case 1:
{
// measure diagonals
// triangulate along shortest diagonal
float u_edge = e_vert(tid1, 0);
float v_edge = e_vert(tid1, 1);
float w_edge = e_vert(tid1, 2);
const float l1 = (u_edge - hvt[cid2].x)*(u_edge - hvt[cid2].x) + (v_edge - hvt[cid2].y)*(v_edge - hvt[cid2].y) + (w_edge - hvt[cid2].z)*(w_edge - hvt[cid2].z);
u_edge = e_vert(tid2, 0);
v_edge = e_vert(tid2, 1);
w_edge = e_vert(tid2, 2);
const double l2 = (u_edge - hvt[cid1].x)*(u_edge - hvt[cid1].x) + (v_edge - hvt[cid1].y)*(v_edge - hvt[cid1].y) + (w_edge - hvt[cid1].z)*(w_edge - hvt[cid1].z);
if (l1 < l2) {
addHalfedges(he_, het_, v_gindex[tid1], v_gindex[tid2], tg_idx[cid2]);
addHalfedges(he_, het_, v_gindex[tid1], tg_idx[cid2], tg_idx[cid1]);
}
else {
addHalfedges(he_, het_, v_gindex[tid1], v_gindex[tid2], tg_idx[cid1]);
addHalfedges(he_, het_, v_gindex[tid2], tg_idx[cid2], tg_idx[cid1]);
}
}
break;
case 2:
{
const int cidm = midpointRingIntModulo(cid1, cid2);
addHalfedges(he_, het_, v_gindex[tid1], v_gindex[tid2], tg_idx[cidm]);
addHalfedges(he_, het_, v_gindex[tid1], tg_idx[cidm], tg_idx[cid1]);
addHalfedges(he_, het_, v_gindex[tid2], tg_idx[cid2], tg_idx[cidm]);
}
break;
} // switch
} // for loop over the vertices of the contour
} // if (_not_tunnel)
} // for loop over contours
if (cnt_ == 1) {
// there is a single contour
// triangulate and close inner hexagon
addHalfedges(he_, het_, tg_idx[0], tg_idx[2], tg_idx[1]);
addHalfedges(he_, het_, tg_idx[2], tg_idx[4], tg_idx[3]);
addHalfedges(he_, het_, tg_idx[0], tg_idx[5], tg_idx[4]);
addHalfedges(he_, het_, tg_idx[0], tg_idx[4], tg_idx[2]);
}
}
else {
// there is no tunnel
// handle case with no saddle point as simple polygons with 3, 4, 5 or six vertices
const unsigned char nr_u{ (unsigned char)nrQSolFace(0, q_sol) };
const unsigned char nr_v{ (unsigned char)nrQSolFace(1, q_sol) };
const unsigned char nr_w{ (unsigned char)nrQSolFace(2, q_sol) };
const unsigned char nr_t{ (unsigned char)(nr_u + nr_v + nr_w) };
if (nr_t == nr_u || nr_t == nr_v || nr_t == nr_w) {
// loop over all contours
for (int i = 0; i < (int)cnt_; i++) {
switch (get_cnt_size(i, c_)) {
case 3:
{
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)]);
}
break;
case 4:
{
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)]);
}
break;
case 5:
{
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 3, c_)], v_gindex[get_c(i, 4, c_)]);
}
break;
case 6:
{
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 3, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)], v_gindex[get_c(i, 3, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 3, c_)], v_gindex[get_c(i, 4, c_)]);
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 4, c_)], v_gindex[get_c(i, 5, c_)]);
}
break;
} // switch over size of contour
} // loop over contorus
} // thre are no saddle points
else {
// there are saddle points
//fc1 = fs(1, 1)*fs(2, 1) + fs(1, 2)*fs(2, 2);
//fc2 = fs(1, 1)*fs(3, 1) + fs(1, 2)*fs(3, 2);
//fc3 = fs(2, 1)*fs(3, 2) + fs(2, 2)*fs(3, 1);
unsigned char fs[3][2]{ { (unsigned char)(q_sol & 1), (unsigned char)((q_sol >> 1) & 1) },{ (unsigned char)((q_sol >> 2) & 1), (unsigned char)((q_sol >> 3) & 1) },{ (unsigned char)((q_sol >> 4) & 1), (unsigned char)((q_sol >> 5) & 1) } };
const unsigned char fc1 = fs[0][0] * fs[1][0] + fs[0][1] * fs[1][1];
const unsigned char fc2 = fs[0][0] * fs[2][0] + fs[0][1] * fs[2][1];
const unsigned char fc3 = fs[1][0] * fs[2][1] + fs[1][1] * fs[2][0];
const unsigned char c_faces = fc1 + fc2 + fc3;
float ucoord{};
float vcoord{};
float wcoord{};
switch (c_faces) {
case 2:
{
if (fc1 == 0) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][0] * wi[1] + fs[1][1] * wi[0];
}
else if (fc2 == 0) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[0][0] * vi[0] + fs[0][1] * vi[1];
wcoord = fs[0][0] * wi[1] + fs[0][1] * wi[0];
}
else if (fc3 == 0) {
ucoord = fs[1][0] * ui[0] + fs[1][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][0] * wi[0] + fs[1][1] * wi[1];
}
}
break;
case 3:
{
ucoord = (fs[0][0] * ui[0] + fs[0][1] * ui[1]) / (fs[0][0] + fs[0][1]);
vcoord = (fs[1][0] * vi[0] + fs[1][1] * vi[1]) / (fs[1][0] + fs[1][1]);
wcoord = (fs[2][0] * wi[0] + fs[2][1] * wi[1]) / (fs[2][0] + fs[2][1]);
}
break;
case 4:
{
const unsigned char nr_u = fs[0][0] + fs[0][1];
const unsigned char nr_v = fs[1][0] + fs[1][1];
const unsigned char nr_w = fs[2][0] + fs[2][1];
if (nr_w == 1) {
ucoord = fs[2][0] * ui[0] + fs[2][1] * ui[1];
vcoord = fs[2][1] * vi[0] + fs[2][0] * vi[1];
wcoord = fs[2][0] * wi[0] + fs[2][1] * wi[1];
}
else if (nr_v == 1) {
ucoord = fs[1][0] * ui[0] + fs[1][1] * ui[1];
vcoord = fs[1][0] * vi[0] + fs[1][1] * vi[1];
wcoord = fs[1][1] * wi[0] + fs[1][0] * wi[1];
}
else if (nr_u == 1) {
ucoord = fs[0][0] * ui[0] + fs[0][1] * ui[1];
vcoord = fs[0][0] * vi[0] + fs[0][1] * vi[1];
wcoord = fs[0][0] * wi[0] + fs[0][1] * wi[1];
}
}
break;
} // switch(c_faces)
// create inner vertex
float4 ip;
float4 in;
ip.x = (1 - wcoord)*((1 - vcoord)*(p[0].x + ucoord*(p[1].x - p[0].x)) + vcoord*(p[2].x + ucoord*(p[3].x - p[2].x))) + wcoord*((1 - vcoord)*(p[4].x + ucoord*(p[5].x - p[4].x)) + vcoord*(p[6].x + ucoord*(p[7].x - p[6].x)));
ip.y = (1 - wcoord)*((1 - vcoord)*(p[0].y + ucoord*(p[1].y - p[0].y)) + vcoord*(p[2].y + ucoord*(p[3].y - p[2].y))) + wcoord*((1 - vcoord)*(p[4].y + ucoord*(p[5].y - p[4].y)) + vcoord*(p[6].y + ucoord*(p[7].y - p[6].y)));
ip.z = (1 - wcoord)*((1 - vcoord)*(p[0].z + ucoord*(p[1].z - p[0].z)) + vcoord*(p[2].z + ucoord*(p[3].z - p[2].z))) + wcoord*((1 - vcoord)*(p[4].z + ucoord*(p[5].z - p[4].z)) + vcoord*(p[6].z + ucoord*(p[7].z - p[6].z)));
in.x = (1 - wcoord)*((1 - vcoord)*(n[0].x + ucoord*(n[1].x - n[0].x)) + vcoord*(n[2].x + ucoord*(n[3].x - n[2].x))) + wcoord*((1 - vcoord)*(n[4].x + ucoord*(n[5].x - n[4].x)) + vcoord*(n[6].x + ucoord*(n[7].x - n[6].x)));
in.y = (1 - wcoord)*((1 - vcoord)*(n[0].y + ucoord*(n[1].y - n[0].y)) + vcoord*(n[2].y + ucoord*(n[3].y - n[2].y))) + wcoord*((1 - vcoord)*(n[4].y + ucoord*(n[5].y - n[4].y)) + vcoord*(n[6].y + ucoord*(n[7].y - n[6].y)));
in.z = (1 - wcoord)*((1 - vcoord)*(n[0].z + ucoord*(n[1].z - n[0].z)) + vcoord*(n[2].z + ucoord*(n[3].z - n[2].z))) + wcoord*((1 - vcoord)*(n[4].z + ucoord*(n[5].z - n[4].z)) + vcoord*(n[6].z + ucoord*(n[7].z - n[6].z)));
// normalize normal
const float factor = std::sqrt(in.x * in.x + in.y * in.y + in.z * in.z);
in.x = in.x / factor;
in.y = in.y / factor;
in.z = in.z / factor;
// the fourth coordinate
ip.w = 1.f;
in.w = 0.f;
// global index
int gidx = int(9 * gl_index + v_count);
// this point is only used if contours with more than three vertices
// are present
for (int i = 0; i < (int)cnt_; i++) {
if (get_cnt_size(i, c_) > 3) {
gidx = insert_vertex(gidx, ht_, v_, ip, in);
}
}
//bool pt_used{ false };
// loop over the contorus
for (int i = 0; i < (int)cnt_; i++) {
const unsigned char cnt_sz = (unsigned char)get_cnt_size(i, c_);
if (cnt_sz == 3) {
addHalfedges(he_, het_, v_gindex[get_c(i, 0, c_)], v_gindex[get_c(i, 1, c_)], v_gindex[get_c(i, 2, c_)]);
}
else {
//pt_used = true;
for (int t = 0; t < cnt_sz; t++) {
// add triangle to list
addHalfedges(he_, het_, v_gindex[get_c(i, t, c_)], v_gindex[get_c(i, (t + 1) % cnt_sz, c_)], gidx);
}
}
}
} // else - there are saddle points
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// HOST CODE
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// read volume data from a binary file
// Read a volume data set from a binary file.
// File layout: 3 x unsigned short (grid dimensions x,y,z), 3 x float
// (cell spacing dx,dy,dz), then x*y*z unsigned short scalar values.
// The grid origin is always set to (0,0,0). On open or read failure the
// function prints an error message and terminates the program.
void
p_mc::MarchingCubes::readDataFromFile(const std::string& i_file, std::array<int,3>& dim, std::array<float,3>& origin, std::array<float,3>& spacing, std::vector<float>& v_data)
{
    std::FILE* f{ nullptr };
    errno_t status = fopen_s(&f, i_file.c_str(), "rb");
    if (status != 0 || f == nullptr) {
        std::cerr << "ERROR: can't open file " << i_file.c_str() << std::endl;
        exit(1);
    }
    // header: dimensions are stored as unsigned short
    // (previously read into signed short, mismatching the element size passed to fread)
    unsigned short x_size{ 0 };
    unsigned short y_size{ 0 };
    unsigned short z_size{ 0 };
    float dx{ 0.f };
    float dy{ 0.f };
    float dz{ 0.f };
    // count successful reads so a truncated header is detected instead of
    // silently producing garbage dimensions/spacings
    size_t nr_read{ 0 };
    nr_read += std::fread(&x_size, sizeof(unsigned short), 1, f);
    nr_read += std::fread(&y_size, sizeof(unsigned short), 1, f);
    nr_read += std::fread(&z_size, sizeof(unsigned short), 1, f);
    nr_read += std::fread(&dx, sizeof(float), 1, f);
    nr_read += std::fread(&dy, sizeof(float), 1, f);
    nr_read += std::fread(&dz, sizeof(float), 1, f);
    if (nr_read != 6) {
        std::cerr << "ERROR: can't read header of file " << i_file.c_str() << std::endl;
        std::fclose(f);
        exit(1);
    }
    // scalar values: read into a temporary buffer and convert to float;
    // size_t product avoids int overflow for large grids
    const size_t v_size = (size_t)x_size * (size_t)y_size * (size_t)z_size;
    std::vector<unsigned short> v_buff(v_size); // RAII, replaces raw new[]/delete[]
    const size_t nr_vals = std::fread(v_buff.data(), sizeof(unsigned short), v_size, f);
    std::fclose(f);
    if (nr_vals != v_size) {
        std::cerr << "ERROR: file " << i_file.c_str() << " is truncated" << std::endl;
        exit(1);
    }
    v_data.resize(v_size);
    for (size_t i = 0; i < v_size; i++) {
        v_data[i] = float(v_buff[i]);
    }
    // set uniform grid metadata
    dim[0] = x_size;
    dim[1] = y_size;
    dim[2] = z_size;
    origin[0] = 0.f;
    origin[1] = 0.f;
    origin[2] = 0.f;
    spacing[0] = dx;
    spacing[1] = dy;
    spacing[2] = dz;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main function to process a volume data set
// Compute an iso-surface for iso-value i0 from the volume stored in i_file and
// build a halfedge data structure for the resulting triangle mesh.
// All output arrays are allocated here with new[]; ownership passes to the caller:
//   nr_v, vertices, normals : shared vertex list, 3 floats per vertex
//                             (normal components are negated before export)
//   nr_t                    : number of triangles
//   h_hee : halfedges as int4 { x = origin vertex, y = face, z = next, w = twin }
//   h_hev : one outgoing halfedge index per vertex
//   h_hef : one halfedge index per face
void
p_mc::MarchingCubes::mc_halfedge(const float i0,const std::string& i_file, int& nr_v, float** vertices, float** normals, int& nr_t, int4** h_hee, int** h_hev,int** h_hef)
{
    Halfedges he_;
    HalfedgeHashTable het_;
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // read the input volume and set up the uniform grid
    std::cout << " ... read data from file\n";
    std::vector<float> h_data;
    UGrid ugrid;
    std::array<int, 3> dims;
    std::array<float, 3> origin;
    std::array<float, 3> spacing;
    readDataFromFile(i_file, dims, origin, spacing, h_data);
    ugrid.size(dims[0], dims[1], dims[2]);
    ugrid.dx = spacing[0];
    ugrid.dy = spacing[1];
    ugrid.dz = spacing[2];
    ugrid.x0 = origin[0];
    ugrid.y0 = origin[1];
    ugrid.z0 = origin[2];
    // timer for the total processing time
    CTimer ctimer1;
    // allocate a 3D texture holding the scalar field
    std::cout << " ... allocate 3D texture\n";
    const size_t x_size = (size_t)ugrid.idim;
    const size_t y_size = (size_t)ugrid.jdim;
    const size_t z_size = (size_t)ugrid.kdim;
    const size_t t_size = x_size * y_size * z_size;
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // copy the volume into a cudaArray and bind it to the texture object m_volume
    cudaArray* d_data;
    cudaExtent extent = make_cudaExtent(x_size, y_size, z_size);
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cudaMalloc3DArray(&d_data, &desc, extent);
    cudaMemcpy3DParms params{ 0 };
    params.srcPtr = make_cudaPitchedPtr(&(h_data[0]), x_size * sizeof(float), x_size, y_size);
    params.dstArray = d_data;
    params.extent = extent;
    params.kind = cudaMemcpyHostToDevice;
    cudaMemcpy3D(&params);
    // texture object: element reads, no filtering
    cudaTextureDesc texDesc{};
    memset(&texDesc, 0, sizeof(cudaTextureDesc));
    texDesc.readMode = cudaReadModeElementType;
    texDesc.filterMode = cudaFilterModePoint;
    cudaResourceDesc resDesc{};
    memset(&resDesc, 0, sizeof(cudaResourceDesc));
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = d_data;
    cudaCreateTextureObject(&m_volume, &resDesc, &texDesc, nullptr);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // copy MC lookup tables to the device
    MC_lookup l_tables;
    initMC_lookup(l_tables, e_pattern, t_pattern, t_ambig);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // global processing time starts here
    std::cout << " ... compute isosurface\n";
    ctimer1.start();
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // classify cells (unambiguous / ambiguous) and count intersected cell edges
    CellsIds cells;
    initCells(cells,(int)(t_size / 3));
    AmbiguousCells acells;
    initACells(acells,(int)(t_size / 4));
    int* d_vcount{ nullptr };
    int* d_acount{ nullptr };
    cudaMalloc(&d_vcount, sizeof(int));
    cudaMemset(d_vcount, 0, sizeof(int));
    cudaMalloc(&d_acount, sizeof(int));
    cudaMemset(d_acount, 0, sizeof(int));
    uint b_size = 512;
    uint g_size{ ((uint)t_size + b_size - 1) / b_size };
    mc_count << < g_size, b_size >> >(cells, acells, d_vcount, d_acount, i0, m_volume, ugrid, l_tables);
    // each vertex is counted up to four times (less at the boundaries), so the
    // halved sum below overestimates the exact count; it is used for buffer sizing
    int nr1{ 0 };
    int nr2{ 0 };
    cudaMemcpy(&nr1, d_vcount, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&nr2, d_acount, sizeof(int), cudaMemcpyDeviceToHost);
    nr_v = (nr1 + nr2) / 2;
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // alloc and init vertex hash table (approx. twice the number of vertices)
    VertexHashTable ht_;
    initVertexHashTable(ht_, nr_v);
    b_size = 512;
    g_size = (ht_.t_size + b_size - 1) / b_size;
    init_hash_table << < g_size, b_size >> >(ht_);
    // alloc shared vertex list and triangle list
    Vertices v_;
    initVertices(v_, nr_v);
    Triangles t_;
    initTriangles(t_, 2 * nr_v);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // compute iso-surface: mc_slice handles the unambiguous cells, ...
    const int nr_cells = size<CellsIds>(cells);
    const int nr_acells = size<AmbiguousCells>(acells);
    b_size = MC_BLOCKSIZE;
    g_size = (nr_cells + b_size - 1) / b_size;
    mc_slice << < g_size, b_size >> > (i0, m_volume, ugrid, l_tables, nr_cells, cells, ht_, v_, t_);
    // ... t_slice handles the ambiguous cells
    b_size = AMB_BLOCKSIZE;
    g_size = (nr_acells + b_size - 1) / b_size;
    t_slice << < g_size, b_size >> >(i0, m_volume, ugrid, l_tables, nr_acells, acells, ht_, v_, t_);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // build shared vertex list: map triangle corner ids to vertex array indices
    nr_v = size<Vertices>(v_);
    nr_t = size<Triangles>(t_);
    b_size = 512;
    g_size = (3 * nr_t + b_size - 1) / b_size;
    map_triangles_fast <<< g_size, b_size >>>(nr_t,ht_, t_);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // create the halfedge data structure for the triangle mesh:
    //   halfedge (int4): x = origin vertex, y = face, z = next, w = twin
    //   vertex   (int) : index of one outgoing halfedge
    //   face     (int) : index of one halfedge of this face
    const int nr_he = 3 * nr_t;
    initHalfedges(he_, nr_he, nr_v, nr_t);
    initHalfedgeHashTable(het_, 2 * nr_he);
    // three halfedges per triangle; unique twin keys stored in the hash table
    b_size = 256;
    g_size = (nr_he + b_size - 1) / b_size;
    create_halfedge << < g_size, b_size >> > (nr_he, t_, he_, het_);
    // connect each halfedge with its twin by scanning the hash table entries
    g_size = (het_.t_size + b_size - 1) / b_size;
    map_halfedge_twins_fast << < g_size, b_size >> > (he_, het_);
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // wait for all kernels before stopping the timer; kernel launches are
    // asynchronous, so stopping before the sync (as the previous code did)
    // would not measure the GPU work
    cudaDeviceSynchronize();
    ctimer1.stop();
    ctimer1.print(std::string("tmc"));
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // copy results back to the host (synchronous cudaMemcpy implies the data is ready)
    float4* v_array = new float4[nr_v];
    float4* n_array = new float4[nr_v];
    cudaMemcpy(v_array, v_.vertices, nr_v * sizeof(float4), cudaMemcpyDeviceToHost);
    cudaMemcpy(n_array, v_.normals, nr_v * sizeof(float4), cudaMemcpyDeviceToHost);
    // halfedge data structure
    *h_hee = new int4[nr_he];
    *h_hev = new int[nr_v];
    *h_hef = new int[nr_t];
    cudaMemcpy(*h_hee, he_.he_e, nr_he * sizeof(int4), cudaMemcpyDeviceToHost);
    cudaMemcpy(*h_hev, he_.he_v, nr_v * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(*h_hef, he_.he_f, nr_t * sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << " ... total nr. of vertices " << nr_v << std::endl;
    std::cout << " ... total nr. of triangles " << nr_t << std::endl;
    std::cout << " ... total nr. of unambiguous cells " << nr1 << std::endl;
    std::cout << " ... total nr. of ambiguous cells " << nr2 << std::endl;
    // flatten vertices and normals into the caller's float arrays
    *vertices = new float[3 * nr_v];
    *normals = new float[3 * nr_v];
    for (int id = 0; id < nr_v; id++) {
        // copy vertices
        (*vertices)[3 * id] = v_array[id].x;
        (*vertices)[3 * id + 1] = v_array[id].y;
        (*vertices)[3 * id + 2] = v_array[id].z;
        // copy normals, negated before export
        (*normals)[3 * id] = -n_array[id].x;
        (*normals)[3 * id + 1] = -n_array[id].y;
        (*normals)[3 * id + 2] = -n_array[id].z;
    }
    std::cout << " ... done\n";
    // free host staging buffers (the previous dead t_array copy was removed:
    // it was allocated, filled, and deleted without ever being read)
    delete[] v_array;
    delete[] n_array;
    // free device resources; destroy the texture object before releasing the
    // cudaArray it references (the texture object was previously leaked)
    cudaDestroyTextureObject(m_volume);
    cudaFreeArray(d_data);
    freeMC_lookup(l_tables);
    // NOTE(review): ht_ was never released before (leak); assumes a free helper
    // exists analogous to the other init*/free* pairs — confirm against the header
    freeVertexHashTable(ht_);
    freeVertices(v_);
    freeTriangles(t_);
    freeHalfedges(he_);
    freeHalfedgeHashTable(het_);
    freeCells(cells);
    freeACells(acells);
    // counters used for vertex/cell counting
    cudaFree(d_acount);
    cudaFree(d_vcount);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// HALFEDGE RECONSTRUCTION
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main function to process a volume data set
void
p_mc::MarchingCubes::mc_sharedvertex(const float i0, const std::string& i_file, int& nr_v, float** vertices, float** normals, int& nr_t, int** triangles)
{
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
std::cout << " ... read data from file\n";
std::vector<float> h_data;
UGrid ugrid;
std::array<int, 3> dims;
std::array<float, 3> origin;
std::array<float, 3> spacing;
readDataFromFile(i_file, dims, origin, spacing, h_data);
ugrid.size(dims[0], dims[1], dims[2]);
ugrid.dx = spacing[0];
ugrid.dy = spacing[1];
ugrid.dz = spacing[2];
ugrid.x0 = origin[0];
ugrid.y0 = origin[1];
ugrid.z0 = origin[2];
// measure processing time
CTimer ctimer1;
CTimer ctimer2;
// allocate 3D texture
std::cout << " ... allocate 3D texture\n";
const size_t x_size = (size_t)ugrid.idim;
const size_t y_size = (size_t)ugrid.jdim;
const size_t z_size = (size_t)ugrid.kdim;
const size_t t_size = x_size * y_size * z_size;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// create texture buffer for 3D data
// copy data to buffer
cudaArray* d_data;
cudaExtent extent = make_cudaExtent(x_size, y_size, z_size);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_data, &desc, extent);
//cudaCheckError();
cudaMemcpy3DParms params{ 0 };
params.srcPtr = make_cudaPitchedPtr(&(h_data[0]), x_size * sizeof(float), x_size, y_size);
params.dstArray = d_data;
params.extent = extent;
params.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(¶ms);
//cudaCheckError();
// create Texture object
// Texture description
cudaTextureDesc texDesc{};
memset(&texDesc, 0, sizeof(cudaTextureDesc));
texDesc.readMode = cudaReadModeElementType;
texDesc.filterMode = cudaFilterModePoint;
// Texture resource description
cudaResourceDesc resDesc{};
memset(&resDesc, 0, sizeof(cudaResourceDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = d_data;
// create Texture object
cudaCreateTextureObject(&m_volume, &resDesc, &texDesc, nullptr);
//cudaCheckError();
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// copy lookup tables
//ctimer2.start();
MC_lookup l_tables;
initMC_lookup(l_tables, e_pattern, t_pattern, t_ambig);
//ctimer2.stop();
//ctimer2.print(std::string(" ... lookup tables"));
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// globa processing time
std::cout << " ... compute isosurface\n";
ctimer1.start();
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Atomic counter
// Alloc memory for counting nr. of vertices
//ctimer2.start();
CellsIds cells;
initCells(cells, (int)(t_size / 3));
AmbiguousCells acells;
initACells(acells, (int)(t_size / 4));
int* d_vcount{ nullptr };
int* d_acount{ nullptr };
cudaMalloc(&d_vcount, sizeof(int));
cudaMemset(d_vcount, 0, sizeof(int));
cudaMalloc(&d_acount, sizeof(int));
cudaMemset(d_acount, 0, sizeof(int));
uint b_size = 512;
uint g_size{ ((uint)t_size + b_size - 1) / b_size };
mc_count << < g_size, b_size >> >(cells, acells, d_vcount, d_acount, i0, m_volume, ugrid, l_tables);
//cudaCheckError();
// count
// each vertex is counted four times, except those at the boundaries
// inner vertices are overestimated.
//nr_v = (int)(warpReduce<int>(d_vcount, (int)t_size)) / 2;
// read array size
//int nr_cells{ 0 };
//cudaMemcpy(&nr_cells, d_cpos, sizeof(int), cudaMemcpyDeviceToHost);
//cudaCheckError();
int nr1{ 0 };
int nr2{ 0 };
cudaMemcpy(&nr1, d_vcount, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&nr2, d_acount, sizeof(int), cudaMemcpyDeviceToHost);
nr_v = (nr1 + nr2) / 2;
//ctimer2.stop();
//ctimer2.print(std::string(" ... count vertices"));
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute triangles
// 1. alloc memory for hash table
// 2. alloc memory for vertices
// 3. alloc memory for triangles
// 4. alloc memory for computing cell ids of ambiguous cases
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// 1. alloc and init hash table
//ctimer2.start();
VertexHashTable ht_;
initVertexHashTable(ht_, nr_v); // appro. two times the number of vertices
b_size = 512;
g_size = (ht_.t_size + b_size - 1) / b_size;
init_hash_table << < g_size, b_size >> >(ht_);
//cudaCheckError();
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// 2. alloc and init vertices
Vertices v_;
initVertices(v_, nr_v);
//cudaCheckError();
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// 3. alloc and init triangles
Triangles t_;
initTriangles(t_, 2 * nr_v);
//cudaCheckError();
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute iso-surface
const int nr_cells = size<CellsIds>(cells);
const int nr_acells = size<AmbiguousCells>(acells);
b_size = MC_BLOCKSIZE;
g_size = (nr_cells + b_size - 1) / b_size;
mc_slice << < g_size, b_size >> > (i0, m_volume, ugrid, l_tables, nr_cells, cells, ht_, v_, t_);
//cudaCheckError();
//ctimer2.stop();
//ctimer2.print(std::string(" ... mc_slice()"));
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute ambiguous cases
//ctimer2.start();
b_size = AMB_BLOCKSIZE;
g_size = (nr_acells + b_size - 1) / b_size;
t_slice << < g_size, b_size >> >(i0, m_volume, ugrid, l_tables, nr_acells, acells, ht_, v_, t_);
//cudaCheckError();
//ctimer2.stop();
//ctimer2.print(std::string(" ... ambiguous cases"));
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// don't need volume data any more
//cudaFreeArray(d_data);
//cudaCheckError();
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute shared vertex list for triangle mesh
// indices of triangles have to be mapped to vertex index in vertex array
// get number of vertices
//ctimer2.start();
nr_v = size<Vertices>(v_);
// get number of triangles
nr_t = size<Triangles>(t_);
// map triangles indices
b_size = 512;
g_size = (3 * nr_t + b_size - 1) / b_size;
map_triangles_fast << < g_size, b_size >> >(nr_t, ht_, t_);
//cudaCheckError();
//ctimer2.stop();
//ctimer2.print(std::string(" ... map triangles"));
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// compute processing time
ctimer1.stop();
cudaDeviceSynchronize();
ctimer1.print(std::string("tmc"));
// Shared vertex list
float4* v_array = new float4[nr_v];
float4* n_array = new float4[nr_v];
//vAttr* vts = new vAttr[nr_v];
int4* t_array = new int4[nr_t];
cudaMemcpy(v_array, v_.vertices, nr_v * sizeof(float4), cudaMemcpyDeviceToHost);
cudaMemcpy(n_array, v_.normals, nr_v * sizeof(float4), cudaMemcpyDeviceToHost);
//cudaMemcpy(vts, v_.v, nr_v * sizeof(vAttr), cudaMemcpyDeviceToHost);
cudaMemcpy(t_array, t_.triangles, nr_t * sizeof(int4), cudaMemcpyDeviceToHost);
std::cout << " ... total nr. of vertices " << nr_v << std::endl;
std::cout << " ... total nr. of triangles " << nr_t << std::endl;
std::cout << " ... total nr. of unambiguous cells " << nr1 << std::endl;
std::cout << " ... total nr. of ambiguous cells " << nr2 << std::endl;
*vertices = new float[3 * nr_v];
*normals = new float[3 * nr_v];
*triangles = new int[3 * nr_t];
for (int id = 0; id < nr_v; id++) {
// copy vertices
(*vertices)[3 * id] = v_array[id].x;
(*vertices)[3 * id + 1] = v_array[id].y;
(*vertices)[3 * id + 2] = v_array[id].z;
(*vertices)[3 * id + 3] = 1.0f;
//// copy normals
(*normals)[3 * id] = -n_array[id].x;
(*normals)[3 * id + 1] = -n_array[id].y;
(*normals)[3 * id + 2] = -n_array[id].z;
(*normals)[3 * id + 3] = 0.0f;
}
for (int id = 0; id < nr_t; id++) {
(*triangles)[3 * id] = t_array[id].x;
(*triangles)[3 * id + 1] = t_array[id].y;
(*triangles)[3 * id + 2] = t_array[id].z;
}
std::cout << " ... done\n";
// host memory
delete[] v_array;
delete[] n_array;
//delete[] vts;
delete[] t_array;
// free common data
// free memory
cudaFreeArray(d_data);
freeMC_lookup(l_tables);
freeVertices(v_);
freeTriangles(t_);
freeCells(cells);
freeACells(acells);
// arrays for vertex count
cudaFree(d_acount);
cudaFree(d_vcount);
}
|
e2e5dcaa54ea0b52626db2d7a6fff8fa8cd162d9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* standardCUDAfunctions.cu
*
* Created on: Jul 24, 2014
* Author: preibisch
*/
#include "book.h"
#include "hip/hip_runtime.h"
#include "standardCUDAfunctions.h"
//==============================================
/*
 * getCUDAcomputeCapabilityMajorVersion - query the major compute-capability
 * number of device devCUDA.
 * Fix: the return code was previously ignored; wrap in HANDLE_ERROR for
 * consistency with every other wrapper in this file.
 */
int getCUDAcomputeCapabilityMajorVersion(int devCUDA)
{
	int major = 0, minor = 0;
	HANDLE_ERROR(hipDeviceComputeCapability(&major, &minor, devCUDA));
	return major;
}
/*
 * getCUDAcomputeCapabilityMinorVersion - query the minor compute-capability
 * number of device devCUDA.
 * Fix: the return code was previously ignored; wrap in HANDLE_ERROR for
 * consistency with every other wrapper in this file.
 */
int getCUDAcomputeCapabilityMinorVersion(int devCUDA)
{
	int major = 0, minor = 0;
	HANDLE_ERROR(hipDeviceComputeCapability(&major, &minor, devCUDA));
	return minor;
}
/* Number of CUDA-capable devices visible to the runtime. */
int getNumDevicesCUDA()
{
	int deviceCount = 0;
	HANDLE_ERROR(hipGetDeviceCount(&deviceCount));
	return deviceCount;
}
/*
 * getNameDeviceCUDA - copy the name of device devCUDA into `name`.
 * Precondition: `name` must point to a buffer of at least 256 bytes,
 * because the whole 256-byte prop.name array is copied verbatim
 * (including any bytes after the terminating NUL).
 */
void getNameDeviceCUDA(int devCUDA, char* name)
{
	hipDeviceProp_t prop;
	HANDLE_ERROR( hipGetDeviceProperties(&prop, devCUDA));
	memcpy(name,prop.name,sizeof(char)*256);
}
#include <iostream>
/*
 * isDeviceCUDAusedByDisplay - true when the device enforces a kernel
 * execution timeout, which the runtime reports for devices that also
 * drive a display.
 */
bool isDeviceCUDAusedByDisplay(int devCUDA)
{
	int timeoutEnabled = 0;
	HANDLE_ERROR(hipDeviceGetAttribute(&timeoutEnabled, hipDeviceAttributeKernelExecTimeout, devCUDA));
	return timeoutEnabled != 0;
}
/* Total global memory of device devCUDA, in bytes. */
long long int getMemDeviceCUDA(int devCUDA)
{
	hipDeviceProp_t deviceProps;
	HANDLE_ERROR(hipGetDeviceProperties(&deviceProps, devCUDA));
	return (long long int) deviceProps.totalGlobalMem;
}
/*
 * getAvailableMemDeviceCUDA - currently free global memory on device devCUDA,
 * in bytes. Side effect: makes devCUDA the current device for this thread.
 */
long long int getAvailableMemDeviceCUDA(int devCUDA)
{
	setDeviceCUDA(devCUDA);
	/* renamed locals: the original `free` shadowed the stdlib function */
	size_t freeBytes, totalBytes;
	HANDLE_ERROR(hipMemGetInfo(&freeBytes, &totalBytes));
	return (long long int) freeBytes;
}
/* Make devCUDA the current device for the calling host thread. */
void setDeviceCUDA(int devCUDA)
{
	HANDLE_ERROR(hipSetDevice(devCUDA));
}
/* Select devCUDA, then destroy its context and release all of its resources. */
void resetDeviceCUDA(int devCUDA)
{
	HANDLE_ERROR(hipSetDevice(devCUDA));
	HANDLE_ERROR(hipDeviceReset());
} | e2e5dcaa54ea0b52626db2d7a6fff8fa8cd162d9.cu | /*
* standardCUDAfunctions.cu
*
* Created on: Jul 24, 2014
* Author: preibisch
*/
#include "book.h"
#include "cuda.h"
#include "standardCUDAfunctions.h"
//==============================================
/*
 * getCUDAcomputeCapabilityMajorVersion - query the major compute-capability
 * number of device devCUDA.
 * Fix: cuDeviceComputeCapability is a deprecated *driver*-API entry point and
 * was called here without cuInit(), with its result ignored. Use the runtime
 * API instead, consistent with the rest of this file, and check the result.
 */
int getCUDAcomputeCapabilityMajorVersion(int devCUDA)
{
	int major = 0;
	HANDLE_ERROR(cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, devCUDA));
	return major;
}
/*
 * getCUDAcomputeCapabilityMinorVersion - query the minor compute-capability
 * number of device devCUDA.
 * Fix: cuDeviceComputeCapability is a deprecated *driver*-API entry point and
 * was called here without cuInit(), with its result ignored. Use the runtime
 * API instead, consistent with the rest of this file, and check the result.
 */
int getCUDAcomputeCapabilityMinorVersion(int devCUDA)
{
	int minor = 0;
	HANDLE_ERROR(cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, devCUDA));
	return minor;
}
/* Number of CUDA-capable devices visible to the runtime. */
int getNumDevicesCUDA()
{
	int deviceCount = 0;
	HANDLE_ERROR(cudaGetDeviceCount(&deviceCount));
	return deviceCount;
}
/*
 * getNameDeviceCUDA - copy the name of device devCUDA into `name`.
 * Precondition: `name` must point to a buffer of at least 256 bytes,
 * because the whole 256-byte prop.name array is copied verbatim
 * (including any bytes after the terminating NUL).
 */
void getNameDeviceCUDA(int devCUDA, char* name)
{
	cudaDeviceProp prop;
	HANDLE_ERROR( cudaGetDeviceProperties(&prop, devCUDA));
	memcpy(name,prop.name,sizeof(char)*256);
}
#include <iostream>
/*
 * isDeviceCUDAusedByDisplay - true when the device enforces a kernel
 * execution timeout, which the runtime reports for devices that also
 * drive a display.
 */
bool isDeviceCUDAusedByDisplay(int devCUDA)
{
	int timeoutEnabled = 0;
	HANDLE_ERROR(cudaDeviceGetAttribute(&timeoutEnabled, cudaDevAttrKernelExecTimeout, devCUDA));
	return timeoutEnabled != 0;
}
/* Total global memory of device devCUDA, in bytes. */
long long int getMemDeviceCUDA(int devCUDA)
{
	cudaDeviceProp deviceProps;
	HANDLE_ERROR(cudaGetDeviceProperties(&deviceProps, devCUDA));
	return (long long int) deviceProps.totalGlobalMem;
}
/*
 * getAvailableMemDeviceCUDA - currently free global memory on device devCUDA,
 * in bytes. Side effect: makes devCUDA the current device for this thread.
 */
long long int getAvailableMemDeviceCUDA(int devCUDA)
{
	setDeviceCUDA(devCUDA);
	/* renamed locals: the original `free` shadowed the stdlib function */
	size_t freeBytes, totalBytes;
	HANDLE_ERROR(cudaMemGetInfo(&freeBytes, &totalBytes));
	return (long long int) freeBytes;
}
/* Make devCUDA the current device for the calling host thread. */
void setDeviceCUDA(int devCUDA)
{
	HANDLE_ERROR(cudaSetDevice(devCUDA));
}
/* Select devCUDA, then destroy its context and release all of its resources. */
void resetDeviceCUDA(int devCUDA)
{
	HANDLE_ERROR(cudaSetDevice(devCUDA));
	HANDLE_ERROR(cudaDeviceReset());
} |
a2f3f2867f197646defced9cccf981b1ff117985.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <hiprand/hiprand.h>
#include <unistd.h>
#include <hiprand/hiprand_kernel.h>
const int NUMTHREADS = 1024;
int startNodeNumber;
int endNodeNumber;
typedef struct lList {
int path[50];
struct lList *next;
} lList;
/*
 * push - append a copy of the 50-int path `x` to the end of list *l,
 *        creating the (dummy) head node on first use.
 * l - address of the list head pointer (may point to NULL)
 * x - array of at least 50 ints to copy into the new node
 * Fixes: memcpy previously copied only 50 *bytes* of a 200-byte int[50]
 * array; and neither the new node's nor the fresh head's `next` pointer
 * was initialized, so the tail-walk below read uninitialized memory.
 */
void push(lList **l, int *x) {
    lList* newPath = (struct lList*)malloc(sizeof(struct lList));
    memcpy(newPath->path, x, sizeof(newPath->path)); // whole int[50], not 50 bytes
    newPath->next = NULL;                            // malloc does not zero memory
    if (*l == NULL) {
        *l = (struct lList*)malloc(sizeof(struct lList));
        (*l)->next = NULL;                           // head must terminate the walk
    }
    lList *iterator = *l;
    while (iterator->next != NULL) {
        iterator = iterator->next;
    }
    iterator->next = newPath;
}
/*
 * dequeue - detach the head node of list *l and return its stored path.
 * l - address of the list head pointer
 * Returns a pointer to the removed node's 50-int path array, or 0 when
 * the list is empty.
 * NOTE(review): the detached node is never free()d — the returned pointer
 * stays valid precisely because of this, but every dequeue leaks one node.
 */
int *dequeue(lList **l) {
    if (*l == NULL) {
        printf("Error! List is Empty!\n");
        return 0;
    }
    int *val = (*l)->path;
    *l = (*l)->next;
    return val;
}
/*
* Each matrix is one dimentional, but is treated 2 dimentionaly e.g.
*
* 1 0 1
* 0 1 0
* 0 1 1
*
* EQUALS
*
* {1, 0, 1, 0, 1, 0, 0, 1, 1}
*
*/
//Flags
int fTimeOnly = 0; //Only print out the computation times
int fGPUOnly = 0; //Only preform operation on the GPU
int fShowPaths = 0; //List all the generated paths
//------------------------------------------------------------------------------------------
/*
* generateAdjMatrix - Returns a new random adjacency matrix
* count - the size of the matrix. the size is count X count)
*/
/*
 * generateAdjMatrix - Returns a new random symmetric adjacency matrix
 * count - the size of the matrix (the matrix is count x count)
 * Fix: the diagonal (a node's link to itself) was previously never written,
 * leaving uninitialized malloc'd garbage there; it is now explicitly 0.
 * Caller owns the returned buffer and must free() it.
 */
long *generateAdjMatrix(int count) {
    long *randomMatrix = (long *) malloc(count * count * sizeof(long));
    int i, j;
    //Set the random seed to the current time
    srand(time(NULL));
    //Create a random symmetric adjacency matrix. Nodes do not connect to themselves.
    for (i = 0; i < count; i++) {
        randomMatrix[(i * count) + i] = 0; // no self-loops; was left uninitialized
        for (j = 0; j < count; j++) {
            if (i != j) {
                long randomResult = rand() % 2;
                randomMatrix[(i * count) + j] = randomResult;
                randomMatrix[(j * count) + i] = randomResult;
            }
        }
    }
    return randomMatrix;
}
/*
* printAdjMatrix - Prints and adjacency matrix
* count - the height of the matrix
* matrix - the adjacency matrix
*/
/*
 * printAdjMatrix - write a count x count adjacency matrix to stdout,
 * one row per line, each entry right-aligned in 3 columns.
 */
void printAdjMatrix(int count, long *matrix) {
    for (int row = 0; row < count; row++) {
        for (int col = 0; col < count; col++)
            printf("%3ld ", matrix[(row * count) + col]);
        printf("\n");
    }
}
/*
* CPUMultiplyMatrix - copies the cross multiplied matrix of matrices 1 and 2, into matrix2 (The matrices must be of the same height and width)
* The runtime is O(n^3) * t where n = the height of the matrix, t = the number of times to multiply
* matrix1 - the first adjacency matrix
* matrix2 - the second adjacency matrix
* paths - the number of paths (times) to preform matrix multiplication
* count - the height of the matrix
*/
/*
 * CPUMultiplyMatrix - repeatedly left-multiplies *matrix2 by *matrix1,
 * storing the result back into *matrix2 (both are count x count).
 * The runtime is O(n^3) * t where n = the height of the matrix, t = paths.
 * matrix1 - the first adjacency matrix (read-only)
 * matrix2 - the second adjacency matrix (overwritten with the product)
 * paths - the number of times to perform the multiplication
 * count - the height/width of the matrices
 * Fix: the accumulator was malloc'd and never zeroed before the first
 * `+=` pass, so the first product included uninitialized garbage; it is
 * now calloc'd.
 */
void CPUMultiplyMatrix(long **matrix1, long **matrix2, int paths, int count) {
    long *newMatrix = (long *) calloc((size_t) count * count, sizeof(long));
    int i, j, k;
    while (paths > 0) {
        for (i = 0; i < count; i++) {
            for (j = 0; j < count; j++) {
                for (k = 0; k < count; k++) {
                    newMatrix[(i * count) + j] += (*matrix1)[(i * count) + k] * (*matrix2)[(k * count) + j];
                }
            }
        }
        //Copy newMatrix to matrix2 and clear newMatrix for the next pass
        for (i = 0; i < count * count; i++) {
            (*matrix2)[i] = newMatrix[i];
            newMatrix[i] = 0;
        }
        paths--;
    }
    free(newMatrix);
}
/*
* CPUTraverse - takes in an adjacency matrix and returns all paths as an int array
* matrix - The original adjacency matrix
* count - the height of the matrices
* paths -
* numPaths - the length of the paths
* startNodeNumber - the starting node number (from 0 to count-1)
* endNodeNumber - the ending node number (from 0 to count-1)
*/
/*
 * CPUTraverse - intended to enumerate paths from startNodeNumber through the
 * adjacency matrix via a breadth-first expansion of a work list.
 * NOTE(review): this function is broken as written and must not be called:
 *  - `list` and `newList` are uninitialized lList** pointers; push() and
 *    dequeue() dereference them, which is undefined behavior.
 *  - `currLen` is never advanced, so every expansion overwrites path[0].
 *  - `numPaths`, `endNodeNumber`, and the `listOfPaths` output parameter are
 *    never used.
 * Kept byte-identical pending a proper rewrite.
 */
void CPUTraverse(long *matrix, int count, int numPaths, int startNodeNumber, int endNodeNumber, lList **listOfPaths) {
    int currLen = 0;
    lList **list;  // NOTE(review): used uninitialized below
    int start[50] = {startNodeNumber};
    push(list, start);
    lList **newList;  // NOTE(review): used uninitialized below
    while (*list != NULL) {
        for (int i = 0; i < count; ++i) {
            if (matrix[(((*list)->path)[currLen]) * count + i] == 1) {
                (*list)->path[currLen] = i;;
                push(newList, (*list)->path);
            }
        }
        dequeue(list);
    }
}
/*
* CPUMatrixMultiplication - Preforms CPU matrix multiplication on an adjacency matrix
* count - number of nodes
* path - number of paths
* matrix - an adjancency matrix
*/
/*
 * CPUMatrixMultiplication - Performs and times CPU matrix multiplication on an
 * adjacency matrix, printing the resulting matrix and the elapsed wall time.
 * count - number of nodes
 * path - number of multiplications to perform
 * matrix - an adjacency matrix of size count x count (not modified)
 * Fix: elapsed time was computed by subtracting tv_sec and tv_usec fields
 * independently; when end.tv_usec < start.tv_usec the unsigned subtraction
 * underflowed into a huge bogus value. The difference is now computed as one
 * signed microsecond count first.
 */
void CPUMatrixMultiplication(int count, int path, long *matrix) {
    //Array to store the multiplied result
    long *cpuMM = (long *) malloc(count * count * sizeof(long));
    //Copy matrix to cpuMM
    int i;
    for (i = 0; i < count * count; i++) {
        cpuMM[i] = matrix[i];
    }
    //Create the time interval
    struct timeval start, end;
    //Start time
    gettimeofday(&start, NULL);
    //The completed multiplied matrix
    CPUMultiplyMatrix(&matrix, &cpuMM, path, count);
    //End time
    gettimeofday(&end, NULL);
    //Compute elapsed time as a single signed quantity to avoid underflow
    long long elapsed = (long long)(end.tv_sec - start.tv_sec) * 1000000LL
                        + (end.tv_usec - start.tv_usec);
    unsigned int seconds = (unsigned int)(elapsed / 1000000);
    unsigned long microseconds = (unsigned long)(elapsed % 1000000);
    //Print the multiplied matrix
    printf("CPU Generated matrix:\n");
    if (!fTimeOnly)
        printAdjMatrix(count, cpuMM);
    printf("Took %d seconds, and %lu microseconds to compute\n\n", seconds, microseconds % 1000000);
    lList *paths = (lList) {.path = {0}, .next = NULL};
    if (fShowPaths) {
        CPUTraverse(matrix, count, cpuMM[startNodeNumber * count + endNodeNumber], startNodeNumber, endNodeNumber,
                    &paths);
    }
    free(cpuMM);
}
/*
* GPUMultiplyMatrix (GPU Only)- Returns a cross multiplied matrix of two matrixies (The matrixies must be of the same height and width)
* Each core is calculating an element on the multiplied matrix
* e.g.
*
* a b c 1a 1b 1c 2a 2b 2c
* d e f = 1d 1e 1f X 2d 2e 2f
* g h i 1g 1h 1i 2g 2h 2i
*
* There will be 9 cores (3x3)
* Core 0, the first core will calculate (a) in the final matrix (1a*2a + 1d*2b + 1g*2c)
* Core 1, the first core will calculate (b) in the final matrix (1b*2a + 1e*2b + 1h*2c)
* And so on...
*
* matrix1 - the first adjacency matrix
* matrix2 - the second adjacency matrix
* paths - the number of paths (times) to preform matrix multiplication
* count - the height of the matrix
*/
/*
__global__ void GPUMultiplyMatrix(long *matrix1, long *matrix2, int paths, int count) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
int i;
while (paths > 0) {
long sum = 0;
int col = element % count;
int row = element / count;
for (i = 0; i < count; i++) {
sum += matrix1[count * i + col] * matrix2[row * count + i];
}
//Wait till all GPU cores are finished
__syncthreads();
matrix2[element] = sum;
paths--;
}
}
*/
/*
* GPUMatrixMultiplication - Preforms GPU matrix multiplication on an adjacency matrix
* count - number of nodes
* path - number of paths
* matrix - an adjancency matrix
*/
/*
void GPUMatrixMultiplication(int count, int path, long *matrix, int nodeA, int nodeB) {
//An adjacency matrix on the GPU
long *gpuMatrix;
//The multiplied matrix on the GPU
long *gpuMM;
//A matrix that will store gpuMM on the CPU
long *multipliedMatrix = (long *) malloc(count * count * sizeof(long));
//The number of GPUs needed for matrix multiplication
int numBlocks = (count * count) / NUMTHREADS + 1;
//Allocate the memory on the GPU
hipMalloc(&gpuMatrix, (count * count * sizeof(long)));
hipMalloc(&gpuMM, (count * count * sizeof(long)));
//Copy the input matrix from the CPU to the GPU (matrix -> gpuMatrix)
hipMemcpy(gpuMatrix, matrix, (count * count * sizeof(long)), hipMemcpyHostToDevice);
hipMemcpy(gpuMM, matrix, (count * count * sizeof(long)), hipMemcpyHostToDevice);
//Create the time intervals
struct timeval start, end;
//Start time
gettimeofday(&start, NULL);
//Preform the multiplied matrix function on gpuMatrix, and gpuMM, and store into gpuMM
GPUMultiplyMatrix <<<numBlocks, NUMTHREADS>>> (gpuMatrix, gpuMM, path, count);
//End time
gettimeofday(&end, NULL);
//Copy gpuMM from the GPU to the CPU in multipiedMatrix
hipMemcpy(multipliedMatrix, gpuMM, (count * count * sizeof(long)), hipMemcpyDeviceToHost);
hipFree(&gpuMM);
//Calculate time
long microseconds = end.tv_usec - start.tv_usec;
//Print the multiplied matrix, copied earlier from the GPU
printf("GPU Generated matrix:\n");
if (!fTimeOnly)
printAdjMatrix(count, multipliedMatrix);
printf("Took %li microseconds to compute\n", microseconds);
printf("\n");
//Traverse, or not to traverse
if (fShowPaths) {
//GPUTraverse<<<>>>()
}
}
*/
/*
* GPUTraverse (GPU Only) - takes in a matrix and returns all paths as an int array the
* matrix - The original matrix
* count - the height of the matrix
* numPaths - The number of paths that exists (To compare to the result)
* pathLength - The length of the paths
* startNodeNumber - the starting node number (from 0 to count-1)
* endNodeNumber - the ending node number (from 0 to count-1)
* listOfPaths - An array storing the set of paths from A to B (output)
*/
/*
__global__ void GPUTraverse(long *matrix, int count, int numPaths, int pathLength, int startNodeNumber,
int endNodeNumber, long **listOfPaths) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
numBlocks = numPaths / NUMTHREADS + 1;
//The list of paths on the GPU
long *gpuPaths;
hipMalloc(&gpuPaths, numPaths * sizeof(long));
//current length of the path
int currLength = 0;
//current Node in the graph
int currNode = startNodeNumber;
// Algorithm
paths[element * length + currLength] = currNode;
currLength++;
while (currLength != length) {
if (currLength == length - 1) {
//this case is to assist in our bruteforce algorithm
//if we can only make one more transition instead of doing
//a random transition we try to move to the endNodeNumber point
if (matrix[currNode * count + endNodeNumber] == 1) {
currNode = endNodeNumber;
paths[element * length + currLength] = currNode;
currLength++;
//check for duplicates
//int i;
//for(i = 0; i < numPaths; i++){
// int j;
// for(j = 0; j < length; j++){
//
// }
//}
} else {//if we can't connect to the endpoint we restart
currLength = 1;
currNode = startNodeNumber;
paths[element * length + 0] = currNode;
}
} else {
int randIdx;
do {
randIdx = hiprand(&state) % count;
} while (matrix[currNode * count + randIdx] != 1);
currNode = randIdx;
paths[element * length + currLength] = currNode;
currLength++;
}
}
hipMemcpy(paths, gpuPaths, numPaths * sizeof(long), hipMemcpyDeviceToHost);
hipFree(&gpuPaths);
//Print out the paths (stack or linked list)
printf("From Node<%d> to Node<%d>: There are %d paths\n", nodeA, nodeB, numPaths);
}
*/
//Main function
/*
 * main - parse command-line options, build a random adjacency matrix, and
 * run the CPU matrix-multiplication path counter.
 * Fixes: `count` and `path` now have defaults, so invoking with only flag
 * options (e.g. just -t) no longer reads uninitialized variables; the
 * generated matrix is freed before exit.
 */
int main(int argc, char *argv[]) {
    char usageString[500] = ("Usage:\n-t: Print the Calculation Time only\n-d: Default - Set number of nodes to 10, number of paths to 3\n");
    strcat(usageString,
           "-g: Preform calculations on GPU only\n-s: Show the paths\n-c <num of nodes>\n-p <num of paths>\n");
    strcat(usageString, "-a <start node number (0 to c-1)>\n-b <end node number (0 to c-1)>\n\n");
    int count = 10; //Number of nodes (default; overridden by -c or -d)
    int path = 3;   //Number of paths (default; overridden by -p or -d)
    long *adjMatrix;
    //start and end of the path
    startNodeNumber = 0;
    endNodeNumber = 3;
    //Suppress getopt's own error messages; we print our own usage text
    opterr = 0;
    int c;
    //If no parameters are passed
    if (argc == 1) {
        fprintf(stderr, "%s", usageString);
        return 1;
    }
    while ((c = getopt(argc, argv, "dgtsc:p:a:b:")) != -1) {
        switch (c) {
            //Flags
            case 'd':
                count = 10;
                path = 2;
                break;
            case 'g':
                fGPUOnly = 1;
                break;
            case 't':
                fTimeOnly = 1;
                break;
            case 's':
                fShowPaths = 1;
                break;
            //Parameters
            case 'c':
                count = atoi(optarg);
                break;
            case 'p':
                path = atoi(optarg);
                break;
            case 'a':
                startNodeNumber = atoi(optarg);
                break;
            case 'b':
                endNodeNumber = atoi(optarg);
                break;
            case '?':
                if (optopt == 'c' || optopt == 'p' || optopt == 'a' || optopt == 'b') {
                    fprintf(stderr, "Option (-%c) requires an argument.\n\n%s\n", optopt, usageString);
                } else {
                    fprintf(stderr, "%s", usageString);
                }
                return 1;
            default:
                printf("Error...");
                return 2;
        }
    }
    path--;
    //adjMatrix now equals a new random adjacency matrix
    adjMatrix = generateAdjMatrix(count);
    //Print the generated adjacency matrix
    if (!fTimeOnly) {
        printf("Generated Adjancency Matritx:\n");
        printAdjMatrix(count, adjMatrix);
        printf("\n");
    }
    //Compute the CPU function
    if (!fGPUOnly)
        CPUMatrixMultiplication(count, path, adjMatrix);
    //Compute the GPU function
    //GPUMatrixMultiplication(count, path, adjMatrix, startNodeNumber, endNodeNumber);
    free(adjMatrix);
    return 0;
}
| a2f3f2867f197646defced9cccf981b1ff117985.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <curand.h>
#include <unistd.h>
#include <curand_kernel.h>
const int NUMTHREADS = 1024;
int startNodeNumber;
int endNodeNumber;
typedef struct lList {
int path[50];
struct lList *next;
} lList;
/*
 * push - append a copy of the 50-int path `x` to the end of list *l,
 *        creating the (dummy) head node on first use.
 * l - address of the list head pointer (may point to NULL)
 * x - array of at least 50 ints to copy into the new node
 * Fixes: memcpy previously copied only 50 *bytes* of a 200-byte int[50]
 * array; and neither the new node's nor the fresh head's `next` pointer
 * was initialized, so the tail-walk below read uninitialized memory.
 */
void push(lList **l, int *x) {
    lList* newPath = (struct lList*)malloc(sizeof(struct lList));
    memcpy(newPath->path, x, sizeof(newPath->path)); // whole int[50], not 50 bytes
    newPath->next = NULL;                            // malloc does not zero memory
    if (*l == NULL) {
        *l = (struct lList*)malloc(sizeof(struct lList));
        (*l)->next = NULL;                           // head must terminate the walk
    }
    lList *iterator = *l;
    while (iterator->next != NULL) {
        iterator = iterator->next;
    }
    iterator->next = newPath;
}
/*
 * dequeue - detach the head node of list *l and return its stored path.
 * l - address of the list head pointer
 * Returns a pointer to the removed node's 50-int path array, or 0 when
 * the list is empty.
 * NOTE(review): the detached node is never free()d — the returned pointer
 * stays valid precisely because of this, but every dequeue leaks one node.
 */
int *dequeue(lList **l) {
    if (*l == NULL) {
        printf("Error! List is Empty!\n");
        return 0;
    }
    int *val = (*l)->path;
    *l = (*l)->next;
    return val;
}
/*
* Each matrix is one dimentional, but is treated 2 dimentionaly e.g.
*
* 1 0 1
* 0 1 0
* 0 1 1
*
* EQUALS
*
* {1, 0, 1, 0, 1, 0, 0, 1, 1}
*
*/
//Flags
int fTimeOnly = 0; //Only print out the computation times
int fGPUOnly = 0; //Only preform operation on the GPU
int fShowPaths = 0; //List all the generated paths
//------------------------------------------------------------------------------------------
/*
* generateAdjMatrix - Returns a new random adjacency matrix
* count - the size of the matrix. the size is count X count)
*/
/*
 * generateAdjMatrix - Returns a new random symmetric adjacency matrix
 * count - the size of the matrix (the matrix is count x count)
 * Fix: the diagonal (a node's link to itself) was previously never written,
 * leaving uninitialized malloc'd garbage there; it is now explicitly 0.
 * Caller owns the returned buffer and must free() it.
 */
long *generateAdjMatrix(int count) {
    long *randomMatrix = (long *) malloc(count * count * sizeof(long));
    int i, j;
    //Set the random seed to the current time
    srand(time(NULL));
    //Create a random symmetric adjacency matrix. Nodes do not connect to themselves.
    for (i = 0; i < count; i++) {
        randomMatrix[(i * count) + i] = 0; // no self-loops; was left uninitialized
        for (j = 0; j < count; j++) {
            if (i != j) {
                long randomResult = rand() % 2;
                randomMatrix[(i * count) + j] = randomResult;
                randomMatrix[(j * count) + i] = randomResult;
            }
        }
    }
    return randomMatrix;
}
/*
* printAdjMatrix - Prints and adjacency matrix
* count - the height of the matrix
* matrix - the adjacency matrix
*/
/*
 * printAdjMatrix - write a count x count adjacency matrix to stdout,
 * one row per line, each entry right-aligned in 3 columns.
 */
void printAdjMatrix(int count, long *matrix) {
    for (int row = 0; row < count; row++) {
        for (int col = 0; col < count; col++)
            printf("%3ld ", matrix[(row * count) + col]);
        printf("\n");
    }
}
/*
* CPUMultiplyMatrix - copies the cross multiplied matrix of matrices 1 and 2, into matrix2 (The matrices must be of the same height and width)
* The runtime is O(n^3) * t where n = the height of the matrix, t = the number of times to multiply
* matrix1 - the first adjacency matrix
* matrix2 - the second adjacency matrix
* paths - the number of paths (times) to preform matrix multiplication
* count - the height of the matrix
*/
/*
 * CPUMultiplyMatrix - repeatedly left-multiplies *matrix2 by *matrix1,
 * storing the result back into *matrix2 (both are count x count).
 * The runtime is O(n^3) * t where n = the height of the matrix, t = paths.
 * matrix1 - the first adjacency matrix (read-only)
 * matrix2 - the second adjacency matrix (overwritten with the product)
 * paths - the number of times to perform the multiplication
 * count - the height/width of the matrices
 * Fix: the accumulator was malloc'd and never zeroed before the first
 * `+=` pass, so the first product included uninitialized garbage; it is
 * now calloc'd.
 */
void CPUMultiplyMatrix(long **matrix1, long **matrix2, int paths, int count) {
    long *newMatrix = (long *) calloc((size_t) count * count, sizeof(long));
    int i, j, k;
    while (paths > 0) {
        for (i = 0; i < count; i++) {
            for (j = 0; j < count; j++) {
                for (k = 0; k < count; k++) {
                    newMatrix[(i * count) + j] += (*matrix1)[(i * count) + k] * (*matrix2)[(k * count) + j];
                }
            }
        }
        //Copy newMatrix to matrix2 and clear newMatrix for the next pass
        for (i = 0; i < count * count; i++) {
            (*matrix2)[i] = newMatrix[i];
            newMatrix[i] = 0;
        }
        paths--;
    }
    free(newMatrix);
}
/*
* CPUTraverse - takes in an adjacency matrix and returns all paths as an int array
* matrix - The original adjacency matrix
* count - the height of the matrices
* paths -
* numPaths - the length of the paths
* startNodeNumber - the starting node number (from 0 to count-1)
* endNodeNumber - the ending node number (from 0 to count-1)
*/
/*
 * CPUTraverse - intended to enumerate paths from startNodeNumber through the
 * adjacency matrix via a breadth-first expansion of a work list.
 * NOTE(review): this function is broken as written and must not be called:
 *  - `list` and `newList` are uninitialized lList** pointers; push() and
 *    dequeue() dereference them, which is undefined behavior.
 *  - `currLen` is never advanced, so every expansion overwrites path[0].
 *  - `numPaths`, `endNodeNumber`, and the `listOfPaths` output parameter are
 *    never used.
 * Kept byte-identical pending a proper rewrite.
 */
void CPUTraverse(long *matrix, int count, int numPaths, int startNodeNumber, int endNodeNumber, lList **listOfPaths) {
    int currLen = 0;
    lList **list;  // NOTE(review): used uninitialized below
    int start[50] = {startNodeNumber};
    push(list, start);
    lList **newList;  // NOTE(review): used uninitialized below
    while (*list != NULL) {
        for (int i = 0; i < count; ++i) {
            if (matrix[(((*list)->path)[currLen]) * count + i] == 1) {
                (*list)->path[currLen] = i;;
                push(newList, (*list)->path);
            }
        }
        dequeue(list);
    }
}
/*
* CPUMatrixMultiplication - Preforms CPU matrix multiplication on an adjacency matrix
* count - number of nodes
* path - number of paths
* matrix - an adjancency matrix
*/
/*
 * CPUMatrixMultiplication - Performs and times CPU matrix multiplication on an
 * adjacency matrix, printing the resulting matrix and the elapsed wall time.
 * count - number of nodes
 * path - number of multiplications to perform
 * matrix - an adjacency matrix of size count x count (not modified)
 * Fix: elapsed time was computed by subtracting tv_sec and tv_usec fields
 * independently; when end.tv_usec < start.tv_usec the unsigned subtraction
 * underflowed into a huge bogus value. The difference is now computed as one
 * signed microsecond count first.
 */
void CPUMatrixMultiplication(int count, int path, long *matrix) {
    //Array to store the multiplied result
    long *cpuMM = (long *) malloc(count * count * sizeof(long));
    //Copy matrix to cpuMM
    int i;
    for (i = 0; i < count * count; i++) {
        cpuMM[i] = matrix[i];
    }
    //Create the time interval
    struct timeval start, end;
    //Start time
    gettimeofday(&start, NULL);
    //The completed multiplied matrix
    CPUMultiplyMatrix(&matrix, &cpuMM, path, count);
    //End time
    gettimeofday(&end, NULL);
    //Compute elapsed time as a single signed quantity to avoid underflow
    long long elapsed = (long long)(end.tv_sec - start.tv_sec) * 1000000LL
                        + (end.tv_usec - start.tv_usec);
    unsigned int seconds = (unsigned int)(elapsed / 1000000);
    unsigned long microseconds = (unsigned long)(elapsed % 1000000);
    //Print the multiplied matrix
    printf("CPU Generated matrix:\n");
    if (!fTimeOnly)
        printAdjMatrix(count, cpuMM);
    printf("Took %d seconds, and %lu microseconds to compute\n\n", seconds, microseconds % 1000000);
    lList *paths = (lList) {.path = {0}, .next = NULL};
    if (fShowPaths) {
        CPUTraverse(matrix, count, cpuMM[startNodeNumber * count + endNodeNumber], startNodeNumber, endNodeNumber,
                    &paths);
    }
    free(cpuMM);
}
/*
* GPUMultiplyMatrix (GPU Only)- Returns a cross multiplied matrix of two matrixies (The matrixies must be of the same height and width)
* Each core is calculating an element on the multiplied matrix
* e.g.
*
* a b c 1a 1b 1c 2a 2b 2c
* d e f = 1d 1e 1f X 2d 2e 2f
* g h i 1g 1h 1i 2g 2h 2i
*
* There will be 9 cores (3x3)
* Core 0, the first core will calculate (a) in the final matrix (1a*2a + 1d*2b + 1g*2c)
* Core 1, the first core will calculate (b) in the final matrix (1b*2a + 1e*2b + 1h*2c)
* And so on...
*
* matrix1 - the first adjacency matrix
* matrix2 - the second adjacency matrix
* paths - the number of paths (times) to preform matrix multiplication
* count - the height of the matrix
*/
/*
__global__ void GPUMultiplyMatrix(long *matrix1, long *matrix2, int paths, int count) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
int i;
while (paths > 0) {
long sum = 0;
int col = element % count;
int row = element / count;
for (i = 0; i < count; i++) {
sum += matrix1[count * i + col] * matrix2[row * count + i];
}
//Wait till all GPU cores are finished
__syncthreads();
matrix2[element] = sum;
paths--;
}
}
*/
/*
* GPUMatrixMultiplication - Preforms GPU matrix multiplication on an adjacency matrix
* count - number of nodes
* path - number of paths
* matrix - an adjancency matrix
*/
/*
void GPUMatrixMultiplication(int count, int path, long *matrix, int nodeA, int nodeB) {
//An adjacency matrix on the GPU
long *gpuMatrix;
//The multiplied matrix on the GPU
long *gpuMM;
//A matrix that will store gpuMM on the CPU
long *multipliedMatrix = (long *) malloc(count * count * sizeof(long));
//The number of GPUs needed for matrix multiplication
int numBlocks = (count * count) / NUMTHREADS + 1;
//Allocate the memory on the GPU
cudaMalloc(&gpuMatrix, (count * count * sizeof(long)));
cudaMalloc(&gpuMM, (count * count * sizeof(long)));
//Copy the input matrix from the CPU to the GPU (matrix -> gpuMatrix)
cudaMemcpy(gpuMatrix, matrix, (count * count * sizeof(long)), cudaMemcpyHostToDevice);
cudaMemcpy(gpuMM, matrix, (count * count * sizeof(long)), cudaMemcpyHostToDevice);
//Create the time intervals
struct timeval start, end;
//Start time
gettimeofday(&start, NULL);
//Preform the multiplied matrix function on gpuMatrix, and gpuMM, and store into gpuMM
GPUMultiplyMatrix <<<numBlocks, NUMTHREADS>>> (gpuMatrix, gpuMM, path, count);
//End time
gettimeofday(&end, NULL);
//Copy gpuMM from the GPU to the CPU in multipiedMatrix
cudaMemcpy(multipliedMatrix, gpuMM, (count * count * sizeof(long)), cudaMemcpyDeviceToHost);
cudaFree(&gpuMM);
//Calculate time
long microseconds = end.tv_usec - start.tv_usec;
//Print the multiplied matrix, copied earlier from the GPU
printf("GPU Generated matrix:\n");
if (!fTimeOnly)
printAdjMatrix(count, multipliedMatrix);
printf("Took %li microseconds to compute\n", microseconds);
printf("\n");
//Traverse, or not to traverse
if (fShowPaths) {
//GPUTraverse<<<>>>()
}
}
*/
/*
* GPUTraverse (GPU Only) - takes in a matrix and returns all paths as an int array the
* matrix - The original matrix
* count - the height of the matrix
* numPaths - The number of paths that exists (To compare to the result)
* pathLength - The length of the paths
* startNodeNumber - the starting node number (from 0 to count-1)
* endNodeNumber - the ending node number (from 0 to count-1)
* listOfPaths - An array storing the set of paths from A to B (output)
*/
/*
__global__ void GPUTraverse(long *matrix, int count, int numPaths, int pathLength, int startNodeNumber,
int endNodeNumber, long **listOfPaths) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
numBlocks = numPaths / NUMTHREADS + 1;
//The list of paths on the GPU
long *gpuPaths;
cudaMalloc(&gpuPaths, numPaths * sizeof(long));
//current length of the path
int currLength = 0;
//current Node in the graph
int currNode = startNodeNumber;
// Algorithm
paths[element * length + currLength] = currNode;
currLength++;
while (currLength != length) {
if (currLength == length - 1) {
//this case is to assist in our bruteforce algorithm
//if we can only make one more transition instead of doing
//a random transition we try to move to the endNodeNumber point
if (matrix[currNode * count + endNodeNumber] == 1) {
currNode = endNodeNumber;
paths[element * length + currLength] = currNode;
currLength++;
//check for duplicates
//int i;
//for(i = 0; i < numPaths; i++){
// int j;
// for(j = 0; j < length; j++){
//
// }
//}
} else {//if we can't connect to the endpoint we restart
currLength = 1;
currNode = startNodeNumber;
paths[element * length + 0] = currNode;
}
} else {
int randIdx;
do {
randIdx = curand(&state) % count;
} while (matrix[currNode * count + randIdx] != 1);
currNode = randIdx;
paths[element * length + currLength] = currNode;
currLength++;
}
}
cudaMemcpy(paths, gpuPaths, numPaths * sizeof(long), cudaMemcpyDeviceToHost);
cudaFree(&gpuPaths);
//Print out the paths (stack or linked list)
printf("From Node<%d> to Node<%d>: There are %d paths\n", nodeA, nodeB, numPaths);
}
*/
//Main function
/*
 * main - parse command-line options, build a random adjacency matrix, and
 * run the CPU matrix-multiplication path counter.
 * Fixes: `count` and `path` now have defaults, so invoking with only flag
 * options (e.g. just -t) no longer reads uninitialized variables; the
 * generated matrix is freed before exit.
 */
int main(int argc, char *argv[]) {
    char usageString[500] = ("Usage:\n-t: Print the Calculation Time only\n-d: Default - Set number of nodes to 10, number of paths to 3\n");
    strcat(usageString,
           "-g: Preform calculations on GPU only\n-s: Show the paths\n-c <num of nodes>\n-p <num of paths>\n");
    strcat(usageString, "-a <start node number (0 to c-1)>\n-b <end node number (0 to c-1)>\n\n");
    int count = 10; //Number of nodes (default; overridden by -c or -d)
    int path = 3;   //Number of paths (default; overridden by -p or -d)
    long *adjMatrix;
    //start and end of the path
    startNodeNumber = 0;
    endNodeNumber = 3;
    //Suppress getopt's own error messages; we print our own usage text
    opterr = 0;
    int c;
    //If no parameters are passed
    if (argc == 1) {
        fprintf(stderr, "%s", usageString);
        return 1;
    }
    while ((c = getopt(argc, argv, "dgtsc:p:a:b:")) != -1) {
        switch (c) {
            //Flags
            case 'd':
                count = 10;
                path = 2;
                break;
            case 'g':
                fGPUOnly = 1;
                break;
            case 't':
                fTimeOnly = 1;
                break;
            case 's':
                fShowPaths = 1;
                break;
            //Parameters
            case 'c':
                count = atoi(optarg);
                break;
            case 'p':
                path = atoi(optarg);
                break;
            case 'a':
                startNodeNumber = atoi(optarg);
                break;
            case 'b':
                endNodeNumber = atoi(optarg);
                break;
            case '?':
                if (optopt == 'c' || optopt == 'p' || optopt == 'a' || optopt == 'b') {
                    fprintf(stderr, "Option (-%c) requires an argument.\n\n%s\n", optopt, usageString);
                } else {
                    fprintf(stderr, "%s", usageString);
                }
                return 1;
            default:
                printf("Error...");
                return 2;
        }
    }
    path--;
    //adjMatrix now equals a new random adjacency matrix
    adjMatrix = generateAdjMatrix(count);
    //Print the generated adjacency matrix
    if (!fTimeOnly) {
        printf("Generated Adjancency Matritx:\n");
        printAdjMatrix(count, adjMatrix);
        printf("\n");
    }
    //Compute the CPU function
    if (!fGPUOnly)
        CPUMatrixMultiplication(count, path, adjMatrix);
    //Compute the GPU function
    //GPUMatrixMultiplication(count, path, adjMatrix, startNodeNumber, endNodeNumber);
    free(adjMatrix);
    return 0;
}
|
89a5f76151f48633cf8e34f5aac1d6c798f73bb5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "DmeanSquareLoss.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Auto-generated (hipify) benchmark harness for the DmeanSquareLoss kernel.
 * For each of the first argv[1] matrix shapes and each of the 20 block
 * configurations: one warm-up launch, 10 more launches, then 1000 timed
 * launches; prints [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 */
int main(int argc, char **argv) {
hipSetDevice(0);
// NOTE(review): argv[1] is read without checking argc — running with no
// arguments dereferences NULL; confirm callers always pass the count.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int lengthx = 1;
const double pref = 1;
// NOTE(review): these allocations are XSIZE*YSIZE *bytes*, not doubles —
// the sizeof(double) factor is missing; also nothing is freed, so buffers
// leak across all iterations. Harmless only if the kernel touches very
// few elements (lengthx == 1) — verify against DmeanSquareLoss.cu.
const double *gradc = NULL;
hipMalloc(&gradc, XSIZE*YSIZE);
const double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
const double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
double *gradn = NULL;
hipMalloc(&gradn, XSIZE*YSIZE);
// Round the problem size up to a multiple of the block shape so the
// grid covers XSIZE x YSIZE completely.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces context creation before the warm-up launch.
hipFree(0);hipLaunchKernelGGL((
DmeanSquareLoss), dim3(gridBlock),dim3(threadBlock), 0, 0, lengthx,pref,gradc,x,y,gradn);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
DmeanSquareLoss), dim3(gridBlock),dim3(threadBlock), 0, 0, lengthx,pref,gradc,x,y,gradn);
}
// NOTE(review): launches are asynchronous and there is no synchronize
// before `end`, so this times enqueueing plus whatever work overlapped.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
DmeanSquareLoss), dim3(gridBlock),dim3(threadBlock), 0, 0, lengthx,pref,gradc,x,y,gradn);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 89a5f76151f48633cf8e34f5aac1d6c798f73bb5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "DmeanSquareLoss.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Auto-generated benchmark harness for the DmeanSquareLoss kernel.
 *
 * Usage: prog <number of matrix sizes (1-7)>
 * For each selected matrix shape and each of the 20 block configurations:
 * one warm-up launch, 10 more launches, then 1000 timed launches; prints
 * [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 */
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Guard against a missing argument: strtol(argv[1], ...) would
    // otherwise dereference NULL when run with no parameters.
    if (argc < 2) {
        cerr << "usage: " << argv[0] << " <number of matrix sizes (1-7)>" << endl;
        return 1;
    }
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const int lengthx = 1;
            const double pref = 1;
            // Allocate XSIZE*YSIZE doubles. The original omitted the
            // sizeof(double) factor and so allocated only XSIZE*YSIZE bytes.
            size_t bytes = (size_t)XSIZE * YSIZE * sizeof(double);
            const double *gradc = NULL;
            cudaMalloc(&gradc, bytes);
            const double *x = NULL;
            cudaMalloc(&x, bytes);
            const double *y = NULL;
            cudaMalloc(&y, bytes);
            double *gradn = NULL;
            cudaMalloc(&gradn, bytes);
            // Round the problem size up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // force context creation before timing
            // Warm-up launch.
            DmeanSquareLoss<<<gridBlock, threadBlock>>>(lengthx, pref, gradc, x, y, gradn);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                DmeanSquareLoss<<<gridBlock, threadBlock>>>(lengthx, pref, gradc, x, y, gradn);
            }
            // NOTE: launches are asynchronous; no synchronize before `end`,
            // so this times enqueueing plus overlapped work, exactly as the
            // original did (output format preserved).
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                DmeanSquareLoss<<<gridBlock, threadBlock>>>(lengthx, pref, gradc, x, y, gradn);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Release per-iteration buffers; the original leaked them across
            // all matrix_len * 20 iterations.
            cudaFree((void *)gradc);
            cudaFree((void *)x);
            cudaFree((void *)y);
            cudaFree(gradn);
        }
    }
    return 0;
}
b97fce00073feaff24cd91017c4d29df1b4fcb50.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2013 William J. Brouwer, Pierre-Yves Taunay
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <hip/hip_runtime.h>
#include "LUCoreGPU.h"
#include "RunInfo.h"
#include "vectorOP.h"
#include "vectorOP.cu"
#include "checks.cuh"
#define RED_SUM_SIZE 1024
using namespace std;
namespace LUCoreGPU {
__device__ __constant__ int nMperBl_GPU, matSide_GPU, scrSpace_GPU, scrSpace2_GPU, loadSize_GPU, warpPerMat_GPU;
/*
 * CALL: host-side launcher for the batched LUDecomposition kernel.
 *
 * Copies the per-run geometry (matrices per block, matrix side, scratch
 * sizes, load size, warps per matrix) from RunInfo into __constant__
 * memory, then queues the kernel on the caller's stream with
 * nMperBl * scrSpace threads per block and enough blocks to cover nMat
 * matrices. `vestige` is the number of matrices handled by the last,
 * partially-filled block. Exits the process if the required grid would
 * exceed maxBlocks-1.
 */
#ifdef __DPREC__
void CALL(double2 *d_localList, int nMat, RunInfo *rInfo) {
#else
void CALL(float2 **d_Matrices, int nMat, RunInfo *rInfo, int rank, int th_num, hipStream_t *stream) {
#endif
// Power of 2 above and below matSize
int scrSpace = rInfo->getScrSpace();
int scrSpace2= rInfo->getScrSpace2();
// Number of matrices per block, max number of blocks, load size
int nMperBl = rInfo->getNMperBl();
int maxBlocks = rInfo->getMaxBlocks();
int loadSize = rInfo->getLoadSize();
int warpPerMat = rInfo->getWarpPerMat();
// Matrix side
int matSide = rInfo->getMatSide();
// Publish the geometry to the kernel's __constant__ variables.
hipMemcpyToSymbol(nMperBl_GPU ,&nMperBl,sizeof(nMperBl));
#ifdef __DEBUG__
cudaCheckError("nMperBl memcpy");
#endif
hipMemcpyToSymbol(matSide_GPU ,&matSide,sizeof(matSide));
#ifdef __DEBUG__
cudaCheckError("matSide memcpy");
#endif
hipMemcpyToSymbol(scrSpace_GPU ,&scrSpace,sizeof(scrSpace));
#ifdef __DEBUG__
cudaCheckError("scrSpace memcpy");
#endif
hipMemcpyToSymbol(scrSpace2_GPU,&scrSpace2,sizeof(scrSpace2));
#ifdef __DEBUG__
cudaCheckError("scrSpace2 memcpy");
#endif
hipMemcpyToSymbol(loadSize_GPU ,&loadSize,sizeof(loadSize));
#ifdef __DEBUG__
cudaCheckError("loadSize memcpy");
#endif
hipMemcpyToSymbol(warpPerMat_GPU,&warpPerMat,sizeof(warpPerMat));
#ifdef __DEBUG__
cudaCheckError("warpPerMat memcpy");
#endif
dim3 threads, blocks;
threads.x = nMperBl * scrSpace;
// NOTE(review): devPerms is never used in this function.
int * devPerms = NULL;
// TODO : Replace if statements with try / catch
if ((nMat / nMperBl) < maxBlocks-1){
//TODO incorporate more perms
blocks.x=(nMat / nMperBl)+1;
int vestige = nMat - ((blocks.x-1) * nMperBl);
#ifdef __VERBOSE__
cout << "INFO \t Node " << rank << " Thread " << th_num << "\t Queuing LU Decomposition kernel with " << blocks.x << " blocks, " << threads.x << " threads" << endl;
#endif
// kernel
// Dynamic shared memory: per matrix, 1 float (sign) + matSide floats
// (scale) + 5*scrSpace floats (float2 reduce + float2 vectors + int
// indices), matching the carve-up at the top of LUDecomposition.
hipLaunchKernelGGL(( LUDecomposition) , dim3(blocks), dim3(threads),nMperBl*(1+matSide+5*scrSpace)*sizeof(float),*stream , vestige,d_Matrices[th_num]);
// LUDecomposition <<< blocks, threads,0,*stream >>> (vestige,d_Matrices[th_num]);
// LUDecomposition <<< blocks, threads >>> (vestige,d_Matrices[th_num]);
#ifdef __DEBUG__
cudaCheckError("LU Decomposition kernel");
#endif
} else {
// fprintf(stderr,"%.24s EXITING; exceeded avail blocks on GPU on host %s with rank %i\n",hostname,rank);
exit(1);
}
}
/*
 * globalReduction: host driver that sums nMat/2 complex (float2) values on
 * the GPU by repeatedly launching reductionGlobalSum. Each pass reduces
 * the problem size by a factor of RED_SUM_SIZE; input and output buffers
 * are ping-ponged (swapped on odd passes) until a single block suffices.
 * `vestige` is the number of valid elements in the last block of a pass.
 */
#ifdef __DPREC__
#else
void globalReduction(float2 **d_Matrices, float2 **d_Reduction, int nMat,int rank,int th_num, hipStream_t stream){
#endif
// Compute the sum for fqhe wf
// we've performed the multiplication already
nMat /= 2;
// NOTE(review): `t` is never used — presumably leftover debug/timing code.
time_t t = time(0);
dim3 threads,blocks;
// TODO: Change number of threads to flexible value
threads.x = RED_SUM_SIZE;
int vestige=0, k=1, i=0;
float2 *d_Output = d_Reduction[th_num];
float2 *d_Input = d_Matrices[th_num];
float2 *tmp;
while (k != -1){
blocks.x = ( nMat > threads.x ) ? nMat / threads.x +1 : 1;
vestige = nMat - (blocks.x-1) * threads.x;
#ifdef __VERBOSE__
#pragma omp critical
cout << "INFO \t Node " << rank << " Thread " << th_num << "\t Queuing the global reduction kernel with " << blocks.x << " blocks, " << threads.x << endl;
#endif
// Swap the ping-pong buffers so each pass reads the previous pass's output.
if ((i%2)==1 )
{
tmp = d_Output;
d_Output = d_Input;
d_Input = tmp;
}
hipLaunchKernelGGL(( reductionGlobalSum), dim3(blocks),dim3(threads),0,stream, vestige,d_Input,d_Output,i);
i++;
nMat = blocks.x;
if (blocks.x==1) k=-1;
}
}
/*
 * LUDecomposition: batched, in-place LU factorization of nMperBl_GPU small
 * complex matrices per block, with row scaling and partial pivoting
 * (permutation sign tracked per matrix for the determinant). The scheme
 * appears to follow the Numerical Recipes ludcmp column-update structure —
 * confirm against the project docs.
 *
 * Layout: each matrix gets scrSpace_GPU threads ("vectorIndex"); shared
 * memory is carved from the dynamic BANK into sign / scale / reduce /
 * vectors / indices (see the launcher's size formula). `vestige` is the
 * number of matrices handled by the last (partial) block.
 *
 * NOTE(review): the index macros (vec_index, row_i_index, col_j_index,
 * vec_k_scr, scale_index, diag_j_index, mat_00_index, ...) are defined in
 * a project header not visible here; comments below assume they address
 * the obvious row/column/diagonal elements — verify.
 */
#ifdef __DPREC__
__global__ void LUDecomposition (int vestige, double2 * inputMatrices){
#else
__global__ void LUDecomposition (int vestige, float2 * inputMatrices){
#endif
// Broadcast the constant memory to registers
int lmem_nMperBl = nMperBl_GPU;
int lmem_matSide = matSide_GPU;
int lmem_scrSpace = scrSpace_GPU;
int lmem_scrSpace2 = scrSpace2_GPU;
int lmem_loadSize = loadSize_GPU;
int lmem_warpPerMat = warpPerMat_GPU;
#ifdef __DPREC__
double2 sum, dum,tmp;
__shared__ double sign [ lmem_nMperBl ]; // permutation sign, for determinant
volatile __shared__ double scale [ lmem_nMperBl * lmem_matSide ]; // scaling information, store largest value for each row
volatile __shared__ double2 reduce [ lmem_nMperBl * lmem_scrSpace ]; // a reduction buffer
volatile __shared__ double2 vectors [ lmem_nMperBl * lmem_scrSpace ]; // scratch space for main steps in algorithm
#else
float2 sum, dum,tmp;
float den;
// Carve the dynamic shared-memory bank into the per-matrix regions.
extern __shared__ float BANK[];
volatile float *sign = (float*)BANK;
volatile float *scale = (float*)&sign[lmem_nMperBl];
volatile float2 *reduce = (float2*)&scale[lmem_nMperBl*lmem_matSide];
volatile float2 *vectors= (float2*)&reduce[lmem_nMperBl*lmem_scrSpace];
/*
__shared__ float sign[4];
__shared__ volatile float scale[4*16];
__shared__ volatile float2 reduce[4*16];
__shared__ volatile float2 vectors[4*16];
__shared__ volatile int indices[4*16];*/
#endif
volatile int *indices = (int*)&vectors[lmem_nMperBl*lmem_scrSpace];
// Index to matrix for processing
int myMatrix = threadIdx.x / lmem_scrSpace;
// Index to vector for processing
int vectorIndex = threadIdx.x % lmem_scrSpace;
// Initialize permutation signs
#ifdef __DPREC__
sign[threadIdx.x % lmem_nMperBl]=1.0;
#else
sign[threadIdx.x % lmem_nMperBl]=1.0f;
#endif
// book-keeping; if last block, only need vestige * lmem_scrSpace threads
// NOTE(review): early-returning threads later skip __syncthreads(); safe
// only because whole matrices (all their threads) drop out together —
// confirm this holds for the launch configuration.
if (blockIdx.x == (gridDim.x-1))
if (threadIdx.x >= vestige * lmem_scrSpace)
return;
// offset for this block
int memoryStride = ( blockIdx.x * lmem_nMperBl * lmem_matSide * lmem_loadSize );
// initialize memory
vectors [ threadIdx.x ].x = -1e24;
vectors [ threadIdx.x ].y = -1e24;
// Determine scaling information: scale[row] = |largest element| in the row,
// used below for implicit-pivoting comparisons.
for (int i=0; i < lmem_matSide; ++i){
// load memory
if ( vectorIndex < lmem_loadSize ){
vectors [ vec_index ].x = inputMatrices [ row_i_index ].x;
vectors [ vec_index ].y = inputMatrices [ row_i_index ].y;
}
__syncthreads();
// find maxima
vOP::findVectorMaxima ( vectors, vectorIndex, myMatrix, scrSpace_GPU, scrSpace2_GPU );
// write scaling information
if ( vectorIndex ==i ){
scale [ scale_index ] = abs ( vectors [ vec_00_scr ].x);
}
}
// initialize memory
vectors [ threadIdx.x ].x = 0.0f;
vectors [ threadIdx.x ].y = 0.0f;
//__syncthreads();
// main loops: factor column by column, updating the j-th column against
// all previously factored columns (complex multiply-subtract).
float2 tmpr;
float2 tmpl;
int myWarp = vectorIndex / 32;
for (int j=0; j<lmem_matSide; j++){
// load the j column to shared
if ( vectorIndex < lmem_matSide ){
vectors [ vec_index ].x = inputMatrices [ col_j_index ].x;
vectors [ vec_index ].y = inputMatrices [ col_j_index ].y;
}
// update the j column
__syncthreads();
// Rows above the diagonal (i < j): serialized warp-by-warp because each
// row depends on rows above it in the same column.
for (int i=0; i<lmem_warpPerMat; i++){
if (myWarp==i){
if ( vectorIndex < j ){
sum.x = vectors[vec_index].x;
sum.y = vectors[vec_index].y;
for (int k=0; k< 512; k++){
if (k >= vectorIndex) break;
tmpl.x = inputMatrices[col_k_index].x;
tmpl.y = inputMatrices[col_k_index].y;
tmpr.x = vectors [ vec_k_scr ].x;
tmpr.y = vectors [ vec_k_scr ].y;
sum.x -= (tmpl.x * tmpr.x - tmpl.y * tmpr.y);
sum.y -= (tmpl.y * tmpr.x + tmpl.x * tmpr.y);
vectors[vec_index].x = sum.x;
vectors[vec_index].y = sum.y;
}
}
}
__syncthreads();
}
__syncthreads();
// Rows on/below the diagonal (i >= j): depend only on rows < j, so all
// warps may proceed without a barrier between them.
for (int i=0; i<lmem_warpPerMat; i++){
if(myWarp==i) {
if ((vectorIndex >=j) && (vectorIndex < lmem_matSide)){
sum.x = vectors[vec_index].x;
sum.y = vectors[vec_index].y;
for (int k=0; k< j; k++){
tmpl.x = inputMatrices [ col_k_index ].x;
tmpl.y = inputMatrices [ col_k_index ].y;
tmpr.x = vectors [ vec_k_scr ].x;
tmpr.y = vectors [ vec_k_scr ].y;
sum.x -= (tmpl.x * tmpr.x - tmpl.y * tmpr.y);
sum.y -= (tmpl.y * tmpr.x + tmpl.x * tmpr.y);
vectors[vec_index].x=sum.x;
vectors[vec_index].y=sum.y;
}
}
}
}
__syncthreads();
// write j column back to global
if ( vectorIndex < lmem_matSide ){
inputMatrices [ col_j_index ].x = vectors [ vec_index ].x;
inputMatrices [ col_j_index ].y = vectors [ vec_index ].y;
}
// initialize shared memory
reduce [ threadIdx.x ].x = -1e24;
reduce [ threadIdx.x ].y = -1e24;
//__syncthreads();
// Pivot search: scaled magnitudes of the candidates (rows >= j).
if ((vectorIndex >= j) && (vectorIndex < lmem_matSide)){
// init for pivot search
reduce [ vec_index - j ].x = abs ( vectors [ vec_index ].x ) / scale [ scale_index ];
indices [ vec_index - j ] = vectorIndex;
}
__syncthreads();
vOP::findVectorMaximaKey ( reduce, indices, vectorIndex, myMatrix, scrSpace_GPU, scrSpace2_GPU );
//__syncthreads();
// possible row swap
if (j != indices [ vec_00_scr ]){
if (vectorIndex < lmem_loadSize){
int i = indices [ vec_00_scr ];
// each thread swaps one row element with another row element
sum = inputMatrices [ row_i_index ];
inputMatrices [ row_i_index ] = inputMatrices [ row_j_index ];
inputMatrices [ row_j_index ] = sum;
if (vectorIndex==0){
scale [ vec_i_sca ] = scale [ vec_j_sca ];
sign [ myMatrix ] *= -1.0f;
}
}
}
__syncthreads();
// final scaling: divide the sub-diagonal part of column j by the pivot
// (complex division via the conjugate / squared-magnitude form).
if ( j != lmem_matSide-1){
dum = inputMatrices [ diag_j_index ];
if ((vectorIndex >= j+1) && (vectorIndex < lmem_matSide)){
tmp = inputMatrices [ col_j_index ];
// Perform division tmp/dum
den = dum.x*dum.x + dum.y*dum.y;
tmpl.x = (tmp.x*dum.x + tmp.y*dum.y)/den;
tmpl.y = (tmp.y*dum.x - tmp.x*dum.y)/den;
tmp.x = tmpl.x;
tmp.y = tmpl.y;
inputMatrices [ col_j_index ] = tmp;
}
}
__syncthreads();
}// end j loops
#ifdef WRITE_DET
// Determinant = sign * product of the U diagonal; written to element (0,0).
// init
vectors [ vec_index].x = 1.0f;
vectors [ vec_index].y = 0.0f;
// load diags
if (vectorIndex < lmem_matSide ){
vectors [ vec_index ].x = inputMatrices [ diag_index ].x;
vectors [ vec_index ].y = inputMatrices [ diag_index ].y;
}
__syncthreads();
// pop in sign
if (vectorIndex == 0 ){
vectors [ vec_00_scr ].x *= sign [ myMatrix ];
vectors [ vec_00_scr ].y *= sign [ myMatrix ];
}
__syncthreads();
#ifdef PERFORM_PROD
// every second row will contain product of two dets
if ((myMatrix %2) ==1){
// compiler complains about 'no copy constructor for float2'
// so doing this explicitly
#ifdef __DPREC__
double2 tmpA, tmpB;
#else
float2 tmpA, tmpB;
#endif
tmpA.x = vectors [ vec_1_index].x;
tmpA.y = vectors [ vec_1_index].y;
tmpB.x = vectors [ vec_index].x;
tmpB.y = vectors [ vec_index].y;
vectors [ vec_1_index ].x = tmpA.x*tmpB.x-tmpA.y*tmpB.y;
vectors [ vec_1_index ].y = tmpA.y*tmpB.x+tmpA.x*tmpB.y;
}
__syncthreads();
#endif
// calculate determinants
vOP::findVectorProduct(vectors, vectorIndex, myMatrix, scrSpace_GPU, scrSpace2_GPU);
__syncthreads();
// write out
if (vectorIndex==0){
inputMatrices [ mat_00_index ].x = vectors [ vec_00_scr ].x ;
inputMatrices [ mat_00_index ].y = vectors [ vec_00_scr ].y ;
}
#endif
}
#ifdef __DPREC__
#else
/*
 * reductionGlobalSum: one pass of a multi-pass parallel sum of complex
 * (float2) values. Each block reduces up to RED_SUM_SIZE elements of
 * `input` in shared memory and emits one partial sum.
 *
 * it      - pass number: on pass 0 the input is strided by
 *           2*loadSize_GPU*matSide_GPU (one value per matrix pair);
 *           later passes read densely.
 * vestige - number of valid elements handled by the last block.
 *
 * Fix: each block now writes its partial sum to output[blockIdx.x]. The
 * original wrote every block's sum to output[0], so any pass launched with
 * gridDim.x > 1 discarded all but one block's contribution — the host
 * driver (globalReduction) feeds `output` back in as `input` with dense
 * indexing t_id = RED_SUM_SIZE*blockIdx.x + threadIdx.x and therefore
 * expects gridDim.x partial sums in output[0..gridDim.x-1].
 */
__global__ void reductionGlobalSum(int vestige, float2 *input, float2 *output, int it){
#endif
int t_id;
if (it == 0){
// first pass: strided load, one value per matrix pair
t_id = 2*loadSize_GPU * matSide_GPU* (RED_SUM_SIZE * blockIdx.x + threadIdx.x);
}else{
t_id = RED_SUM_SIZE * blockIdx.x + threadIdx.x;
}
__shared__ volatile float2 scratch[RED_SUM_SIZE];
// init (zero so padded lanes in the last block contribute nothing)
scratch[threadIdx.x].x = 0.0f;
scratch[threadIdx.x].y = 0.0f;
__syncthreads();
// load; last block only reads its `vestige` valid elements
if ( blockIdx.x != (gridDim.x-1) ){
scratch[ threadIdx.x ].x = input[ t_id ].x;
scratch[ threadIdx.x ].y = input[ t_id ].y;
}
else{
if (threadIdx.x < vestige){
scratch[ threadIdx.x ].x = input[ t_id ].x;
scratch[ threadIdx.x ].y = input[ t_id ].y;
}
}
__syncthreads();
// Tree reduction in shared memory. The sub-32 tail relies on legacy
// warp-synchronous execution plus `volatile`; on architectures with
// independent thread scheduling this would need __syncwarp() — kept as-is
// to match the original's assumptions.
#if RED_SUM_SIZE == 1024
if (threadIdx.x < 512){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 512 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 512 ].y;
}
__syncthreads();
#endif
#if RED_SUM_SIZE >= 512
if (threadIdx.x < 256){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 256 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 256 ].y;
}
__syncthreads();
#endif
#if RED_SUM_SIZE >= 256
if (threadIdx.x < 128){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 128 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 128 ].y;
}
__syncthreads();
#endif
#if RED_SUM_SIZE >= 128
if (threadIdx.x < 64){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 64 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 64 ].y;
}
__syncthreads();
#endif
#if RED_SUM_SIZE >= 64
if (threadIdx.x < 32){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 32 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 32 ].y;
}
#endif
#if RED_SUM_SIZE >= 32
if (threadIdx.x < 16){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x+16 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x+16 ].y;
}
#endif
if (threadIdx.x < 8){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 8 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 8 ].y;
}
if (threadIdx.x < 4){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 4 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 4 ].y;
}
if (threadIdx.x < 2){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 2 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 2 ].y;
}
if (threadIdx.x < 1){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 1 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 1 ].y;
}
// One partial sum per block (see header comment for the fix rationale).
if (threadIdx.x == 0){
output[blockIdx.x].x = scratch[0].x;
output[blockIdx.x].y = scratch[0].y;
}
}
}
| b97fce00073feaff24cd91017c4d29df1b4fcb50.cu | /*
* Copyright 2013 William J. Brouwer, Pierre-Yves Taunay
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <cuda.h>
#include "LUCoreGPU.h"
#include "RunInfo.h"
#include "vectorOP.h"
#include "vectorOP.cu"
#include "checks.cuh"
#define RED_SUM_SIZE 1024
using namespace std;
namespace LUCoreGPU {
__device__ __constant__ int nMperBl_GPU, matSide_GPU, scrSpace_GPU, scrSpace2_GPU, loadSize_GPU, warpPerMat_GPU;
/*
 * CALL: host-side launcher for the batched LUDecomposition kernel.
 *
 * Copies the per-run geometry (matrices per block, matrix side, scratch
 * sizes, load size, warps per matrix) from RunInfo into __constant__
 * memory, then queues the kernel on the caller's stream with
 * nMperBl * scrSpace threads per block and enough blocks to cover nMat
 * matrices. `vestige` is the number of matrices handled by the last,
 * partially-filled block. Exits the process if the required grid would
 * exceed maxBlocks-1.
 */
#ifdef __DPREC__
void CALL(double2 *d_localList, int nMat, RunInfo *rInfo) {
#else
void CALL(float2 **d_Matrices, int nMat, RunInfo *rInfo, int rank, int th_num, cudaStream_t *stream) {
#endif
// Power of 2 above and below matSize
int scrSpace = rInfo->getScrSpace();
int scrSpace2= rInfo->getScrSpace2();
// Number of matrices per block, max number of blocks, load size
int nMperBl = rInfo->getNMperBl();
int maxBlocks = rInfo->getMaxBlocks();
int loadSize = rInfo->getLoadSize();
int warpPerMat = rInfo->getWarpPerMat();
// Matrix side
int matSide = rInfo->getMatSide();
// Publish the geometry to the kernel's __constant__ variables.
cudaMemcpyToSymbol(nMperBl_GPU ,&nMperBl,sizeof(nMperBl));
#ifdef __DEBUG__
cudaCheckError("nMperBl memcpy");
#endif
cudaMemcpyToSymbol(matSide_GPU ,&matSide,sizeof(matSide));
#ifdef __DEBUG__
cudaCheckError("matSide memcpy");
#endif
cudaMemcpyToSymbol(scrSpace_GPU ,&scrSpace,sizeof(scrSpace));
#ifdef __DEBUG__
cudaCheckError("scrSpace memcpy");
#endif
cudaMemcpyToSymbol(scrSpace2_GPU,&scrSpace2,sizeof(scrSpace2));
#ifdef __DEBUG__
cudaCheckError("scrSpace2 memcpy");
#endif
cudaMemcpyToSymbol(loadSize_GPU ,&loadSize,sizeof(loadSize));
#ifdef __DEBUG__
cudaCheckError("loadSize memcpy");
#endif
cudaMemcpyToSymbol(warpPerMat_GPU,&warpPerMat,sizeof(warpPerMat));
#ifdef __DEBUG__
cudaCheckError("warpPerMat memcpy");
#endif
dim3 threads, blocks;
threads.x = nMperBl * scrSpace;
// NOTE(review): devPerms is never used in this function.
int * devPerms = NULL;
// TODO : Replace if statements with try / catch
if ((nMat / nMperBl) < maxBlocks-1){
//TODO incorporate more perms
blocks.x=(nMat / nMperBl)+1;
int vestige = nMat - ((blocks.x-1) * nMperBl);
#ifdef __VERBOSE__
cout << "INFO \t Node " << rank << " Thread " << th_num << "\t Queuing LU Decomposition kernel with " << blocks.x << " blocks, " << threads.x << " threads" << endl;
#endif
// kernel
// Dynamic shared memory: per matrix, 1 float (sign) + matSide floats
// (scale) + 5*scrSpace floats (float2 reduce + float2 vectors + int
// indices), matching the carve-up at the top of LUDecomposition.
LUDecomposition <<< blocks, threads,nMperBl*(1+matSide+5*scrSpace)*sizeof(float),*stream >>> (vestige,d_Matrices[th_num]);
// LUDecomposition <<< blocks, threads,0,*stream >>> (vestige,d_Matrices[th_num]);
// LUDecomposition <<< blocks, threads >>> (vestige,d_Matrices[th_num]);
#ifdef __DEBUG__
cudaCheckError("LU Decomposition kernel");
#endif
} else {
// fprintf(stderr,"%.24s EXITING; exceeded avail blocks on GPU on host %s with rank %i\n",hostname,rank);
exit(1);
}
}
/*
 * globalReduction: host driver that sums nMat/2 complex (float2) values on
 * the GPU by repeatedly launching reductionGlobalSum. Each pass reduces
 * the problem size by a factor of RED_SUM_SIZE; input and output buffers
 * are ping-ponged (swapped on odd passes) until a single block suffices.
 * `vestige` is the number of valid elements in the last block of a pass.
 */
#ifdef __DPREC__
#else
void globalReduction(float2 **d_Matrices, float2 **d_Reduction, int nMat,int rank,int th_num, cudaStream_t stream){
#endif
// Compute the sum for fqhe wf
// we've performed the multiplication already
nMat /= 2;
// NOTE(review): `t` is never used — presumably leftover debug/timing code.
time_t t = time(0);
dim3 threads,blocks;
// TODO: Change number of threads to flexible value
threads.x = RED_SUM_SIZE;
int vestige=0, k=1, i=0;
float2 *d_Output = d_Reduction[th_num];
float2 *d_Input = d_Matrices[th_num];
float2 *tmp;
while (k != -1){
blocks.x = ( nMat > threads.x ) ? nMat / threads.x +1 : 1;
vestige = nMat - (blocks.x-1) * threads.x;
#ifdef __VERBOSE__
#pragma omp critical
cout << "INFO \t Node " << rank << " Thread " << th_num << "\t Queuing the global reduction kernel with " << blocks.x << " blocks, " << threads.x << endl;
#endif
// Swap the ping-pong buffers so each pass reads the previous pass's output.
if ((i%2)==1 )
{
tmp = d_Output;
d_Output = d_Input;
d_Input = tmp;
}
reductionGlobalSum<<<blocks,threads,0,stream>>>(vestige,d_Input,d_Output,i);
i++;
nMat = blocks.x;
if (blocks.x==1) k=-1;
}
}
/*
 * LUDecomposition: batched, in-place LU factorization of nMperBl_GPU small
 * complex matrices per block, with row scaling and partial pivoting
 * (permutation sign tracked per matrix for the determinant). The scheme
 * appears to follow the Numerical Recipes ludcmp column-update structure —
 * confirm against the project docs.
 *
 * Layout: each matrix gets scrSpace_GPU threads ("vectorIndex"); shared
 * memory is carved from the dynamic BANK into sign / scale / reduce /
 * vectors / indices (see the launcher's size formula). `vestige` is the
 * number of matrices handled by the last (partial) block.
 *
 * NOTE(review): the index macros (vec_index, row_i_index, col_j_index,
 * vec_k_scr, scale_index, diag_j_index, mat_00_index, ...) are defined in
 * a project header not visible here; comments below assume they address
 * the obvious row/column/diagonal elements — verify.
 */
#ifdef __DPREC__
__global__ void LUDecomposition (int vestige, double2 * inputMatrices){
#else
__global__ void LUDecomposition (int vestige, float2 * inputMatrices){
#endif
// Broadcast the constant memory to registers
int lmem_nMperBl = nMperBl_GPU;
int lmem_matSide = matSide_GPU;
int lmem_scrSpace = scrSpace_GPU;
int lmem_scrSpace2 = scrSpace2_GPU;
int lmem_loadSize = loadSize_GPU;
int lmem_warpPerMat = warpPerMat_GPU;
#ifdef __DPREC__
double2 sum, dum,tmp;
__shared__ double sign [ lmem_nMperBl ]; // permutation sign, for determinant
volatile __shared__ double scale [ lmem_nMperBl * lmem_matSide ]; // scaling information, store largest value for each row
volatile __shared__ double2 reduce [ lmem_nMperBl * lmem_scrSpace ]; // a reduction buffer
volatile __shared__ double2 vectors [ lmem_nMperBl * lmem_scrSpace ]; // scratch space for main steps in algorithm
#else
float2 sum, dum,tmp;
float den;
// Carve the dynamic shared-memory bank into the per-matrix regions.
extern __shared__ float BANK[];
volatile float *sign = (float*)BANK;
volatile float *scale = (float*)&sign[lmem_nMperBl];
volatile float2 *reduce = (float2*)&scale[lmem_nMperBl*lmem_matSide];
volatile float2 *vectors= (float2*)&reduce[lmem_nMperBl*lmem_scrSpace];
/*
__shared__ float sign[4];
__shared__ volatile float scale[4*16];
__shared__ volatile float2 reduce[4*16];
__shared__ volatile float2 vectors[4*16];
__shared__ volatile int indices[4*16];*/
#endif
volatile int *indices = (int*)&vectors[lmem_nMperBl*lmem_scrSpace];
// Index to matrix for processing
int myMatrix = threadIdx.x / lmem_scrSpace;
// Index to vector for processing
int vectorIndex = threadIdx.x % lmem_scrSpace;
// Initialize permutation signs
#ifdef __DPREC__
sign[threadIdx.x % lmem_nMperBl]=1.0;
#else
sign[threadIdx.x % lmem_nMperBl]=1.0f;
#endif
// book-keeping; if last block, only need vestige * lmem_scrSpace threads
// NOTE(review): early-returning threads later skip __syncthreads(); safe
// only because whole matrices (all their threads) drop out together —
// confirm this holds for the launch configuration.
if (blockIdx.x == (gridDim.x-1))
if (threadIdx.x >= vestige * lmem_scrSpace)
return;
// offset for this block
int memoryStride = ( blockIdx.x * lmem_nMperBl * lmem_matSide * lmem_loadSize );
// initialize memory
vectors [ threadIdx.x ].x = -1e24;
vectors [ threadIdx.x ].y = -1e24;
// Determine scaling information: scale[row] = |largest element| in the row,
// used below for implicit-pivoting comparisons.
for (int i=0; i < lmem_matSide; ++i){
// load memory
if ( vectorIndex < lmem_loadSize ){
vectors [ vec_index ].x = inputMatrices [ row_i_index ].x;
vectors [ vec_index ].y = inputMatrices [ row_i_index ].y;
}
__syncthreads();
// find maxima
vOP::findVectorMaxima ( vectors, vectorIndex, myMatrix, scrSpace_GPU, scrSpace2_GPU );
// write scaling information
if ( vectorIndex ==i ){
scale [ scale_index ] = abs ( vectors [ vec_00_scr ].x);
}
}
// initialize memory
vectors [ threadIdx.x ].x = 0.0f;
vectors [ threadIdx.x ].y = 0.0f;
//__syncthreads();
// main loops: factor column by column, updating the j-th column against
// all previously factored columns (complex multiply-subtract).
float2 tmpr;
float2 tmpl;
int myWarp = vectorIndex / 32;
for (int j=0; j<lmem_matSide; j++){
// load the j column to shared
if ( vectorIndex < lmem_matSide ){
vectors [ vec_index ].x = inputMatrices [ col_j_index ].x;
vectors [ vec_index ].y = inputMatrices [ col_j_index ].y;
}
// update the j column
__syncthreads();
// Rows above the diagonal (i < j): serialized warp-by-warp because each
// row depends on rows above it in the same column.
for (int i=0; i<lmem_warpPerMat; i++){
if (myWarp==i){
if ( vectorIndex < j ){
sum.x = vectors[vec_index].x;
sum.y = vectors[vec_index].y;
for (int k=0; k< 512; k++){
if (k >= vectorIndex) break;
tmpl.x = inputMatrices[col_k_index].x;
tmpl.y = inputMatrices[col_k_index].y;
tmpr.x = vectors [ vec_k_scr ].x;
tmpr.y = vectors [ vec_k_scr ].y;
sum.x -= (tmpl.x * tmpr.x - tmpl.y * tmpr.y);
sum.y -= (tmpl.y * tmpr.x + tmpl.x * tmpr.y);
vectors[vec_index].x = sum.x;
vectors[vec_index].y = sum.y;
}
}
}
__syncthreads();
}
__syncthreads();
// Rows on/below the diagonal (i >= j): depend only on rows < j, so all
// warps may proceed without a barrier between them.
for (int i=0; i<lmem_warpPerMat; i++){
if(myWarp==i) {
if ((vectorIndex >=j) && (vectorIndex < lmem_matSide)){
sum.x = vectors[vec_index].x;
sum.y = vectors[vec_index].y;
for (int k=0; k< j; k++){
tmpl.x = inputMatrices [ col_k_index ].x;
tmpl.y = inputMatrices [ col_k_index ].y;
tmpr.x = vectors [ vec_k_scr ].x;
tmpr.y = vectors [ vec_k_scr ].y;
sum.x -= (tmpl.x * tmpr.x - tmpl.y * tmpr.y);
sum.y -= (tmpl.y * tmpr.x + tmpl.x * tmpr.y);
vectors[vec_index].x=sum.x;
vectors[vec_index].y=sum.y;
}
}
}
}
__syncthreads();
// write j column back to global
if ( vectorIndex < lmem_matSide ){
inputMatrices [ col_j_index ].x = vectors [ vec_index ].x;
inputMatrices [ col_j_index ].y = vectors [ vec_index ].y;
}
// initialize shared memory
reduce [ threadIdx.x ].x = -1e24;
reduce [ threadIdx.x ].y = -1e24;
//__syncthreads();
// Pivot search: scaled magnitudes of the candidates (rows >= j).
if ((vectorIndex >= j) && (vectorIndex < lmem_matSide)){
// init for pivot search
reduce [ vec_index - j ].x = abs ( vectors [ vec_index ].x ) / scale [ scale_index ];
indices [ vec_index - j ] = vectorIndex;
}
__syncthreads();
vOP::findVectorMaximaKey ( reduce, indices, vectorIndex, myMatrix, scrSpace_GPU, scrSpace2_GPU );
//__syncthreads();
// possible row swap
if (j != indices [ vec_00_scr ]){
if (vectorIndex < lmem_loadSize){
int i = indices [ vec_00_scr ];
// each thread swaps one row element with another row element
sum = inputMatrices [ row_i_index ];
inputMatrices [ row_i_index ] = inputMatrices [ row_j_index ];
inputMatrices [ row_j_index ] = sum;
if (vectorIndex==0){
scale [ vec_i_sca ] = scale [ vec_j_sca ];
sign [ myMatrix ] *= -1.0f;
}
}
}
__syncthreads();
// final scaling: divide the sub-diagonal part of column j by the pivot
// (complex division via the conjugate / squared-magnitude form).
if ( j != lmem_matSide-1){
dum = inputMatrices [ diag_j_index ];
if ((vectorIndex >= j+1) && (vectorIndex < lmem_matSide)){
tmp = inputMatrices [ col_j_index ];
// Perform division tmp/dum
den = dum.x*dum.x + dum.y*dum.y;
tmpl.x = (tmp.x*dum.x + tmp.y*dum.y)/den;
tmpl.y = (tmp.y*dum.x - tmp.x*dum.y)/den;
tmp.x = tmpl.x;
tmp.y = tmpl.y;
inputMatrices [ col_j_index ] = tmp;
}
}
__syncthreads();
}// end j loops
#ifdef WRITE_DET
// Determinant = sign * product of the U diagonal; written to element (0,0).
// init
vectors [ vec_index].x = 1.0f;
vectors [ vec_index].y = 0.0f;
// load diags
if (vectorIndex < lmem_matSide ){
vectors [ vec_index ].x = inputMatrices [ diag_index ].x;
vectors [ vec_index ].y = inputMatrices [ diag_index ].y;
}
__syncthreads();
// pop in sign
if (vectorIndex == 0 ){
vectors [ vec_00_scr ].x *= sign [ myMatrix ];
vectors [ vec_00_scr ].y *= sign [ myMatrix ];
}
__syncthreads();
#ifdef PERFORM_PROD
// every second row will contain product of two dets
if ((myMatrix %2) ==1){
// compiler complains about 'no copy constructor for float2'
// so doing this explicitly
#ifdef __DPREC__
double2 tmpA, tmpB;
#else
float2 tmpA, tmpB;
#endif
tmpA.x = vectors [ vec_1_index].x;
tmpA.y = vectors [ vec_1_index].y;
tmpB.x = vectors [ vec_index].x;
tmpB.y = vectors [ vec_index].y;
vectors [ vec_1_index ].x = tmpA.x*tmpB.x-tmpA.y*tmpB.y;
vectors [ vec_1_index ].y = tmpA.y*tmpB.x+tmpA.x*tmpB.y;
}
__syncthreads();
#endif
// calculate determinants
vOP::findVectorProduct(vectors, vectorIndex, myMatrix, scrSpace_GPU, scrSpace2_GPU);
__syncthreads();
// write out
if (vectorIndex==0){
inputMatrices [ mat_00_index ].x = vectors [ vec_00_scr ].x ;
inputMatrices [ mat_00_index ].y = vectors [ vec_00_scr ].y ;
}
#endif
}
#ifdef __DPREC__
#else
/*
 * reductionGlobalSum: one pass of a multi-pass parallel sum of complex
 * (float2) values. Each block reduces up to RED_SUM_SIZE elements of
 * `input` in shared memory and emits one partial sum.
 *
 * it      - pass number: on pass 0 the input is strided by
 *           2*loadSize_GPU*matSide_GPU (one value per matrix pair);
 *           later passes read densely.
 * vestige - number of valid elements handled by the last block.
 *
 * Fix: each block now writes its partial sum to output[blockIdx.x]. The
 * original wrote every block's sum to output[0], so any pass launched with
 * gridDim.x > 1 discarded all but one block's contribution — the host
 * driver (globalReduction) feeds `output` back in as `input` with dense
 * indexing t_id = RED_SUM_SIZE*blockIdx.x + threadIdx.x and therefore
 * expects gridDim.x partial sums in output[0..gridDim.x-1].
 */
__global__ void reductionGlobalSum(int vestige, float2 *input, float2 *output, int it){
#endif
int t_id;
if (it == 0){
// first pass: strided load, one value per matrix pair
t_id = 2*loadSize_GPU * matSide_GPU* (RED_SUM_SIZE * blockIdx.x + threadIdx.x);
}else{
t_id = RED_SUM_SIZE * blockIdx.x + threadIdx.x;
}
__shared__ volatile float2 scratch[RED_SUM_SIZE];
// init (zero so padded lanes in the last block contribute nothing)
scratch[threadIdx.x].x = 0.0f;
scratch[threadIdx.x].y = 0.0f;
__syncthreads();
// load; last block only reads its `vestige` valid elements
if ( blockIdx.x != (gridDim.x-1) ){
scratch[ threadIdx.x ].x = input[ t_id ].x;
scratch[ threadIdx.x ].y = input[ t_id ].y;
}
else{
if (threadIdx.x < vestige){
scratch[ threadIdx.x ].x = input[ t_id ].x;
scratch[ threadIdx.x ].y = input[ t_id ].y;
}
}
__syncthreads();
// Tree reduction in shared memory. The sub-32 tail relies on legacy
// warp-synchronous execution plus `volatile`; on architectures with
// independent thread scheduling this would need __syncwarp() — kept as-is
// to match the original's assumptions.
#if RED_SUM_SIZE == 1024
if (threadIdx.x < 512){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 512 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 512 ].y;
}
__syncthreads();
#endif
#if RED_SUM_SIZE >= 512
if (threadIdx.x < 256){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 256 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 256 ].y;
}
__syncthreads();
#endif
#if RED_SUM_SIZE >= 256
if (threadIdx.x < 128){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 128 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 128 ].y;
}
__syncthreads();
#endif
#if RED_SUM_SIZE >= 128
if (threadIdx.x < 64){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 64 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 64 ].y;
}
__syncthreads();
#endif
#if RED_SUM_SIZE >= 64
if (threadIdx.x < 32){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 32 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 32 ].y;
}
#endif
#if RED_SUM_SIZE >= 32
if (threadIdx.x < 16){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x+16 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x+16 ].y;
}
#endif
if (threadIdx.x < 8){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 8 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 8 ].y;
}
if (threadIdx.x < 4){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 4 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 4 ].y;
}
if (threadIdx.x < 2){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 2 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 2 ].y;
}
if (threadIdx.x < 1){
scratch [ threadIdx.x ].x += scratch [ threadIdx.x + 1 ].x;
scratch [ threadIdx.x ].y += scratch [ threadIdx.x + 1 ].y;
}
// One partial sum per block (see header comment for the fix rationale).
if (threadIdx.x == 0){
output[blockIdx.x].x = scratch[0].x;
output[blockIdx.x].y = scratch[0].y;
}
}
}
|
d3b550218d19630377c736344a999a5e04504114.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modified from
// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu
/*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
//#define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
__device__ const float EPS = 1e-8;
// Lightweight 2D point for device-side polygon clipping.
struct Point {
  float x, y;
  __device__ Point() {}
  // NOTE: double parameters are narrowed to float on assignment; precision
  // beyond float is intentionally discarded.
  __device__ Point(double _x, double _y) { x = _x, y = _y; }
  // In-place assignment of both coordinates.
  __device__ void set(float _x, float _y) {
    x = _x;
    y = _y;
  }
  // Component-wise vector addition.
  __device__ Point operator+(const Point &b) const {
    return Point(x + b.x, y + b.y);
  }
  // Component-wise vector subtraction.
  __device__ Point operator-(const Point &b) const {
    return Point(x - b.x, y - b.y);
  }
};
// 2D cross product (z-component of a x b); sign encodes orientation,
// magnitude is twice the area of the triangle (0, a, b).
__device__ inline float cross(const Point &a, const Point &b) {
  return a.x * b.y - a.y * b.x;
}
// Cross product of (p1 - p0) x (p2 - p0): positive when p0->p1->p2 turns
// counter-clockwise, negative for clockwise, ~0 for collinear points.
__device__ inline float cross(const Point &p1, const Point &p2,
                              const Point &p0) {
  return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
// Cheap axis-aligned bounding-box rejection test for segments p1-p2 and
// q1-q2.  Returns nonzero when the two segments' bounding boxes overlap on
// both axes (a necessary condition for the segments to intersect).
__device__ int check_rect_cross(const Point &p1, const Point &p2,
                                const Point &q1, const Point &q2) {
  const bool overlap_x = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
                         min(q1.x, q2.x) <= max(p1.x, p2.x);
  const bool overlap_y = min(p1.y, p2.y) <= max(q1.y, q2.y) &&
                         min(q1.y, q2.y) <= max(p1.y, p2.y);
  return overlap_x && overlap_y;
}
// Point-in-rotated-box test.
// box: (5) [x1, y1, x2, y2, angle] — an axis-aligned rect rotated by `angle`
//      about its center.  Rotating p by -angle reduces the test to an
//      axis-aligned containment check with a small tolerance MARGIN.
__device__ inline int check_in_box2d(const float *box, const Point &p) {
  // params: box (5) [x1, y1, x2, y2, angle]
  const float MARGIN = 1e-5;
  float center_x = (box[0] + box[2]) / 2;
  float center_y = (box[1] + box[3]) / 2;
  float angle_cos = cos(-box[4]),
        angle_sin =
            sin(-box[4]);  // rotate the point in the opposite direction of box
  // Un-rotate p about the box center.
  float rot_x =
      (p.x - center_x) * angle_cos - (p.y - center_y) * angle_sin + center_x;
  float rot_y =
      (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y;
#ifdef DEBUG
  printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2],
         box[3], box[4]);
  printf(
      "center: (%.3f, %.3f), cossin(%.3f, %.3f), src(%.3f, %.3f), rot(%.3f, "
      "%.3f)\n",
      center_x, center_y, angle_cos, angle_sin, p.x, p.y, rot_x, rot_y);
#endif
  // Axis-aligned containment of the un-rotated point, tolerant to MARGIN.
  return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN &&
          rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN);
}
// Segment-segment intersection.  Returns 1 and writes the intersection point
// to `ans` when segments p0-p1 and q0-q1 properly cross; returns 0 otherwise.
__device__ inline int intersection(const Point &p1, const Point &p0,
                                   const Point &q1, const Point &q0,
                                   Point &ans) {
  // fast exclusion: bounding boxes must overlap
  if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
  // check cross standing: endpoints of each segment must lie on opposite
  // sides of the other segment (strict inequality excludes touching cases)
  float s1 = cross(q0, p1, p0);
  float s2 = cross(p1, q1, p0);
  float s3 = cross(p0, q1, q0);
  float s4 = cross(q1, p1, q0);
  if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
  // calculate intersection of two lines: interpolate along q0-q1 using the
  // signed areas s1, s5 as barycentric weights
  float s5 = cross(q1, p1, p0);
  if (fabs(s5 - s1) > EPS) {
    ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
    ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
  } else {
    // near-degenerate weights: fall back to solving the two implicit line
    // equations a*x + b*y + c = 0 via Cramer's rule
    float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
    float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
    float D = a0 * b1 - a1 * b0;
    ans.x = (b0 * c1 - b1 * c0) / D;
    ans.y = (a1 * c0 - a0 * c1) / D;
  }
  return 1;
}
// Rotate p in place about `center` by the angle whose cosine/sine are given.
__device__ inline void rotate_around_center(const Point &center,
                                            const float angle_cos,
                                            const float angle_sin, Point &p) {
  const float dx = p.x - center.x;
  const float dy = p.y - center.y;
  const float rx = dx * angle_cos - dy * angle_sin + center.x;
  const float ry = dx * angle_sin + dy * angle_cos + center.y;
  p.set(rx, ry);
}
// Comparator for sorting polygon vertices by polar angle around `center`
// (used by the bubble sort in box_overlap).  Returns nonzero when a's angle
// is greater than b's.  Note: atan2 here is the double-precision overload.
__device__ inline int point_cmp(const Point &a, const Point &b,
                                const Point &center) {
  return atan2(a.y - center.y, a.x - center.x) >
         atan2(b.y - center.y, b.x - center.x);
}
// Overlap area of two rotated boxes, each given as [x1, y1, x2, y2, angle].
// Computes the intersection polygon (edge-edge crossings plus contained
// corners), sorts its vertices counter-clockwise around their centroid, and
// sums the fan triangulation (shoelace) area.
__device__ inline float box_overlap(const float *box_a, const float *box_b) {
  // params: box_a (5) [x1, y1, x2, y2, angle]
  // params: box_b (5) [x1, y1, x2, y2, angle]
  float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3],
        a_angle = box_a[4];
  float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3],
        b_angle = box_b[4];
  Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2);
  Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2);
#ifdef DEBUG
  printf(
      "a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n",
      a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle);
  printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y,
         center_b.x, center_b.y);
#endif
  // Corner arrays hold 5 entries: the first corner is repeated at the end so
  // that edge i is simply corners[i] -> corners[i + 1].
  Point box_a_corners[5];
  box_a_corners[0].set(a_x1, a_y1);
  box_a_corners[1].set(a_x2, a_y1);
  box_a_corners[2].set(a_x2, a_y2);
  box_a_corners[3].set(a_x1, a_y2);
  Point box_b_corners[5];
  box_b_corners[0].set(b_x1, b_y1);
  box_b_corners[1].set(b_x2, b_y1);
  box_b_corners[2].set(b_x2, b_y2);
  box_b_corners[3].set(b_x1, b_y2);
  // get oriented corners
  float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
  float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
  for (int k = 0; k < 4; k++) {
#ifdef DEBUG
    printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k,
           box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x,
           box_b_corners[k].y);
#endif
    rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
    rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
#ifdef DEBUG
    printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x,
           box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
  }
  box_a_corners[4] = box_a_corners[0];
  box_b_corners[4] = box_b_corners[0];
  // get intersection of lines: up to 4x4 edge crossings
  Point cross_points[16];
  Point poly_center;
  int cnt = 0, flag = 0;
  poly_center.set(0, 0);
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      flag = intersection(box_a_corners[i + 1], box_a_corners[i],
                          box_b_corners[j + 1], box_b_corners[j],
                          cross_points[cnt]);
      if (flag) {
        poly_center = poly_center + cross_points[cnt];
        cnt++;
      }
    }
  }
  // check corners: a corner of one box inside the other is also a vertex of
  // the intersection polygon
  for (int k = 0; k < 4; k++) {
    if (check_in_box2d(box_a, box_b_corners[k])) {
      poly_center = poly_center + box_b_corners[k];
      cross_points[cnt] = box_b_corners[k];
      cnt++;
    }
    if (check_in_box2d(box_b, box_a_corners[k])) {
      poly_center = poly_center + box_a_corners[k];
      cross_points[cnt] = box_a_corners[k];
      cnt++;
    }
  }
  // FIX: a degenerate intersection (fewer than 3 vertices) has zero area.
  // Returning early also avoids the 0/0 division below when cnt == 0, which
  // previously produced a NaN poly_center (harmless but ill-formed).
  // For cnt == 1 or 2 the shoelace sum below evaluates to 0 anyway, so this
  // early exit is behavior-preserving.
  if (cnt < 3) {
    return 0.0f;
  }
  poly_center.x /= cnt;
  poly_center.y /= cnt;
  // sort the points of polygon counter-clockwise around the centroid
  // (bubble sort; cnt <= 16 so this is cheap)
  Point temp;
  for (int j = 0; j < cnt - 1; j++) {
    for (int i = 0; i < cnt - j - 1; i++) {
      if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
        temp = cross_points[i];
        cross_points[i] = cross_points[i + 1];
        cross_points[i + 1] = temp;
      }
    }
  }
#ifdef DEBUG
  printf("cnt=%d\n", cnt);
  for (int i = 0; i < cnt; i++) {
    printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x,
           cross_points[i].y);
  }
#endif
  // get the overlap areas: fan triangulation from vertex 0 (shoelace)
  float area = 0;
  for (int k = 0; k < cnt - 1; k++) {
    area += cross(cross_points[k] - cross_points[0],
                  cross_points[k + 1] - cross_points[0]);
  }
  return fabs(area) / 2.0;
}
// Rotated (bird's-eye-view) IoU of two boxes given as [x1, y1, x2, y2, angle].
// EPS in the denominator guards against division by zero for empty boxes.
__device__ inline float iou_bev(const float *box_a, const float *box_b) {
  const float area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]);
  const float area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]);
  const float inter = box_overlap(box_a, box_b);
  return inter / fmaxf(area_a + area_b - inter, EPS);
}
// One thread per (box_a, box_b) pair: grid y walks boxes_a, grid x walks
// boxes_b.  Writes the raw overlap area into ans_overlap (num_a x num_b,
// row-major).
__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a,
                                     const int num_b, const float *boxes_b,
                                     float *ans_overlap) {
  const int row = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;  // boxes_a idx
  const int col = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;  // boxes_b idx
  if (row >= num_a || col >= num_b) {
    return;  // grid over-covers the matrix; discard out-of-range threads
  }
  ans_overlap[row * num_b + col] =
      box_overlap(boxes_a + row * 5, boxes_b + col * 5);
}
// One thread per (box_a, box_b) pair; writes the rotated IoU into the
// row-major (num_a x num_b) matrix ans_iou.  Boxes are rows of 5 floats:
// [x1, y1, x2, y2, angle].
__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a,
                                     const int num_b, const float *boxes_b,
                                     float *ans_iou) {
  const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
  const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
  // Bounds guard: the grid over-covers the matrix.
  if (a_idx >= num_a || b_idx >= num_b) {
    return;
  }
  const float *cur_box_a = boxes_a + a_idx * 5;
  const float *cur_box_b = boxes_b + b_idx * 5;
  float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
  ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}
// Rotated-box NMS: each block compares one row-tile of boxes against one
// column-tile and records, per row box, a 64-bit mask of column boxes whose
// rotated IoU exceeds the threshold.
// boxes: (N, 5) rows of [x1, y1, x2, y2, ry]
// mask:  (N, DIVUP(N, THREADS_PER_BLOCK_NMS)) suppression bitmasks
__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
                           const float *boxes, unsigned long long *mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // Tiles may be partial at the tail of the box list.
  const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  // Stage this block's column tile in shared memory, one box per thread.
  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
  if (threadIdx.x < col_size) {
    const float *src =
        boxes + (THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5;
    for (int k = 0; k < 5; k++) {
      block_boxes[threadIdx.x * 5 + k] = src[k];
    }
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const float *cur_box = boxes + cur_box_idx * 5;
    unsigned long long t = 0;
    // On the diagonal tile a box is only compared against later boxes.
    const int first = (row_start == col_start) ? threadIdx.x + 1 : 0;
    for (int i = first; i < col_size; i++) {
      if (iou_bev(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Axis-aligned IoU of boxes [x1, y1, x2, y2, ...] (any rotation field is
// ignored).  EPS guards the denominator for degenerate boxes.
__device__ inline float iou_normal(float const *const a, float const *const b) {
  const float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
  const float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
  // Clamp to zero when the boxes do not overlap on an axis.
  const float inter =
      fmaxf(right - left, 0.f) * fmaxf(bottom - top, 0.f);
  const float area_a = (a[2] - a[0]) * (a[3] - a[1]);
  const float area_b = (b[2] - b[0]) * (b[3] - b[1]);
  return inter / fmaxf(area_a + area_b - inter, EPS);
}
// Axis-aligned NMS: identical tiling scheme to nms_kernel above, but uses
// iou_normal (rotation field ignored) instead of the rotated IoU.
__global__ void nms_normal_kernel(const int boxes_num,
                                  const float nms_overlap_thresh,
                                  const float *boxes,
                                  unsigned long long *mask) {
  // params: boxes (N, 5) [x1, y1, x2, y2, ry]
  // params: mask (N, N/THREADS_PER_BLOCK_NMS)
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  // if (row_start > col_start) return;
  // Tiles may be partial at the tail of the box list.
  const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
                             THREADS_PER_BLOCK_NMS);
  // Stage this block's column tile of boxes in shared memory.
  __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] =
        boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4];
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
    const float *cur_box = boxes + cur_box_idx * 5;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    // On the diagonal tile a box is only compared against later boxes.
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (iou_normal(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Host launcher for boxes_overlap_kernel: one thread per (a, b) box pair.
void boxesoverlapLauncher(const int num_a, const float *boxes_a,
                          const int num_b, const float *boxes_b,
                          float *ans_overlap) {
  const dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
  const dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK),   // x: cols over boxes_b
                    DIVUP(num_a, THREADS_PER_BLOCK));  // y: rows over boxes_a
  hipLaunchKernelGGL(( boxes_overlap_kernel), dim3(blocks), dim3(threads), 0, 0, num_a, boxes_a, num_b, boxes_b,
                                             ans_overlap);
#ifdef DEBUG
  hipDeviceSynchronize();  // for using printf in kernel function
#endif
}
// Host launcher for boxes_iou_bev_kernel: one thread per (a, b) box pair.
void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b,
                         const float *boxes_b, float *ans_iou) {
  const dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
  const dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK),   // x: cols over boxes_b
                    DIVUP(num_a, THREADS_PER_BLOCK));  // y: rows over boxes_a
  hipLaunchKernelGGL(( boxes_iou_bev_kernel), dim3(blocks), dim3(threads), 0, 0, num_a, boxes_a, num_b, boxes_b,
                                             ans_iou);
}
// Host launcher for rotated-box NMS; mask must hold
// boxes_num * DIVUP(boxes_num, THREADS_PER_BLOCK_NMS) entries.
void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num,
                 float nms_overlap_thresh) {
  const int tiles = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  dim3 blocks(tiles, tiles);  // (col tile, row tile)
  dim3 threads(THREADS_PER_BLOCK_NMS);
  hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes, mask);
}
// Host launcher for axis-aligned NMS; same mask layout as nmsLauncher.
void nmsNormalLauncher(const float *boxes, unsigned long long *mask,
                       int boxes_num, float nms_overlap_thresh) {
  const int tiles = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  dim3 blocks(tiles, tiles);  // (col tile, row tile)
  dim3 threads(THREADS_PER_BLOCK_NMS);
  hipLaunchKernelGGL(( nms_normal_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes,
                                           mask);
}
| d3b550218d19630377c736344a999a5e04504114.cu | // Modified from
// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu
/*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
//#define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
__device__ const float EPS = 1e-8;
struct Point {
float x, y;
__device__ Point() {}
__device__ Point(double _x, double _y) { x = _x, y = _y; }
__device__ void set(float _x, float _y) {
x = _x;
y = _y;
}
__device__ Point operator+(const Point &b) const {
return Point(x + b.x, y + b.y);
}
__device__ Point operator-(const Point &b) const {
return Point(x - b.x, y - b.y);
}
};
__device__ inline float cross(const Point &a, const Point &b) {
return a.x * b.y - a.y * b.x;
}
__device__ inline float cross(const Point &p1, const Point &p2,
const Point &p0) {
return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
// Axis-aligned bounding-box rejection test for segments p1-p2 and q1-q2.
// Nonzero when the segments' bounding boxes overlap on both axes.
__device__ int check_rect_cross(const Point &p1, const Point &p2,
                                const Point &q1, const Point &q2) {
  const bool overlap_x = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
                         min(q1.x, q2.x) <= max(p1.x, p2.x);
  const bool overlap_y = min(p1.y, p2.y) <= max(q1.y, q2.y) &&
                         min(q1.y, q2.y) <= max(p1.y, p2.y);
  return overlap_x && overlap_y;
}
__device__ inline int check_in_box2d(const float *box, const Point &p) {
// params: box (5) [x1, y1, x2, y2, angle]
const float MARGIN = 1e-5;
float center_x = (box[0] + box[2]) / 2;
float center_y = (box[1] + box[3]) / 2;
float angle_cos = cos(-box[4]),
angle_sin =
sin(-box[4]); // rotate the point in the opposite direction of box
float rot_x =
(p.x - center_x) * angle_cos - (p.y - center_y) * angle_sin + center_x;
float rot_y =
(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y;
#ifdef DEBUG
printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2],
box[3], box[4]);
printf(
"center: (%.3f, %.3f), cossin(%.3f, %.3f), src(%.3f, %.3f), rot(%.3f, "
"%.3f)\n",
center_x, center_y, angle_cos, angle_sin, p.x, p.y, rot_x, rot_y);
#endif
return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN &&
rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN);
}
__device__ inline int intersection(const Point &p1, const Point &p0,
const Point &q1, const Point &q0,
Point &ans) {
// fast exclusion
if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
// check cross standing
float s1 = cross(q0, p1, p0);
float s2 = cross(p1, q1, p0);
float s3 = cross(p0, q1, q0);
float s4 = cross(q1, p1, q0);
if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
// calculate intersection of two lines
float s5 = cross(q1, p1, p0);
if (fabs(s5 - s1) > EPS) {
ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
} else {
float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
float D = a0 * b1 - a1 * b0;
ans.x = (b0 * c1 - b1 * c0) / D;
ans.y = (a1 * c0 - a0 * c1) / D;
}
return 1;
}
// Rotate p in place about `center` by the angle whose cosine/sine are given.
__device__ inline void rotate_around_center(const Point &center,
                                            const float angle_cos,
                                            const float angle_sin, Point &p) {
  const float dx = p.x - center.x;
  const float dy = p.y - center.y;
  const float rx = dx * angle_cos - dy * angle_sin + center.x;
  const float ry = dx * angle_sin + dy * angle_cos + center.y;
  p.set(rx, ry);
}
// Comparator for sorting polygon vertices by polar angle around `center`
// (used by the bubble sort in box_overlap).  Returns nonzero when a's angle
// is greater than b's.  Note: atan2 here is the double-precision overload.
__device__ inline int point_cmp(const Point &a, const Point &b,
                                const Point &center) {
  return atan2(a.y - center.y, a.x - center.x) >
         atan2(b.y - center.y, b.x - center.x);
}
__device__ inline float box_overlap(const float *box_a, const float *box_b) {
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3],
a_angle = box_a[4];
float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3],
b_angle = box_b[4];
Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2);
Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2);
#ifdef DEBUG
printf(
"a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n",
a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle);
printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y,
center_b.x, center_b.y);
#endif
Point box_a_corners[5];
box_a_corners[0].set(a_x1, a_y1);
box_a_corners[1].set(a_x2, a_y1);
box_a_corners[2].set(a_x2, a_y2);
box_a_corners[3].set(a_x1, a_y2);
Point box_b_corners[5];
box_b_corners[0].set(b_x1, b_y1);
box_b_corners[1].set(b_x2, b_y1);
box_b_corners[2].set(b_x2, b_y2);
box_b_corners[3].set(b_x1, b_y2);
// get oriented corners
float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
for (int k = 0; k < 4; k++) {
#ifdef DEBUG
printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k,
box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x,
box_b_corners[k].y);
#endif
rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
#ifdef DEBUG
printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x,
box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
}
box_a_corners[4] = box_a_corners[0];
box_b_corners[4] = box_b_corners[0];
// get intersection of lines
Point cross_points[16];
Point poly_center;
int cnt = 0, flag = 0;
poly_center.set(0, 0);
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
flag = intersection(box_a_corners[i + 1], box_a_corners[i],
box_b_corners[j + 1], box_b_corners[j],
cross_points[cnt]);
if (flag) {
poly_center = poly_center + cross_points[cnt];
cnt++;
}
}
}
// check corners
for (int k = 0; k < 4; k++) {
if (check_in_box2d(box_a, box_b_corners[k])) {
poly_center = poly_center + box_b_corners[k];
cross_points[cnt] = box_b_corners[k];
cnt++;
}
if (check_in_box2d(box_b, box_a_corners[k])) {
poly_center = poly_center + box_a_corners[k];
cross_points[cnt] = box_a_corners[k];
cnt++;
}
}
poly_center.x /= cnt;
poly_center.y /= cnt;
// sort the points of polygon
Point temp;
for (int j = 0; j < cnt - 1; j++) {
for (int i = 0; i < cnt - j - 1; i++) {
if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
temp = cross_points[i];
cross_points[i] = cross_points[i + 1];
cross_points[i + 1] = temp;
}
}
}
#ifdef DEBUG
printf("cnt=%d\n", cnt);
for (int i = 0; i < cnt; i++) {
printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x,
cross_points[i].y);
}
#endif
// get the overlap areas
float area = 0;
for (int k = 0; k < cnt - 1; k++) {
area += cross(cross_points[k] - cross_points[0],
cross_points[k + 1] - cross_points[0]);
}
return fabs(area) / 2.0;
}
// Rotated (bird's-eye-view) IoU of two boxes given as [x1, y1, x2, y2, angle].
// EPS in the denominator guards against division by zero for empty boxes.
__device__ inline float iou_bev(const float *box_a, const float *box_b) {
  const float area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]);
  const float area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]);
  const float inter = box_overlap(box_a, box_b);
  return inter / fmaxf(area_a + area_b - inter, EPS);
}
// One thread per (box_a, box_b) pair: grid y walks boxes_a, grid x walks
// boxes_b.  Writes the raw overlap area into ans_overlap (num_a x num_b,
// row-major).
__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a,
                                     const int num_b, const float *boxes_b,
                                     float *ans_overlap) {
  const int row = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;  // boxes_a idx
  const int col = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;  // boxes_b idx
  if (row >= num_a || col >= num_b) {
    return;  // grid over-covers the matrix; discard out-of-range threads
  }
  ans_overlap[row * num_b + col] =
      box_overlap(boxes_a + row * 5, boxes_b + col * 5);
}
// One thread per (box_a, box_b) pair; writes the rotated IoU into the
// row-major (num_a x num_b) matrix ans_iou.  Boxes are rows of 5 floats:
// [x1, y1, x2, y2, angle].
__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a,
                                     const int num_b, const float *boxes_b,
                                     float *ans_iou) {
  const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
  const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
  // Bounds guard: the grid over-covers the matrix.
  if (a_idx >= num_a || b_idx >= num_b) {
    return;
  }
  const float *cur_box_a = boxes_a + a_idx * 5;
  const float *cur_box_b = boxes_b + b_idx * 5;
  float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
  ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}
__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask) {
// params: boxes (N, 5) [x1, y1, x2, y2, ry]
// params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_bev(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// Axis-aligned IoU of boxes [x1, y1, x2, y2, ...] (any rotation field is
// ignored).  EPS guards the denominator for degenerate boxes.
__device__ inline float iou_normal(float const *const a, float const *const b) {
  const float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
  const float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
  // Clamp to zero when the boxes do not overlap on an axis.
  const float inter =
      fmaxf(right - left, 0.f) * fmaxf(bottom - top, 0.f);
  const float area_a = (a[2] - a[0]) * (a[3] - a[1]);
  const float area_b = (b[2] - b[0]) * (b[3] - b[1]);
  return inter / fmaxf(area_a + area_b - inter, EPS);
}
__global__ void nms_normal_kernel(const int boxes_num,
const float nms_overlap_thresh,
const float *boxes,
unsigned long long *mask) {
// params: boxes (N, 5) [x1, y1, x2, y2, ry]
// params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_normal(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// Host launcher for boxes_overlap_kernel: one thread per (a, b) box pair.
void boxesoverlapLauncher(const int num_a, const float *boxes_a,
                          const int num_b, const float *boxes_b,
                          float *ans_overlap) {
  const dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
  const dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK),   // x: cols over boxes_b
                    DIVUP(num_a, THREADS_PER_BLOCK));  // y: rows over boxes_a
  boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b,
                                            ans_overlap);
#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
// Host launcher for boxes_iou_bev_kernel: one thread per (a, b) box pair.
void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b,
                         const float *boxes_b, float *ans_iou) {
  const dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
  const dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK),   // x: cols over boxes_b
                    DIVUP(num_a, THREADS_PER_BLOCK));  // y: rows over boxes_a
  boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b,
                                            ans_iou);
}
// Host launcher for rotated-box NMS; mask must hold
// boxes_num * DIVUP(boxes_num, THREADS_PER_BLOCK_NMS) entries.
void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num,
                 float nms_overlap_thresh) {
  const int tiles = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  dim3 blocks(tiles, tiles);  // (col tile, row tile)
  dim3 threads(THREADS_PER_BLOCK_NMS);
  nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
}
// Host launcher for axis-aligned NMS; same mask layout as nmsLauncher.
void nmsNormalLauncher(const float *boxes, unsigned long long *mask,
                       int boxes_num, float nms_overlap_thresh) {
  const int tiles = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
  dim3 blocks(tiles, tiles);  // (col tile, row tile)
  dim3 threads(THREADS_PER_BLOCK_NMS);
  nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes,
                                         mask);
}
|
f3a9408eb0d1bd324b9fd17b40f8691cc58dff13.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include "common.h"
#include "flow1.h"
#include "flow3.h"
// Driver: runs flow1() and flow3() over the same preprocessed log and checks
// that both implementations produce identical task matrices.
// argv: <n_runs> <threads_per_block> <logfile.pre>
int main(int argc, char **argv)
{
    if (argc != 4)
    {
        printf("Usage: %s 1 2 logfile.pre\n", argv[0]);
        printf("1: number of runs\n");
        printf("2: number of threads per block (for GPU version)\n");
        return 0;
    }
    // Globals consumed by the flow implementations (declared in common.h).
    n_runs = atoi(argv[1]);
    n_threads_per_block = atoi(argv[2]);
    read_log(argv[argc-1]);
    n_matrix = n_tasks*n_tasks;
    // Both flows return a freshly allocated n_tasks x n_tasks matrix.
    int* matrix1 = flow1();
    // print_matrix(matrix1, n_matrix);
    int* matrix3 = flow3();
    // print_matrix(matrix3, n_matrix);
    const int identical =
        (memcmp(matrix1, matrix3, n_matrix*sizeof(int)) == 0);
    printf("Check matrix: ");
    printf(identical ? "OK" : "Not OK");
    printf("\n");
    free(matrix1);
    free(matrix3);
    free_log();
    return 0;
}
| f3a9408eb0d1bd324b9fd17b40f8691cc58dff13.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include "common.h"
#include "flow1.h"
#include "flow3.h"
// Driver: runs flow1() and flow3() over the same preprocessed log and checks
// that both implementations produce identical task matrices.
// argv: <n_runs> <threads_per_block> <logfile.pre>
int main(int argc, char **argv)
{
    if (argc != 4)
    {
        printf("Usage: %s 1 2 logfile.pre\n", argv[0]);
        printf("1: number of runs\n");
        printf("2: number of threads per block (for GPU version)\n");
        return 0;
    }
    // Globals consumed by the flow implementations (declared in common.h).
    n_runs = atoi(argv[1]);
    n_threads_per_block = atoi(argv[2]);
    read_log(argv[argc-1]);
    n_matrix = n_tasks*n_tasks;
    // Both flows return a freshly allocated n_tasks x n_tasks matrix.
    int* matrix1 = flow1();
    // print_matrix(matrix1, n_matrix);
    int* matrix3 = flow3();
    // print_matrix(matrix3, n_matrix);
    printf("Check matrix: ");
    if (memcmp(matrix1, matrix3, n_matrix*sizeof(int)) == 0)
    {
        printf("OK");
    }
    else
    {
        printf("Not OK");
    }
    printf("\n");
    free(matrix1);
    free(matrix3);
    free_log();
    return 0;
}
|
22263eef26e46d698bc5d9a0559363581b77a4ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
/**
* Computes the log of reaction rate.
* @param a: Pointer to coefficient matrix.
* @param temp: Pointer to temperature array.
* @param lam: Matrix to write the results to.
* @param nsets: Number of sets / number of rows in coefficient matrix.
* @param ncells: Number of cells / length of temperature array.
* @param ncoeff: Number of coefficients / number of columns in coefficient matrix.
*/
// Generic reaction-rate accumulator (see the doxygen block above): each
// (set i, cell j) entry of `lam` accumulates, via atomicAdd, one term per
// coefficient k of the series a[i][k] * T9^((2k-5)/3), with k==0 the constant
// term and k==6 the ln(T9) term.  A 3D grid-stride loop covers
// (nsets x ncells x ncoeff).  NOTE(review): atomicAdd on a non-float dtype
// (e.g. double) needs hardware support — verify the targeted architectures.
template <class dtype>
__device__ void rates(dtype *a, dtype *temp, dtype *lam, int nsets, int ncells, int ncoeff)
{
    const int i0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int istep = blockDim.x * gridDim.x;
    const int j0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int jstep = blockDim.y * gridDim.y;
    const int k0 = blockIdx.z * blockDim.z + threadIdx.z;
    const int kstep = blockDim.z * gridDim.z;
    for(int i = i0; i < nsets; i += istep)
    {
        for(int j = j0; j < ncells; j += jstep)
        {
            // Temperature in units of 10^9 K.
            dtype temp9 = temp[j] * 1.0e-9;
            for(int k = k0; k < ncoeff; k += kstep)
            {
                if(k == 0)
                {
                    atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k]);
                }
                else if(k == 6)
                {
                    atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * log(temp9));
                }
                else
                {
                    atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * pow(temp9, (2 * k - 5) / 3.0));
                }
            }
        }
    }
}
// Single-precision specialization of rates(): identical structure to the
// generic template but uses the float math intrinsics (logf/powf) and the
// native float atomicAdd.
template <>
__device__ void rates<float>(float *a, float *temp, float *lam, int nsets, int ncells, int ncoeff)
{
    // 3D grid-stride loop bounds over (set, cell, coefficient).
    int istart = blockIdx.x * blockDim.x + threadIdx.x;
    int istep = blockDim.x * gridDim.x;
    int jstart = blockIdx.y * blockDim.y + threadIdx.y;
    int jstep = blockDim.y * gridDim.y;
    int kstart = blockIdx.z * blockDim.z + threadIdx.z;
    int kstep = blockDim.z * gridDim.z;
    for(int i = istart; i < nsets; i += istep)
    {
        for(int j = jstart; j < ncells; j += jstep)
        {
            // Temperature in units of 10^9 K.
            float temp9 = temp[j] * 1.0e-9;
            for(int k = kstart; k < ncoeff; k += kstep)
            {
                switch(k)
                {
                    case 0:
                        // Constant term of the series.
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k]);
                        break;
                    case 6:
                        // Logarithmic term.
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * logf(temp9));
                        break;
                    default:
                        // Power-law terms: T9^((2k-5)/3).
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * powf(temp9, (2 * k - 5) / 3.0f));
                        break;
                }
            }
        }
    }
}
// Builds synthetic inputs (coefficient matrix and temperatures) in shared
// memory, zeroes the result matrix, then accumulates the reaction-rate series
// into lam via rates().  Expects a launch with at least
// nsets*ncells*ncoeff threads.
template <class dtype, int nsets, int ncells, int ncoeff>
__global__ void exec(dtype* lam)
{
    // Tensors (block-local inputs; lam is global output of shape nsets x ncells)
    __shared__ dtype a[nsets * ncoeff];
    __shared__ dtype temp[ncells];
    int xInd = blockIdx.x * blockDim.x + threadIdx.x;
    int yInd = blockIdx.y * blockDim.y + threadIdx.y;
    int ySize = blockDim.y * gridDim.y;
    int zInd = blockIdx.z * blockDim.z + threadIdx.z;
    int zSize = blockDim.z * gridDim.z;
    // Flatten the 3D thread index so one thread initializes one element.
    int ind = xInd * ySize * zSize + yInd * zSize + zInd;
    /********************************
     * Initialize coefficient matrix *
     ********************************/
    if(ind < nsets * ncoeff)
    {
        if(ind % ncoeff != 7)
        {
            a[ind] = ind - (ind / ncoeff - 1);
        }
        else
        {
            // Column 7 is deliberately zeroed.
            a[ind] = 0.0;
        }
    }
    /******************************************
     * Initialize the temperature in each cell *
     ******************************************/
    if(ind < ncells)
    {
        temp[ind] = (ind + 1) * 1e9;
    }
    /****************************
     * Zero the array of results *
     ****************************/
    if(ind < nsets * ncells)
    {
        lam[ind] = 0.0;
    }
    // BUG FIX: barrier before the accumulation phase.  Without it, threads
    // could enter rates() and read the shared arrays a/temp before other
    // threads finished writing them, or atomicAdd into lam entries that are
    // zeroed afterwards — a data race yielding nondeterministic results.
    // NOTE(review): the barrier is block-local; zeroing the global `lam`
    // here is only race-free because main() launches a single block —
    // confirm if the launch configuration ever changes.
    __syncthreads();
    /*******************************************
     * Compute ln(lambda) for each set and cell *
     *******************************************/
    rates<dtype>(a, temp, lam, nsets, ncells, ncoeff);
}
/**
 * Driver: allocates a managed result buffer, launches exec() with one thread
 * per (set, cell, coefficient) triple in a single block, then prints the
 * nsets x ncells matrix of ln(lambda) values.
 */
int main()
{
    // Tensor dimensions
    const int nsets = 4, ncells = 4, ncoeff = 8;
    // Managed result buffer, readable from the host after a device sync.
    float *lam = NULL;
    if (hipMallocManaged(&lam, nsets * ncells * sizeof(float)) != hipSuccess)
    {
        // Previously the allocation result was ignored; a failure would have
        // crashed on the first host read below.
        fprintf(stderr, "hipMallocManaged failed\n");
        return 1;
    }
    // One block; one thread per (set, cell, coefficient) triple.
    dim3 threadsPerBlock(nsets, ncells, ncoeff);
    dim3 numBlocks(1, 1, 1);
    hipLaunchKernelGGL(( exec<float, nsets, ncells, ncoeff>), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, lam);
    // Wait for the kernel to finish before reading managed memory on the host.
    hipDeviceSynchronize();
    // Print ln(lambda)
    printf("lambda:\n");
    for(int i = 0; i < nsets; i++)
    {
        for(int j = 0; j < ncells; j++)
        {
            printf("%.3f\t", lam[i * ncells + j]);
        }
        printf("\n");
    }
    // Release the managed allocation (previously leaked).
    hipFree(lam);
    return 0;
}
| 22263eef26e46d698bc5d9a0559363581b77a4ce.cu | #include <stdio.h>
#include <stdlib.h>
/**
* Computes the log of reaction rate.
* @param a: Pointer to coefficient matrix.
* @param temp: Pointer to temperature array.
* @param lam: Matrix to write the results to.
* @param nsets: Number of sets / number of rows in coefficient matrix.
* @param ncells: Number of cells / length of temperature array.
* @param ncoeff: Number of coefficients / number of columns in coefficient matrix.
*/
// Accumulates reaction-rate terms into lam[i * ncells + j] via atomicAdd:
// k==0 -> a[i][0]; k==6 -> a[i][6]*ln(T9); otherwise a[i][k]*T9^((2k-5)/3),
// with T9 = temp[j]*1e-9. Uses 3-D grid-stride loops over (set, cell, coeff);
// lam must be zeroed before entry.
// NOTE(review): atomicAdd on double requires compute capability 6.0+ — confirm
// the build target when dtype == double.
template <class dtype>
__device__ void rates(dtype *a, dtype *temp, dtype *lam, int nsets, int ncells, int ncoeff)
{
    // Grid-stride start/step for each loop dimension.
    int istart = blockIdx.x * blockDim.x + threadIdx.x;
    int istep = blockDim.x * gridDim.x;
    int jstart = blockIdx.y * blockDim.y + threadIdx.y;
    int jstep = blockDim.y * gridDim.y;
    int kstart = blockIdx.z * blockDim.z + threadIdx.z;
    int kstep = blockDim.z * gridDim.z;
    for(int i = istart; i < nsets; i += istep)
    {
        for(int j = jstart; j < ncells; j += jstep)
        {
            // Temperature in units of 10^9 K.
            dtype temp9 = temp[j] * 1.0e-9;
            for(int k = kstart; k < ncoeff; k += kstep)
            {
                switch(k)
                {
                    case 0:
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k]);
                        break;
                    case 6:
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * log(temp9));
                        break;
                    default:
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * pow(temp9, (2 * k - 5) / 3.0));
                        break;
                }
            }
        }
    }
}
// Float specialization of rates(): same accumulation scheme, but uses the
// single-precision logf/powf (and a float exponent literal) instead of the
// double-precision log/pow of the generic template.
template <>
__device__ void rates<float>(float *a, float *temp, float *lam, int nsets, int ncells, int ncoeff)
{
    // Grid-stride start/step for each loop dimension.
    int istart = blockIdx.x * blockDim.x + threadIdx.x;
    int istep = blockDim.x * gridDim.x;
    int jstart = blockIdx.y * blockDim.y + threadIdx.y;
    int jstep = blockDim.y * gridDim.y;
    int kstart = blockIdx.z * blockDim.z + threadIdx.z;
    int kstep = blockDim.z * gridDim.z;
    for(int i = istart; i < nsets; i += istep)
    {
        for(int j = jstart; j < ncells; j += jstep)
        {
            // Temperature in units of 10^9 K.
            float temp9 = temp[j] * 1.0e-9;
            for(int k = kstart; k < ncoeff; k += kstep)
            {
                switch(k)
                {
                    case 0:
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k]);
                        break;
                    case 6:
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * logf(temp9));
                        break;
                    default:
                        atomicAdd(&lam[i * ncells + j], a[i * ncoeff + k] * powf(temp9, (2 * k - 5) / 3.0f));
                        break;
                }
            }
        }
    }
}
/**
 * Initializes the coefficient matrix and per-cell temperatures in shared
 * memory, zeroes the result array, then accumulates ln(lambda) for every
 * (set, cell) pair via rates().
 * Expected launch: a single block of nsets x ncells x ncoeff threads — the
 * shared arrays are only written by the first nsets*ncoeff linearized thread
 * ids, so additional blocks would see uninitialized shared memory.
 */
template <class dtype, int nsets, int ncells, int ncoeff>
__global__ void exec(dtype* lam)
{
    // Tensors (per-block scratch).
    __shared__ dtype a[nsets * ncoeff];
    __shared__ dtype temp[ncells];
    int xInd = blockIdx.x * blockDim.x + threadIdx.x;
    int yInd = blockIdx.y * blockDim.y + threadIdx.y;
    int ySize = blockDim.y * gridDim.y;
    int zInd = blockIdx.z * blockDim.z + threadIdx.z;
    int zSize = blockDim.z * gridDim.z;
    // Linearized global thread id, used to fan threads out over the init work.
    int ind = xInd * ySize * zSize + yInd * zSize + zInd;
    /********************************
     * Initialize coefficient matrix *
     ********************************/
    if(ind < nsets * ncoeff)
    {
        if(ind % ncoeff != 7)
        {
            a[ind] = ind - (ind / ncoeff - 1);
        }
        else
        {
            // Column 7 is zeroed in every set.
            a[ind] = 0.0;
        }
    }
    /******************************************
     * Initialize the temperature in each cell *
     ******************************************/
    if(ind < ncells)
    {
        temp[ind] = (ind + 1) * 1e9;
    }
    /****************************
     * Zero the array of results *
     ****************************/
    if(ind < nsets * ncells)
    {
        lam[ind] = 0.0;
    }
    // FIX: barrier before the reduction. rates() reads a[]/temp[] and
    // atomically accumulates into lam[], all of which were just written by
    // *other* threads; without this barrier the reduction races with the
    // initialization above. Reached unconditionally by every thread in the
    // block, so it is safe.
    __syncthreads();
    /*******************************************
     * Compute ln(lambda) for each set and cell *
     *******************************************/
    rates<dtype>(a, temp, lam, nsets, ncells, ncoeff);
}
/**
 * Driver: allocates a managed result buffer, launches exec() with one thread
 * per (set, cell, coefficient) triple in a single block, then prints the
 * nsets x ncells matrix of ln(lambda) values.
 */
int main()
{
    // Tensor dimensions
    const int nsets = 4, ncells = 4, ncoeff = 8;
    // Managed result buffer, readable from the host after a device sync.
    float *lam = NULL;
    if (cudaMallocManaged(&lam, nsets * ncells * sizeof(float)) != cudaSuccess)
    {
        // Previously the allocation result was ignored; a failure would have
        // crashed on the first host read below.
        fprintf(stderr, "cudaMallocManaged failed\n");
        return 1;
    }
    // One block; one thread per (set, cell, coefficient) triple.
    dim3 threadsPerBlock(nsets, ncells, ncoeff);
    dim3 numBlocks(1, 1, 1);
    exec<float, nsets, ncells, ncoeff><<<numBlocks, threadsPerBlock>>>(lam);
    // Wait for the kernel to finish before reading managed memory on the host.
    cudaDeviceSynchronize();
    // Print ln(lambda)
    printf("lambda:\n");
    for(int i = 0; i < nsets; i++)
    {
        for(int j = 0; j < ncells; j++)
        {
            printf("%.3f\t", lam[i * ncells + j]);
        }
        printf("\n");
    }
    // Release the managed allocation (previously leaked).
    cudaFree(lam);
    return 0;
}
|
c76d75598efb6f3f97dfd189e7dda9f410393921.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* BestDiffKernel.cu
* heuristic CUDA
*
* Created by Roberto Roverso on 25/08/09.
* Copyright 2009 Peerialism. All rights reserved.
*
*/
#include <stdio.h>
//#include "Difference.c"
//#define KERNEL_DEBUG
#define NEGINF -9999.0f
#define BLOCKSIZE 16
// Element accessor: value at (row, col) in the row-major happiness matrix.
__device__ float m(AijMatrix aijMatrix, int row, int col) {
	const int flatIndex = row * aijMatrix.width + col;
	return aijMatrix.els[flatIndex];
}
// Marks a Difference record as "no worthwhile switch" by resetting its ids
// to the sentinel value -1 (value/bestChange are left untouched).
__device__ void setEmptyDiff(Difference* diff) {
	diff->bestChangeAssigned = -1;
	diff->myAssigned = -1;
	diff->index = -1;
}
// Fills one Difference record describing the best assignment switch found for
// person `idx`: the target column, the current occupants, and the gain value.
__device__ void setDifferenceProps(Difference* diff, int idx, int bestChange,
		int myAssigned, int bestChangeAssigned, float value) {
	diff->value = value;
	diff->index = idx;
	diff->myAssigned = myAssigned;
	diff->bestChangeAssigned = bestChangeAssigned;
	diff->bestChange = bestChange;
}
/**
 * Re-runs the best-switch scan for a single person (row) after its banned
 * switches have been cleared by the caller (evaluateDiff).
 * Scans every column of A for the change with the largest gain and writes the
 * result into diffs[rowId] / srtDiffs[rowId]; writes an empty diff when no
 * change is worth more than the 0.1 threshold.
 * NOTE(review): when bestChangeCol stays -1 and clearedBannedSwitches[myRow]
 * is not set, the function returns without writing diffs/srtDiffs — the
 * previous values remain; confirm callers expect that.
 */
__device__ void bestDiffInternalRepeat(int rowId, Difference* diffs,float* srtDiffs,
		AijMatrix A, int* persons, int* objects, char* bannedSwitches,
		char* clearedBannedSwitches) {
	// Minimum gain worth acting on; replaced by -inf when unassigned so any
	// free object is accepted.
	float maxDiff = 0.009f;
	// if (persons[rowId] == -1 || reset[0] == 1)
	// {
	int bestChangeCol = -1;
	int myCol = persons[rowId];  // current assignment of this person (-1 = none)
	if (myCol == -1)
		maxDiff = NEGINF;
	int myRow = rowId;
	int foundFreeObject = 0;
	int otherCol;
	int m1 = 0;
	int m2 = A.width;
	// if (reset[0] == 0)
	// {
	//
	// m1 = blockDim.x * blockIdx.x;
	// m2 = blockDim.x * (blockIdx.x + 1);
	// }
	// Scan all columns for the most profitable change.
	for (otherCol = m1; otherCol < m2; otherCol++)
	{
		int otherRow = objects[otherCol];  // who currently holds this object
		float difference = NEGINF;
		// Person is not assigned
		if (myCol == -1)
		{
			// Object considered not assigned
			if (otherRow == -1)
			{
				// happiness value for the per-obj association
				difference = m(A, myRow, otherCol);
				if (foundFreeObject == 0)
				{
					maxDiff = difference;
					bestChangeCol = otherCol;
				}
				foundFreeObject = 1;
			} else if (foundFreeObject == 0 && !bannedSwitches[myRow * A.width
					+ otherRow])
				// object is not free
				// Compare old case with new case
				// pos...better me
				// neg...better him
				difference = m(A, myRow, otherCol) - m(A, otherRow, otherCol);
		} else if (otherRow == -1)
			// Compare old case with new case
			difference = m(A, myRow, otherCol) - m(A, myRow, myCol);
		else if (m(A, otherRow, myCol) != NEGINF)
		{
			// Both assigned
			// Switch improves overall happiness of the two assignments
			difference = m(A, myRow, otherCol) + m(A, otherRow, myCol) - (m(A,
					myRow, myCol) + m(A, otherRow, otherCol));
		}
		if (difference > maxDiff)
		{
			maxDiff = difference;
			bestChangeCol = otherCol;
		}
	}
	//#ifdef KERNEL_DEBUG
	//	printf("D%d -> %f\n",rowId,maxDiff);
	//#endif
	// Magnitude of the gain decides whether the switch is worth recording.
	if (maxDiff < 0)
	{
		maxDiff = -maxDiff;
	}
	if (maxDiff > 0.1 || myCol == -1)
	{
		if (bestChangeCol == -1)
		{
			if (clearedBannedSwitches[myRow])
			{
				// Nothing assignable even after clearing bans: unassign.
				persons[myRow] = -1;
				setEmptyDiff(&diffs[myRow]);
				srtDiffs[rowId]=0.0f;
				return;
			}
			return;
		}
		if (myCol == -1)
			// Boost unassigned persons so they are prioritized downstream.
			maxDiff = maxDiff * 1000;
		setDifferenceProps(&diffs[rowId], rowId, bestChangeCol, persons[rowId],
				objects[bestChangeCol], maxDiff);
		srtDiffs[rowId]=maxDiff;
		return;
	}
	// Difference not worth considering.
	setEmptyDiff(&diffs[myRow]);
	srtDiffs[rowId]=0.0f;
	return;
	// }
}
/**
 * One thread per person (row): scans every object (column) of the happiness
 * matrix A and records the most profitable assignment change in diffs[rowId],
 * with its magnitude mirrored into srtDiffs[rowId] for later sorting.
 * If no candidate exists because of banned switches, the row's bans are
 * cleared once (clearedBannedSwitches) and the scan is repeated via
 * bestDiffInternalRepeat.
 * NOTE(review): no bounds check on rowId against A.height — assumes the launch
 * configuration matches the matrix size exactly; confirm at the call site.
 * NOTE(review): the ban-clearing loop runs over A.height entries of a row of
 * stride A.width — verify this is intended for non-square matrices.
 */
__global__ void evaluateDiff(AijMatrix A, Difference* diffs,float* srtDiffs, int* persons,
		int* objects, char* bannedSwitches, char* clearedBannedSwitches) {
	//__shared__ float diffs[BLOCKSIZE];
	// Minimum gain worth acting on; replaced by -inf when unassigned so any
	// free object is accepted.
	float maxDiff = 0.009f;
	// Values needed for calculation
	int rowId = blockIdx.x * blockDim.x + threadIdx.x;
	// if (persons[rowId] == -1 || reset[0] == 1)
	// {
	int myCol = persons[rowId];  // current assignment of this person (-1 = none)
	if (myCol == -1)
		maxDiff = NEGINF;
	int myRow = rowId;
	int foundFreeObject = 0;
	int otherCol;
	int bestChangeCol=-1;
	int m1 = 0;
	int m2 = A.width;
	// if (reset[0] == 0)
	// {
	//
	// m1 = blockDim.x * blockIdx.x;
	// m2 = blockDim.x * (blockIdx.x + 1);
	// }
	// Scan all columns for the most profitable change.
	for (otherCol = m1; otherCol < m2; otherCol++)
	{
		int otherRow = objects[otherCol];  // who currently holds this object
		float difference = NEGINF;
		// Person is not assigned
		if (myCol == -1)
		{
			// Object considered not assigned
			if (otherRow == -1)
			{
				// happiness value for the per-obj association
				difference = m(A, myRow, otherCol);
				if (foundFreeObject == 0)
				{
					maxDiff = difference;
					bestChangeCol = otherCol;
				}
				foundFreeObject = 1;
			} else if (foundFreeObject == 0 && !bannedSwitches[myRow * A.width
					+ otherRow])
				// object is not free
				// Compare old case with new case
				// pos...better me
				// neg...better him
				difference = m(A, myRow, otherCol) - m(A, otherRow, otherCol);
		} else if (otherRow == -1)
			// Compare old case with new case
			difference = m(A, myRow, otherCol) - m(A, myRow, myCol);
		else if (m(A, otherRow, myCol) != NEGINF)
		{
			// Both assigned
			// Switch improves overall happiness of the two assignments
			difference = m(A, myRow, otherCol) + m(A, otherRow, myCol) - (m(A,
					myRow, myCol) + m(A, otherRow, otherCol));
		}
		if (difference > maxDiff)
		{
			maxDiff = difference;
			bestChangeCol = otherCol;
		}
	}
	//int tId = threadIdx.x;
	//#ifdef KERNEL_DEBUG
	//	printf("D%d -> %f\n",rowId,maxDiff);
	//#endif
	// }
	//	int rowId = blockIdx.x * blockDim.x + threadIdx.x;
	//if (rowId == 0 || (rowId % blockSize) == 0)
	//{
	//int blockStart = blockIdx.x * blockDim.x;
	//int blockEnd = blockIdx.x * blockDim.x + BLOCKSIZE;
	//int blockStart=0;
	//int blockEnd=A.height;
	//for (int var = blockStart; var < blockEnd; ++var)
	//{
	// Magnitude of the gain decides whether the switch is worth recording.
	if (maxDiff < 0)
		maxDiff = -maxDiff;
	if (maxDiff > 0.1 || myCol == -1)
	{
		if (bestChangeCol == -1)
		{
			if (clearedBannedSwitches[rowId])
			{
				// No suitable assignment due to banning
				persons[rowId] = -1;
				setEmptyDiff(&diffs[myRow]);
				srtDiffs[rowId]=0.0f;
				return;
			}
			// First failure: lift all bans for this row and retry once.
			clearedBannedSwitches[rowId] = 1;
			int x;
			for (x = 0; x < A.height; x++)
				bannedSwitches[rowId * A.width + x] = 0;
			bestDiffInternalRepeat(rowId, diffs,srtDiffs, A, persons,
					objects, bannedSwitches, clearedBannedSwitches);
			return;
		}
		if (myCol == -1)
			// Boost unassigned persons so they are prioritized downstream.
			maxDiff = maxDiff * 1000;
		setDifferenceProps(&diffs[rowId], rowId, bestChangeCol, persons[rowId],
				objects[bestChangeCol], maxDiff);
		srtDiffs[rowId]=maxDiff;
		// curDiff.index = i;
		// curDiff.bestChange = bestChangeCol;
		// curDiff.type = 0;
		// curDiff.myAssigned = persons[i];
		// curDiff.bestChangeAssigned = objects[bestChangeCol];
		// curDiff.value = maxDiff;
		//#ifdef KERNEL_DEBUG
		//		printf("srtDiff[%d]=%f\n",rowId,srtDiffs[rowId]);
		//#endif
		return;
	}
	// Difference not worth to consider
	setEmptyDiff(&diffs[myRow]);
	srtDiffs[rowId]=0.0f;
	return;
}
| c76d75598efb6f3f97dfd189e7dda9f410393921.cu | /*
* BestDiffKernel.cu
* heuristic CUDA
*
* Created by Roberto Roverso on 25/08/09.
* Copyright 2009 Peerialism. All rights reserved.
*
*/
#include <stdio.h>
//#include "Difference.c"
//#define KERNEL_DEBUG
#define NEGINF -9999.0f
#define BLOCKSIZE 16
// Element accessor: value at (row, col) in the row-major happiness matrix.
__device__ float m(AijMatrix aijMatrix, int row, int col) {
	return aijMatrix.els[row * aijMatrix.width + col];
}
// Marks a Difference record as "no worthwhile switch" by resetting its ids
// to the sentinel value -1 (value/bestChange are left untouched).
__device__ void setEmptyDiff(Difference* diff) {
	diff->index = -1;
	diff->myAssigned = -1;
	diff->bestChangeAssigned = -1;
}
// Fills one Difference record describing the best assignment switch found for
// person `idx`: the target column, the current occupants, and the gain value.
__device__ void setDifferenceProps(Difference* diff, int idx, int bestChange,
		int myAssigned, int bestChangeAssigned, float value) {
	diff->index = idx;
	diff->bestChange = bestChange;
	// diff->type = type;
	diff->myAssigned = myAssigned;
	diff->bestChangeAssigned = bestChangeAssigned;
	diff->value = value;
}
/**
 * Re-runs the best-switch scan for a single person (row) after its banned
 * switches have been cleared by the caller (evaluateDiff). Writes the best
 * change into diffs[rowId] / srtDiffs[rowId], or an empty diff when no change
 * beats the 0.1 threshold.
 * NOTE(review): when bestChangeCol stays -1 and clearedBannedSwitches[myRow]
 * is not set, the function returns without writing diffs/srtDiffs — previous
 * values remain; confirm callers expect that.
 */
__device__ void bestDiffInternalRepeat(int rowId, Difference* diffs,float* srtDiffs,
		AijMatrix A, int* persons, int* objects, char* bannedSwitches,
		char* clearedBannedSwitches) {
	// Minimum gain worth acting on; replaced by -inf when unassigned.
	float maxDiff = 0.009f;
	// if (persons[rowId] == -1 || reset[0] == 1)
	// {
	int bestChangeCol = -1;
	int myCol = persons[rowId];  // current assignment of this person (-1 = none)
	if (myCol == -1)
		maxDiff = NEGINF;
	int myRow = rowId;
	int foundFreeObject = 0;
	int otherCol;
	int m1 = 0;
	int m2 = A.width;
	// if (reset[0] == 0)
	// {
	//
	// m1 = blockDim.x * blockIdx.x;
	// m2 = blockDim.x * (blockIdx.x + 1);
	// }
	// Scan all columns for the most profitable change.
	for (otherCol = m1; otherCol < m2; otherCol++)
	{
		int otherRow = objects[otherCol];  // who currently holds this object
		float difference = NEGINF;
		// Person is not assigned
		if (myCol == -1)
		{
			// Object considered not assigned
			if (otherRow == -1)
			{
				// happiness value for the per-obj association
				difference = m(A, myRow, otherCol);
				if (foundFreeObject == 0)
				{
					maxDiff = difference;
					bestChangeCol = otherCol;
				}
				foundFreeObject = 1;
			} else if (foundFreeObject == 0 && !bannedSwitches[myRow * A.width
					+ otherRow])
				// object is not free
				// Compare old case with new case
				// pos...better me
				// neg...better him
				difference = m(A, myRow, otherCol) - m(A, otherRow, otherCol);
		} else if (otherRow == -1)
			// Compare old case with new case
			difference = m(A, myRow, otherCol) - m(A, myRow, myCol);
		else if (m(A, otherRow, myCol) != NEGINF)
		{
			// Both assigned
			// Switch improves overall happiness of the two assignments
			difference = m(A, myRow, otherCol) + m(A, otherRow, myCol) - (m(A,
					myRow, myCol) + m(A, otherRow, otherCol));
		}
		if (difference > maxDiff)
		{
			maxDiff = difference;
			bestChangeCol = otherCol;
		}
	}
	//#ifdef KERNEL_DEBUG
	//	printf("D%d -> %f\n",rowId,maxDiff);
	//#endif
	// Magnitude of the gain decides whether the switch is worth recording.
	if (maxDiff < 0)
	{
		maxDiff = -maxDiff;
	}
	if (maxDiff > 0.1 || myCol == -1)
	{
		if (bestChangeCol == -1)
		{
			if (clearedBannedSwitches[myRow])
			{
				// Nothing assignable even after clearing bans: unassign.
				persons[myRow] = -1;
				setEmptyDiff(&diffs[myRow]);
				srtDiffs[rowId]=0.0f;
				return;
			}
			return;
		}
		if (myCol == -1)
			// Boost unassigned persons so they are prioritized downstream.
			maxDiff = maxDiff * 1000;
		setDifferenceProps(&diffs[rowId], rowId, bestChangeCol, persons[rowId],
				objects[bestChangeCol], maxDiff);
		srtDiffs[rowId]=maxDiff;
		return;
	}
	// Difference not worth considering.
	setEmptyDiff(&diffs[myRow]);
	srtDiffs[rowId]=0.0f;
	return;
	// }
}
/**
 * One thread per person (row): scans every object (column) of A and records
 * the most profitable assignment change in diffs[rowId], with its magnitude
 * mirrored into srtDiffs[rowId]. If no candidate exists because of banned
 * switches, the row's bans are cleared once and the scan is repeated via
 * bestDiffInternalRepeat.
 * NOTE(review): no bounds check on rowId against A.height — assumes the launch
 * configuration matches the matrix size exactly; confirm at the call site.
 */
__global__ void evaluateDiff(AijMatrix A, Difference* diffs,float* srtDiffs, int* persons,
		int* objects, char* bannedSwitches, char* clearedBannedSwitches) {
	//__shared__ float diffs[BLOCKSIZE];
	// Minimum gain worth acting on; replaced by -inf when unassigned.
	float maxDiff = 0.009f;
	// Values needed for calculation
	int rowId = blockIdx.x * blockDim.x + threadIdx.x;
	// if (persons[rowId] == -1 || reset[0] == 1)
	// {
	int myCol = persons[rowId];  // current assignment of this person (-1 = none)
	if (myCol == -1)
		maxDiff = NEGINF;
	int myRow = rowId;
	int foundFreeObject = 0;
	int otherCol;
	int bestChangeCol=-1;
	int m1 = 0;
	int m2 = A.width;
	// if (reset[0] == 0)
	// {
	//
	// m1 = blockDim.x * blockIdx.x;
	// m2 = blockDim.x * (blockIdx.x + 1);
	// }
	// Scan all columns for the most profitable change.
	for (otherCol = m1; otherCol < m2; otherCol++)
	{
		int otherRow = objects[otherCol];  // who currently holds this object
		float difference = NEGINF;
		// Person is not assigned
		if (myCol == -1)
		{
			// Object considered not assigned
			if (otherRow == -1)
			{
				// happiness value for the per-obj association
				difference = m(A, myRow, otherCol);
				if (foundFreeObject == 0)
				{
					maxDiff = difference;
					bestChangeCol = otherCol;
				}
				foundFreeObject = 1;
			} else if (foundFreeObject == 0 && !bannedSwitches[myRow * A.width
					+ otherRow])
				// object is not free
				// Compare old case with new case
				// pos...better me
				// neg...better him
				difference = m(A, myRow, otherCol) - m(A, otherRow, otherCol);
		} else if (otherRow == -1)
			// Compare old case with new case
			difference = m(A, myRow, otherCol) - m(A, myRow, myCol);
		else if (m(A, otherRow, myCol) != NEGINF)
		{
			// Both assigned
			// Switch improves overall happiness of the two assignments
			difference = m(A, myRow, otherCol) + m(A, otherRow, myCol) - (m(A,
					myRow, myCol) + m(A, otherRow, otherCol));
		}
		if (difference > maxDiff)
		{
			maxDiff = difference;
			bestChangeCol = otherCol;
		}
	}
	//int tId = threadIdx.x;
	//#ifdef KERNEL_DEBUG
	//	printf("D%d -> %f\n",rowId,maxDiff);
	//#endif
	// }
	//	int rowId = blockIdx.x * blockDim.x + threadIdx.x;
	//if (rowId == 0 || (rowId % blockSize) == 0)
	//{
	//int blockStart = blockIdx.x * blockDim.x;
	//int blockEnd = blockIdx.x * blockDim.x + BLOCKSIZE;
	//int blockStart=0;
	//int blockEnd=A.height;
	//for (int var = blockStart; var < blockEnd; ++var)
	//{
	// Magnitude of the gain decides whether the switch is worth recording.
	if (maxDiff < 0)
		maxDiff = -maxDiff;
	if (maxDiff > 0.1 || myCol == -1)
	{
		if (bestChangeCol == -1)
		{
			if (clearedBannedSwitches[rowId])
			{
				// No suitable assignment due to banning
				persons[rowId] = -1;
				setEmptyDiff(&diffs[myRow]);
				srtDiffs[rowId]=0.0f;
				return;
			}
			// First failure: lift all bans for this row and retry once.
			clearedBannedSwitches[rowId] = 1;
			int x;
			for (x = 0; x < A.height; x++)
				bannedSwitches[rowId * A.width + x] = 0;
			bestDiffInternalRepeat(rowId, diffs,srtDiffs, A, persons,
					objects, bannedSwitches, clearedBannedSwitches);
			return;
		}
		if (myCol == -1)
			// Boost unassigned persons so they are prioritized downstream.
			maxDiff = maxDiff * 1000;
		setDifferenceProps(&diffs[rowId], rowId, bestChangeCol, persons[rowId],
				objects[bestChangeCol], maxDiff);
		srtDiffs[rowId]=maxDiff;
		// curDiff.index = i;
		// curDiff.bestChange = bestChangeCol;
		// curDiff.type = 0;
		// curDiff.myAssigned = persons[i];
		// curDiff.bestChangeAssigned = objects[bestChangeCol];
		// curDiff.value = maxDiff;
		//#ifdef KERNEL_DEBUG
		//		printf("srtDiff[%d]=%f\n",rowId,srtDiffs[rowId]);
		//#endif
		return;
	}
	// Difference not worth to consider
	setEmptyDiff(&diffs[myRow]);
	srtDiffs[rowId]=0.0f;
	return;
}
|
9d32b5c2cb81ffd6a21c91e381e5040621ea5dff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <limits>
#include "runtime/device/gpu/cuda_common.h"
#include "include/hip/hip_fp16.h"
#include "backend/kernel_compiler/gpu/cuda_impl/general_reduction_impl.cuh"
const int kWarpSize = 32;
const int kBlockSize = 512;
const int kWarpGroup = 4;
const int kNumWarps = kBlockSize / kWarpSize; // 16
const int kGroupSize = kWarpGroup * kWarpSize; // 128
// Mode selection constant
const int kMaxThreadLoop = 4;
const int kMaxWarpLoop = kWarpSize * 3; // 32 * 3 = 96
const int kMaxGroupLoop = kGroupSize * 3;  // 128 * 3 = 384
// Comparators for (key, index) pairs used in arg-min/arg-max selection.
// lt/gt return true when `a` beats `b`; ties on the key are broken in favor of
// the earlier (smaller) index, with i < 0 meaning "a's index not yet set".
template <typename T, typename S>
struct Cmp {
  __device__ static inline bool lt(T a, T b, S i, S j) { return (a < b) || ((a == b) && (i < 0 || j < i)); }
  __device__ static inline bool gt(T a, T b, S i, S j) { return (a > b) || ((a == b) && (i < 0 || j < i)); }
};
// Branchless conditional assignment: *x = y when is_assign, otherwise *x keeps
// its value (note: it is re-written with itself, i.e. a store always happens).
template <typename T>
inline __device__ void ConditionAssign(bool is_assign, T *x, const T &y) {
  (*x) = is_assign ? y : (*x);
}
// Arg-min/arg-max over the `bound` axis where each output element is produced
// by a single thread scanning all `bound` candidates sequentially. Chosen when
// bound is tiny (<= kMaxThreadLoop). `small` selects arg-min, otherwise
// arg-max; for half, init_K is overridden with the +/-65504 representable
// extremes.
template <typename T, typename S>
__global__ void ThreadReduction(bool small, size_t outer_size, size_t bound, size_t inner_size, const T *input,
                                T *output, S *output_index, bool fp16_flag, T init_K) {
  if (fp16_flag) {
    init_K = small ? __int2half_rd(65504) : __int2half_rd(-65504);
  }
  const S init_V = static_cast<S>(-1);  // -1 = "no index selected yet"
  // Grid-stride loop: one iteration per (outer, inner) output element.
  for (int t_idx = blockIdx.x * blockDim.x + threadIdx.x; t_idx < outer_size * inner_size;
       t_idx += blockDim.x * gridDim.x) {
    int outer_id = t_idx / inner_size;
    int inner_id = t_idx % inner_size;
    T threadK = init_K;
    S threadV = init_V;
    // Sequential scan of the reduced axis.
    for (int i = 0; i < bound; i++) {
      T other_K = input[outer_id * bound * inner_size + i * inner_size + inner_id];
      S other_V = i;
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    output[outer_id * inner_size + inner_id] = threadK;
    output_index[outer_id * inner_size + inner_id] = threadV;
  }
}
// Arg-min/arg-max where each output element is produced cooperatively by one
// full warp (32 lanes): lanes stride over the reduced axis, then combine their
// partial (key, index) pairs with a shuffle-down tree. Chosen for moderate
// bound (<= kMaxWarpLoop). Lane 0 writes the result.
// NOTE(review): the __syncthreads() at the end of the grid-stride loop is only
// safe if all threads of the block execute the same number of loop
// iterations — confirm the launch configuration guarantees that.
template <typename T, typename S>
__global__ void WarpReduction(bool small, size_t outer_size, size_t bound, size_t inner_size, const T *input, T *output,
                              S *output_index, bool fp16_flag, T init_K) {
  if (fp16_flag) {
    init_K = small ? __int2half_rd(65504) : __int2half_rd(-65504);
  }
  const S init_V = static_cast<S>(-1);  // -1 = "no index selected yet"
  // kWarpSize threads per output element.
  for (int t_idx = blockIdx.x * blockDim.x + threadIdx.x; t_idx < kWarpSize * outer_size * inner_size;
       t_idx += blockDim.x * gridDim.x) {
    int outer_id = t_idx / kWarpSize / inner_size;
    int inner_id = t_idx / kWarpSize % inner_size;
    int laneId = threadIdx.x % kWarpSize;
    T threadK = init_K;
    S threadV = init_V;
    // Each lane accumulates a strided partial result.
    for (int i = laneId; i < bound; i += kWarpSize) {
      T other_K = input[outer_id * bound * inner_size + i * inner_size + inner_id];
      S other_V = i;
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    // Shuffle-down tree combines the 32 lane partials into lane 0.
    for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
      T other_K = __shfl_down_sync(0xffffffff, threadK, offset);
      S other_V = __shfl_down_sync(0xffffffff, threadV, offset);
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    if (laneId == 0) {
      output[outer_id * inner_size + inner_id] = threadK;
      output_index[outer_id * inner_size + inner_id] = threadV;
    }
    __syncthreads();
  }
}
// Arg-min/arg-max where each output element is produced by a group of four
// warps (kGroupSize = 128 threads): each warp reduces its strided partials via
// shuffles into shared memory, then the 4 per-warp results are combined in two
// steps (pairwise, then final) by the group's first threads. Chosen for larger
// bound (<= kMaxGroupLoop). The first thread of each group writes the result.
template <typename T, typename S>
__global__ void Warp4Reduction(bool small, size_t outer_size, size_t bound, size_t inner_size, const T *input,
                               T *output, S *output_index, bool fp16_flag, T init_K) {
  // One slot per warp of the block for the per-warp winners.
  __shared__ T shared_K[kNumWarps];
  __shared__ S shared_V[kNumWarps];
  if (fp16_flag) {
    init_K = small ? __int2half_rd(65504) : __int2half_rd(-65504);
  }
  const S init_V = static_cast<S>(-1);  // -1 = "no index selected yet"
  // kGroupSize threads per output element.
  for (int t_idx = blockIdx.x * blockDim.x + threadIdx.x; t_idx < kGroupSize * outer_size * inner_size;
       t_idx += blockDim.x * gridDim.x) {
    int outer_id = t_idx / kGroupSize / inner_size;
    int inner_id = t_idx / kGroupSize % inner_size;
    int groupId = threadIdx.x / kGroupSize;  // which 4-warp group in the block
    int tgId = threadIdx.x % kGroupSize;     // thread id within the group
    int warpId = threadIdx.x / kWarpSize;    // warp id within the block
    int laneId = threadIdx.x % kWarpSize;
    T threadK = init_K;
    S threadV = init_V;
    if (laneId == 0) {
      shared_K[warpId] = init_K;
      shared_V[warpId] = init_V;
    }
    __syncthreads();
    // Each thread of the group accumulates a strided partial result.
    for (int i = tgId; i < bound; i += kGroupSize) {
      T other_K = input[outer_id * bound * inner_size + i * inner_size + inner_id];
      S other_V = i;
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    // Intra-warp shuffle-down tree: winner of each warp ends up in lane 0.
    for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
      T other_K = __shfl_down_sync(0xffffffff, threadK, offset);
      S other_V = __shfl_down_sync(0xffffffff, threadV, offset);
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    if (laneId == 0) {
      shared_K[warpId] = threadK;
      shared_V[warpId] = threadV;
    }
    __syncthreads();
    // Pairwise combine: warp 0 vs 2 and warp 1 vs 3 of this group.
    if (tgId < 2) {
      bool is_winner =
        small ? Cmp<T, S>::gt(shared_K[(groupId * kWarpGroup) + tgId], shared_K[(groupId * kWarpGroup) + tgId + 2],
                              shared_V[(groupId * kWarpGroup) + tgId], shared_V[(groupId * kWarpGroup) + tgId + 2])
              : Cmp<T, S>::lt(shared_K[(groupId * kWarpGroup) + tgId], shared_K[(groupId * kWarpGroup) + tgId + 2],
                              shared_V[(groupId * kWarpGroup) + tgId], shared_V[(groupId * kWarpGroup) + tgId + 2]);
      ConditionAssign(is_winner, (shared_K + (groupId * kWarpGroup) + tgId),
                      (shared_K[(groupId * kWarpGroup) + tgId + 2]));
      ConditionAssign(is_winner, (shared_V + (groupId * kWarpGroup) + tgId),
                      (shared_V[(groupId * kWarpGroup) + tgId + 2]));
    }
    __syncwarp();
    // Final combine: slot 0 vs slot 1; thread 0 of the group writes the output.
    if (tgId == 0) {
      bool is_winner =
        small ? Cmp<T, S>::gt(shared_K[(groupId * kWarpGroup) + tgId], shared_K[(groupId * kWarpGroup) + tgId + 1],
                              shared_V[(groupId * kWarpGroup) + tgId], shared_V[(groupId * kWarpGroup) + tgId + 1])
              : Cmp<T, S>::lt(shared_K[(groupId * kWarpGroup) + tgId], shared_K[(groupId * kWarpGroup) + tgId + 1],
                              shared_V[(groupId * kWarpGroup) + tgId], shared_V[(groupId * kWarpGroup) + tgId + 1]);
      ConditionAssign(is_winner, (shared_K + (groupId * kWarpGroup) + tgId),
                      (shared_K[(groupId * kWarpGroup) + tgId + 1]));
      ConditionAssign(is_winner, (shared_V + (groupId * kWarpGroup) + tgId),
                      (shared_V[(groupId * kWarpGroup) + tgId + 1]));
      // The first thread of each group write output
      output[outer_id * inner_size + inner_id] = shared_K[groupId * kWarpGroup];
      output_index[outer_id * inner_size + inner_id] = shared_V[groupId * kWarpGroup];
    }
    __syncthreads();
  }
}
// Arg-min/arg-max where each output element is produced by a whole block
// (kBlockSize = 512 threads): each warp reduces its strided partials via
// shuffles, per-warp winners land in shared memory (kNumWarps = 16 slots),
// and warp 0 reduces those with a second shuffle tree. Chosen for the largest
// bounds (> kMaxGroupLoop). Thread 0 writes the result.
template <typename T, typename S>
__global__ void BlockReduction(bool small, size_t outer_size, size_t bound, size_t inner_size, const T *input,
                               T *output, S *output_index, bool fp16_flag, T init_K) {
  // One slot per warp of the block for the per-warp winners.
  __shared__ T shared_K[kNumWarps];
  __shared__ S shared_V[kNumWarps];
  if (fp16_flag) {
    init_K = small ? __int2half_rd(65504) : __int2half_rd(-65504);
  }
  const S init_V = static_cast<S>(-1);  // -1 = "no index selected yet"
  // kBlockSize threads per output element.
  for (int t_idx = blockIdx.x * blockDim.x + threadIdx.x; t_idx < kBlockSize * outer_size * inner_size;
       t_idx += blockDim.x * gridDim.x) {
    int outer_id = t_idx / kBlockSize / inner_size;
    int inner_id = t_idx / kBlockSize % inner_size;
    int tgId = threadIdx.x % kBlockSize;   // thread id within the block group
    int warpId = threadIdx.x / kWarpSize;  // warp id within the block
    int laneId = threadIdx.x % kWarpSize;
    T threadK = init_K;
    S threadV = init_V;
    if (laneId == 0) {
      shared_K[warpId] = init_K;
      shared_V[warpId] = init_V;
    }
    __syncthreads();
    // Each thread accumulates a strided partial result.
    for (int i = tgId; i < bound; i += kBlockSize) {
      T other_K = input[outer_id * bound * inner_size + i * inner_size + inner_id];
      S other_V = i;
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    // Intra-warp shuffle-down tree: winner of each warp ends up in lane 0.
    for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
      T other_K = __shfl_down_sync(0xffffffff, threadK, offset);
      S other_V = __shfl_down_sync(0xffffffff, threadV, offset);
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    if (laneId == 0) {
      shared_K[warpId] = threadK;
      shared_V[warpId] = threadV;
    }
    __syncthreads();
    // Shared memory reduction
    // There are 16 items in shared memory, can be reduced within one warp.
    if (warpId == 0) {
      threadK = laneId < kNumWarps ? shared_K[laneId] : init_K;
      threadV = laneId < kNumWarps ? shared_V[laneId] : init_V;
    }
    __syncwarp();
    if (warpId == 0) {
      // Second shuffle tree over the 16 per-warp winners (starts at offset 8).
      for (int offset = kWarpSize / 4; offset > 0; offset /= 2) {
        T other_K = __shfl_down_sync(0xffffffff, threadK, offset);
        S other_V = __shfl_down_sync(0xffffffff, threadV, offset);
        bool is_winner =
          small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
        ConditionAssign(is_winner, &threadK, other_K);
        ConditionAssign(is_winner, &threadV, other_V);
      }
    }
    __syncwarp();
    if (warpId == 0 && laneId == 0) {
      output[outer_id * inner_size + inner_id] = threadK;
      output_index[outer_id * inner_size + inner_id] = threadV;
    }
  }
}
/**
 * Host-side dispatcher: picks the reduction kernel (thread / warp / 4-warp /
 * block cooperation per output element) based on the length of the reduced
 * axis `bound`, and launches it on `stream`.
 */
template <typename T, typename S>
void GeneralReduction(bool small, size_t outer_size, size_t bound, size_t inner_size, const T *input, T *output,
                      S *output_index, hipStream_t stream) {
  int block_num_limit = outer_size * inner_size;
  bool fp16_flag = false;
  if (std::is_same<T, half>::value) {
    fp16_flag = true;
  }
  // Identity element of the reduction. For arg-min ("small") every candidate
  // must beat the initial key, so start from the type's maximum; for arg-max
  // start from the lowest. (Previously both branches used lowest(), which
  // broke the arg-min path for non-half types: no element could win against
  // the initial key, leaving output at lowest() and the index at -1.) For
  // half, the kernels override init_K with +/-65504 via fp16_flag, matching
  // this choice of sign.
  T init_K = small ? std::numeric_limits<T>::max() : std::numeric_limits<T>::lowest();
  if (bound <= kMaxThreadLoop) {
    // One thread scans the whole reduced axis.
    hipLaunchKernelGGL(( ThreadReduction<T, S>), dim3(GET_BLOCKS(block_num_limit)), dim3(kBlockSize), 0, stream,
      small, outer_size, bound, inner_size, input, output, output_index, fp16_flag, init_K);
  } else if (bound <= kMaxWarpLoop) {
    // One warp (32 lanes) cooperates per output element.
    hipLaunchKernelGGL(( WarpReduction<T, S>), dim3(GET_BLOCKS(block_num_limit * kWarpSize)), dim3(kBlockSize), 0, stream,
      small, outer_size, bound, inner_size, input, output, output_index, fp16_flag, init_K);
  } else if (bound <= kMaxGroupLoop) {
    // Four warps (128 threads) cooperate per output element.
    hipLaunchKernelGGL(( Warp4Reduction<T, S>), dim3(GET_BLOCKS(block_num_limit * kGroupSize)), dim3(kBlockSize), 0, stream,
      small, outer_size, bound, inner_size, input, output, output_index, fp16_flag, init_K);
  } else {
    // A whole block (512 threads) cooperates per output element.
    hipLaunchKernelGGL(( BlockReduction<T, S>), dim3(GET_BLOCKS(block_num_limit * kBlockSize)), dim3(kBlockSize), 0, stream,
      small, outer_size, bound, inner_size, input, output, output_index, fp16_flag, init_K);
  }
}
// Public entry point: reorders the (outer, bound, inner) arguments and
// forwards to the kernel-selecting dispatcher above.
template <typename T, typename S>
void CalGeneralReduction(bool small, const T *input, const size_t bound, const size_t outerSize, const size_t innerSize,
                         S *index, T *output, hipStream_t cuda_stream) {
  GeneralReduction<T, S>(small, outerSize, bound, innerSize, input, output, index, cuda_stream);
}
template void CalGeneralReduction(bool small, const float *input, const size_t bound_, const size_t outerSize_,
const size_t innerSize_, int *index, float *output, hipStream_t cuda_stream);
template void CalGeneralReduction(bool small, const half *input, const size_t bound_, const size_t outerSize_,
const size_t innerSize_, int *index, half *output, hipStream_t cuda_stream);
| 9d32b5c2cb81ffd6a21c91e381e5040621ea5dff.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <limits>
#include "runtime/device/gpu/cuda_common.h"
#include "include/cuda_fp16.h"
#include "backend/kernel_compiler/gpu/cuda_impl/general_reduction_impl.cuh"
const int kWarpSize = 32;
const int kBlockSize = 512;
const int kWarpGroup = 4;
const int kNumWarps = kBlockSize / kWarpSize; // 16
const int kGroupSize = kWarpGroup * kWarpSize; // 128
// Mode selection constant
const int kMaxThreadLoop = 4;
const int kMaxWarpLoop = kWarpSize * 3; // 32 * 3 = 96
const int kMaxGroupLoop = kGroupSize * 3;  // 128 * 3 = 384
// Comparators for (key, index) pairs used in arg-min/arg-max selection.
// lt/gt return true when `a` beats `b`; ties on the key are broken in favor of
// the earlier (smaller) index, with i < 0 meaning "a's index not yet set".
template <typename T, typename S>
struct Cmp {
  __device__ static inline bool lt(T a, T b, S i, S j) { return (a < b) || ((a == b) && (i < 0 || j < i)); }
  __device__ static inline bool gt(T a, T b, S i, S j) { return (a > b) || ((a == b) && (i < 0 || j < i)); }
};
// Branchless conditional assignment: *x = y when is_assign, otherwise *x keeps
// its value (note: it is re-written with itself, i.e. a store always happens).
template <typename T>
inline __device__ void ConditionAssign(bool is_assign, T *x, const T &y) {
  (*x) = is_assign ? y : (*x);
}
// Arg-min/arg-max over the `bound` axis where each output element is produced
// by a single thread scanning all `bound` candidates sequentially. Chosen when
// bound is tiny (<= kMaxThreadLoop). `small` selects arg-min, otherwise
// arg-max; for half, init_K is overridden with the +/-65504 representable
// extremes.
template <typename T, typename S>
__global__ void ThreadReduction(bool small, size_t outer_size, size_t bound, size_t inner_size, const T *input,
                                T *output, S *output_index, bool fp16_flag, T init_K) {
  if (fp16_flag) {
    init_K = small ? __int2half_rd(65504) : __int2half_rd(-65504);
  }
  const S init_V = static_cast<S>(-1);  // -1 = "no index selected yet"
  // Grid-stride loop: one iteration per (outer, inner) output element.
  for (int t_idx = blockIdx.x * blockDim.x + threadIdx.x; t_idx < outer_size * inner_size;
       t_idx += blockDim.x * gridDim.x) {
    int outer_id = t_idx / inner_size;
    int inner_id = t_idx % inner_size;
    T threadK = init_K;
    S threadV = init_V;
    // Sequential scan of the reduced axis.
    for (int i = 0; i < bound; i++) {
      T other_K = input[outer_id * bound * inner_size + i * inner_size + inner_id];
      S other_V = i;
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    output[outer_id * inner_size + inner_id] = threadK;
    output_index[outer_id * inner_size + inner_id] = threadV;
  }
}
// One warp per (outer, inner) slice: lanes stride over `bound`, then the warp
// reduces its 32 partial winners with shuffles; lane 0 writes the result.
// `small == true` selects min/argmin, otherwise max/argmax.  Chosen by the
// host dispatcher when kMaxThreadLoop < bound <= kMaxWarpLoop.
//
// Fix: the original kernel ended each grid-stride iteration with
// __syncthreads().  The loop trip count can differ across threads of the same
// block (the total work need not be a multiple of the grid stride), so that
// barrier sat in divergent control flow — undefined behavior / potential
// deadlock.  The kernel uses no shared memory, so the barrier served no
// purpose and is removed.
template <typename T, typename S>
__global__ void WarpReduction(bool small, size_t outer_size, size_t bound, size_t inner_size, const T *input, T *output,
                              S *output_index, bool fp16_flag, T init_K) {
  // Identity element for half is built on-device (+/- max finite half).
  if (fp16_flag) {
    init_K = small ? __int2half_rd(65504) : __int2half_rd(-65504);
  }
  const S init_V = static_cast<S>(-1);  // -1 == "no winner yet" (see Cmp)
  for (int t_idx = blockIdx.x * blockDim.x + threadIdx.x; t_idx < kWarpSize * outer_size * inner_size;
       t_idx += blockDim.x * gridDim.x) {
    int outer_id = t_idx / kWarpSize / inner_size;
    int inner_id = t_idx / kWarpSize % inner_size;
    int laneId = threadIdx.x % kWarpSize;
    T threadK = init_K;
    S threadV = init_V;
    // Each lane scans a strided subset of the reduced axis.
    for (int i = laneId; i < bound; i += kWarpSize) {
      T other_K = input[outer_id * bound * inner_size + i * inner_size + inner_id];
      S other_V = i;
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    // Butterfly-style shuffle reduction; the winner accumulates into lane 0.
    for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
      T other_K = __shfl_down_sync(0xffffffff, threadK, offset);
      S other_V = __shfl_down_sync(0xffffffff, threadV, offset);
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    if (laneId == 0) {
      output[outer_id * inner_size + inner_id] = threadK;
      output_index[outer_id * inner_size + inner_id] = threadV;
    }
  }
}
// One warp group (kWarpGroup = 4 warps = kGroupSize = 128 threads) per
// (outer, inner) slice.  Each warp reduces its strided subset with shuffles,
// the four per-warp winners land in shared memory, and a two-step pairwise
// merge ([0]+[2], [1]+[3], then [0]+[1]) produces the group result.
// Chosen by the host dispatcher when kMaxWarpLoop < bound <= kMaxGroupLoop.
template <typename T, typename S>
__global__ void Warp4Reduction(bool small, size_t outer_size, size_t bound, size_t inner_size, const T *input,
                               T *output, S *output_index, bool fp16_flag, T init_K) {
  // One slot per warp in the block (kNumWarps = 16).
  __shared__ T shared_K[kNumWarps];
  __shared__ S shared_V[kNumWarps];
  // Identity element for half is built on-device (+/- max finite half).
  if (fp16_flag) {
    init_K = small ? __int2half_rd(65504) : __int2half_rd(-65504);
  }
  const S init_V = static_cast<S>(-1);  // -1 == "no winner yet" (see Cmp)
  // NOTE(review): this grid-stride loop contains __syncthreads(); if the total
  // work is not a multiple of the grid stride, threads of one block can run
  // different trip counts, putting the barrier in divergent control flow.
  // Launch configuration must guarantee uniform trip counts — verify.
  for (int t_idx = blockIdx.x * blockDim.x + threadIdx.x; t_idx < kGroupSize * outer_size * inner_size;
       t_idx += blockDim.x * gridDim.x) {
    int outer_id = t_idx / kGroupSize / inner_size;
    int inner_id = t_idx / kGroupSize % inner_size;
    int groupId = threadIdx.x / kGroupSize;   // which 128-thread group in the block
    int tgId = threadIdx.x % kGroupSize;      // rank within the group
    int warpId = threadIdx.x / kWarpSize;     // which warp in the block
    int laneId = threadIdx.x % kWarpSize;     // rank within the warp
    T threadK = init_K;
    S threadV = init_V;
    if (laneId == 0) {
      shared_K[warpId] = init_K;
      shared_V[warpId] = init_V;
    }
    __syncthreads();
    // Each thread scans a group-strided subset of the reduced axis.
    for (int i = tgId; i < bound; i += kGroupSize) {
      T other_K = input[outer_id * bound * inner_size + i * inner_size + inner_id];
      S other_V = i;
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    // Intra-warp shuffle reduction; lane 0 of each warp holds its winner.
    for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
      T other_K = __shfl_down_sync(0xffffffff, threadK, offset);
      S other_V = __shfl_down_sync(0xffffffff, threadV, offset);
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    if (laneId == 0) {
      shared_K[warpId] = threadK;
      shared_V[warpId] = threadV;
    }
    __syncthreads();
    // Merge the 4 warp winners of this group: slots {0,1} absorb {2,3} ...
    if (tgId < 2) {
      bool is_winner =
        small ? Cmp<T, S>::gt(shared_K[(groupId * kWarpGroup) + tgId], shared_K[(groupId * kWarpGroup) + tgId + 2],
                              shared_V[(groupId * kWarpGroup) + tgId], shared_V[(groupId * kWarpGroup) + tgId + 2])
              : Cmp<T, S>::lt(shared_K[(groupId * kWarpGroup) + tgId], shared_K[(groupId * kWarpGroup) + tgId + 2],
                              shared_V[(groupId * kWarpGroup) + tgId], shared_V[(groupId * kWarpGroup) + tgId + 2]);
      ConditionAssign(is_winner, (shared_K + (groupId * kWarpGroup) + tgId),
                      (shared_K[(groupId * kWarpGroup) + tgId + 2]));
      ConditionAssign(is_winner, (shared_V + (groupId * kWarpGroup) + tgId),
                      (shared_V[(groupId * kWarpGroup) + tgId + 2]));
    }
    __syncwarp();
    // ... then slot 0 absorbs slot 1.
    if (tgId == 0) {
      bool is_winner =
        small ? Cmp<T, S>::gt(shared_K[(groupId * kWarpGroup) + tgId], shared_K[(groupId * kWarpGroup) + tgId + 1],
                              shared_V[(groupId * kWarpGroup) + tgId], shared_V[(groupId * kWarpGroup) + tgId + 1])
              : Cmp<T, S>::lt(shared_K[(groupId * kWarpGroup) + tgId], shared_K[(groupId * kWarpGroup) + tgId + 1],
                              shared_V[(groupId * kWarpGroup) + tgId], shared_V[(groupId * kWarpGroup) + tgId + 1]);
      ConditionAssign(is_winner, (shared_K + (groupId * kWarpGroup) + tgId),
                      (shared_K[(groupId * kWarpGroup) + tgId + 1]));
      ConditionAssign(is_winner, (shared_V + (groupId * kWarpGroup) + tgId),
                      (shared_V[(groupId * kWarpGroup) + tgId + 1]));
      // The first thread of each group write output
      output[outer_id * inner_size + inner_id] = shared_K[groupId * kWarpGroup];
      output_index[outer_id * inner_size + inner_id] = shared_V[groupId * kWarpGroup];
    }
    __syncthreads();
  }
}
// One full block (kBlockSize = 512 threads) per (outer, inner) slice, for the
// largest `bound` values.  Each warp reduces a strided subset via shuffles,
// per-warp winners go to shared memory, and warp 0 reduces those kNumWarps
// (= 16) values with a final shuffle pass.
template <typename T, typename S>
__global__ void BlockReduction(bool small, size_t outer_size, size_t bound, size_t inner_size, const T *input,
                               T *output, S *output_index, bool fp16_flag, T init_K) {
  // One slot per warp in the block.
  __shared__ T shared_K[kNumWarps];
  __shared__ S shared_V[kNumWarps];
  // Identity element for half is built on-device (+/- max finite half).
  if (fp16_flag) {
    init_K = small ? __int2half_rd(65504) : __int2half_rd(-65504);
  }
  const S init_V = static_cast<S>(-1);  // -1 == "no winner yet" (see Cmp)
  // NOTE(review): grid-stride loop containing __syncthreads(); launch config
  // must guarantee all threads of a block run the same trip count, otherwise
  // the barrier is reached in divergent control flow — verify.
  for (int t_idx = blockIdx.x * blockDim.x + threadIdx.x; t_idx < kBlockSize * outer_size * inner_size;
       t_idx += blockDim.x * gridDim.x) {
    int outer_id = t_idx / kBlockSize / inner_size;
    int inner_id = t_idx / kBlockSize % inner_size;
    int tgId = threadIdx.x % kBlockSize;
    int warpId = threadIdx.x / kWarpSize;
    int laneId = threadIdx.x % kWarpSize;
    T threadK = init_K;
    S threadV = init_V;
    if (laneId == 0) {
      shared_K[warpId] = init_K;
      shared_V[warpId] = init_V;
    }
    __syncthreads();
    // Each thread scans a block-strided subset of the reduced axis.
    for (int i = tgId; i < bound; i += kBlockSize) {
      T other_K = input[outer_id * bound * inner_size + i * inner_size + inner_id];
      S other_V = i;
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    // Intra-warp shuffle reduction; lane 0 of each warp holds its winner.
    for (int offset = kWarpSize / 2; offset > 0; offset /= 2) {
      T other_K = __shfl_down_sync(0xffffffff, threadK, offset);
      S other_V = __shfl_down_sync(0xffffffff, threadV, offset);
      bool is_winner =
        small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
      ConditionAssign(is_winner, &threadK, other_K);
      ConditionAssign(is_winner, &threadV, other_V);
    }
    __syncwarp();
    if (laneId == 0) {
      shared_K[warpId] = threadK;
      shared_V[warpId] = threadV;
    }
    __syncthreads();
    // Shared memory reduction
    // There are 16 items in shared memory, can be reduced within one warp.
    if (warpId == 0) {
      threadK = laneId < kNumWarps ? shared_K[laneId] : init_K;
      threadV = laneId < kNumWarps ? shared_V[laneId] : init_V;
    }
    __syncwarp();
    if (warpId == 0) {
      // Start at kWarpSize/4 = 8: only kNumWarps = 16 values are live.
      for (int offset = kWarpSize / 4; offset > 0; offset /= 2) {
        T other_K = __shfl_down_sync(0xffffffff, threadK, offset);
        S other_V = __shfl_down_sync(0xffffffff, threadV, offset);
        bool is_winner =
          small ? Cmp<T, S>::gt(threadK, other_K, threadV, other_V) : Cmp<T, S>::lt(threadK, other_K, threadV, other_V);
        ConditionAssign(is_winner, &threadK, other_K);
        ConditionAssign(is_winner, &threadV, other_V);
      }
    }
    __syncwarp();
    if (warpId == 0 && laneId == 0) {
      output[outer_id * inner_size + inner_id] = threadK;
      output_index[outer_id * inner_size + inner_id] = threadV;
    }
  }
}
// Dispatch an argmin/argmax reduction over the middle axis of a tensor viewed
// as (outer_size, bound, inner_size), picking a kernel by how much work each
// slice carries: one thread, one warp, one 4-warp group, or a whole block.
// `small == true` selects min/argmin, otherwise max/argmax.
template <typename T, typename S>
void GeneralReduction(bool small, size_t outer_size, size_t bound, size_t inner_size, const T *input, T *output,
                      S *output_index, cudaStream_t stream) {
  int block_num_limit = outer_size * inner_size;
  bool fp16_flag = false;
  if (std::is_same<T, half>::value) {
    fp16_flag = true;  // half identity is rebuilt on-device inside the kernels
  }
  // Identity element of the reduction: +max for a min search, lowest for a
  // max search.  (Bug fix: the original passed lowest() for BOTH branches,
  // so a min/argmin over non-half types started from the smallest
  // representable value and could never be beaten, leaving index -1.)
  T init_K = small ? std::numeric_limits<T>::max() : std::numeric_limits<T>::lowest();
  if (bound <= kMaxThreadLoop) {
    ThreadReduction<T, S><<<GET_BLOCKS(block_num_limit), kBlockSize, 0, stream>>>(
      small, outer_size, bound, inner_size, input, output, output_index, fp16_flag, init_K);
  } else if (bound <= kMaxWarpLoop) {
    WarpReduction<T, S><<<GET_BLOCKS(block_num_limit * kWarpSize), kBlockSize, 0, stream>>>(
      small, outer_size, bound, inner_size, input, output, output_index, fp16_flag, init_K);
  } else if (bound <= kMaxGroupLoop) {
    Warp4Reduction<T, S><<<GET_BLOCKS(block_num_limit * kGroupSize), kBlockSize, 0, stream>>>(
      small, outer_size, bound, inner_size, input, output, output_index, fp16_flag, init_K);
  } else {
    BlockReduction<T, S><<<GET_BLOCKS(block_num_limit * kBlockSize), kBlockSize, 0, stream>>>(
      small, outer_size, bound, inner_size, input, output, output_index, fp16_flag, init_K);
  }
}
// Public entry point: argmin/argmax over the middle axis of a tensor viewed
// as (outerSize, bound, innerSize).  Thin forwarder into GeneralReduction,
// which chooses the reduction kernel from `bound`.
template <typename T, typename S>
void CalGeneralReduction(bool small, const T *input, const size_t bound, const size_t outerSize, const size_t innerSize,
                         S *index, T *output, cudaStream_t cuda_stream) {
  GeneralReduction<T, S>(small, outerSize, bound, innerSize, input, output, index, cuda_stream);
}
template void CalGeneralReduction(bool small, const float *input, const size_t bound_, const size_t outerSize_,
const size_t innerSize_, int *index, float *output, cudaStream_t cuda_stream);
template void CalGeneralReduction(bool small, const half *input, const size_t bound_, const size_t outerSize_,
const size_t innerSize_, int *index, half *output, cudaStream_t cuda_stream);
|
7a8ed6a701d8c6b704214dcd86172133b382124a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#define THREADS_PER_BLOCK_CSR 32
namespace caffe {
// C = alpha * op(A) * op(B) + beta * C for row-major A (MxK), B (KxN),
// C (MxN).  hipBLAS is column-major, so the call computes op(B)*op(A) with
// M and N exchanged, which writes C directly in row-major order.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const float alpha, const float* A, const float* B, const float beta,
    float* C) {
  // Note that cublas follows fortran order.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CAFFE1_CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// Double-precision twin of the float specialization above.
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const double alpha, const double* A, const double* B, const double beta,
    double* C) {
  // Note that cublas follows fortran order.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  hipblasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
  CAFFE1_CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// y = alpha * op(A) * x + beta * y for row-major A (M x N).  The transpose
// flag is inverted and M/N swapped to compensate for BLAS column-major
// storage of A.
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const float alpha, const float* A, const float* x,
    const float beta, float* y) {
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CAFFE1_CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
// Double-precision twin of the float specialization above.
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const double alpha, const double* A, const double* x,
    const double beta, double* y) {
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CAFFE1_CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
// Y = alpha * X + Y over N elements (BLAS axpy).
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
    float* Y) {
  CAFFE1_CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Double-precision twin.
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
    double* Y) {
  CAFFE1_CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Copy N bytes from X to Y; hipMemcpyDefault lets the runtime infer the
// transfer direction from the pointer kinds.  A self-copy is a no-op.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
  if (X == Y) {
    return;  // nothing to copy
  }
  CAFFE1_CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));  // NOLINT(caffe/alt_fn)
}
// X *= alpha over N elements (BLAS scal).
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
  CAFFE1_CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
  CAFFE1_CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// Y = alpha * X + beta * Y, composed from scal (Y *= beta) then axpy.
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
    const float beta, float* Y) {
  caffe_gpu_scal<float>(N, beta, Y);
  caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
    const double beta, double* Y) {
  caffe_gpu_scal<double>(N, beta, Y);
  caffe_gpu_axpy<double>(N, alpha, X, Y);
}
// *out = dot(x, y) over n elements (BLAS dot); result written to host pointer.
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
    float* out) {
  CAFFE1_CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
    double * out) {
  CAFFE1_CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// *y = sum of absolute values of x (BLAS asum).
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
  CAFFE1_CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
  CAFFE1_CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
// y = alpha * x over n elements, composed as copy then in-place scal.
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
                            float* y) {
  CAFFE1_CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
  CAFFE1_CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
                             double* y) {
  CAFFE1_CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
  CAFFE1_CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Fill y[0..n) with the constant alpha (grid-stride kernel).
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = alpha;
  }
}
// Host wrapper: Y[0..N) = alpha.  alpha == 0 takes a hipMemset fast path —
// valid because the all-zero byte pattern represents 0 for int/float/double.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
  if (alpha == 0) {
    CAFFE1_CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N));  // NOLINT(caffe/alt_fn)
    return;
  }
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, alpha, Y);
}
// Explicit instantiations for the supported element types.
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
// y[i] += alpha for i in [0, n) (grid-stride kernel).
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] += alpha;
  }
}
// Host wrappers: in-place scalar addition on device memory.
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, alpha, Y);
}
// Element-wise binary operations y[i] = a[i] OP b[i] over [0, n):
// one grid-stride kernel plus float/double host wrappers per operation.
// y[i] = a[i] + b[i]
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] + b[index];
  }
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
    float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
    double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// y[i] = a[i] - b[i]
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] - b[index];
  }
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
    float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
    double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// y[i] = a[i] * b[i]
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] * b[index];
  }
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
    const float* b, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
    const double* b, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// y[i] = a[i] / b[i]  (no divide-by-zero guard; caller's responsibility)
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] / b[index];
  }
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
    const float* b, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
    const double* b, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, b, y);
}
// Element-wise unary operations y[i] = f(a[i]) over [0, n):
// one grid-stride kernel plus float/double host wrappers per function.
// y[i] = |a[i]|
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = abs(a[index]);
  }
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
// y[i] = exp(a[i])
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = exp(a[index]);
  }
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
// y[i] = log(a[i])  (natural log; non-positive inputs are the caller's problem)
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = log(a[index]);
  }
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
// y[i] = a[i] ^ alpha (element-wise power with a scalar exponent).
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
    const Dtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = pow(a[index], alpha);
  }
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
    const float alpha, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
    const double alpha, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, alpha, y);
}
// y[i] = sqrt(a[i])
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = sqrt(a[index]);
  }
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
// Fill r[0..n) with raw 32-bit random integers from the global generator.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
  CAFFE1_CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
// Fill r[0..n) with uniform samples on [a, b): draw unit-interval samples,
// then affinely map via scal (scale by b-a) and add_scalar (shift by a);
// both steps are skipped when they would be identity operations.
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
                                  float* r) {
  CAFFE1_CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
  const float range = b - a;
  if (range != static_cast<float>(1)) {
    caffe_gpu_scal(n, range, r);
  }
  if (a != static_cast<float>(0)) {
    caffe_gpu_add_scalar(n, a, r);
  }
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
                                   double* r) {
  CAFFE1_CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
  const double range = b - a;
  if (range != static_cast<double>(1)) {
    caffe_gpu_scal(n, range, r);
  }
  if (a != static_cast<double>(0)) {
    caffe_gpu_add_scalar(n, a, r);
  }
}
// Fill r[0..n) with Gaussian samples of mean mu and std dev sigma.
// NOTE(review): the normal generators typically require n to be even —
// confirm callers respect this.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
                            float* r) {
  CAFFE1_CURAND_CHECK(
      hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
                            double* r) {
  CAFFE1_CURAND_CHECK(
      hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
// Core of sparse(CSR) x dense GEMM: C = alpha * A_csr * B + beta * C.
// Each block owns a (rowA, colC) pair (strided over gridDim.x/y); the 32
// threads of the block stride over that CSR row's nonzeros, then combine
// partial sums via a warp-synchronous shared-memory tree reduction.
// ldb1/ldb2 and ldc1/ldc2 encode row- vs column-major strides for B and C.
// NOTE(review): the reduction relies on `volatile` + implicit warp lockstep
// (no __syncwarp between steps) — the classic pre-Volta pattern; verify on
// architectures with independent thread scheduling.
template<typename Dtype>
__device__ void caffe_gpu_csr_gemm_kernel_core(const int M, const int N,
                                               const int K, const Dtype alpha,
                                               int nzz, const Dtype* A,
                                               const int* indices,
                                               const int* ptr, const Dtype* B,
                                               const int ldb1, const int ldb2,
                                               const Dtype beta, Dtype* C,
                                               const int ldc1, const int ldc2) {
  // Double-sized so the first reduction step may read past index 31 safely;
  // garbage read there never reaches sums[0].
  __shared__ volatile Dtype sums[THREADS_PER_BLOCK_CSR * 2];
  for (int rowA = blockIdx.x; rowA < M; rowA += gridDim.x) {
    const int begin = ptr[rowA];      // CSR extent of this row's nonzeros
    const int end = ptr[rowA + 1];
    const int offset_c_part = rowA * ldc1;
    for (int colC = blockIdx.y; colC < N; colC += gridDim.y) {
      Dtype sum = 0.0;
      const int offset_b_part = colC * ldb2;
      // Each thread accumulates a strided slice of the sparse dot product.
      for (int pos = begin + threadIdx.x; pos < end; pos +=
          THREADS_PER_BLOCK_CSR) {
        const int colA = indices[pos];
        sum += A[pos] * B[colA * ldb1 + offset_b_part];
      }
      sums[threadIdx.x] = sum;
      __syncthreads();
      /* hardcoded reduction for 32 threads */
      sums[threadIdx.x] += sums[threadIdx.x + 16];
      sums[threadIdx.x] += sums[threadIdx.x + 8];
      sums[threadIdx.x] += sums[threadIdx.x + 4];
      sums[threadIdx.x] += sums[threadIdx.x + 2];
      sums[threadIdx.x] += sums[threadIdx.x + 1];
      if (threadIdx.x == 0) {
        const int offsetC = offset_c_part + colC * ldc2;
        C[offsetC] = beta * C[offsetC] + alpha * sums[0];
      }
    }
  }
}
// Dispatch wrapper for the CSR GEMM core: translates (TransB, orderC) into
// the concrete element strides of B and C.  For row-major storage a matrix
// with R columns has strides (R, 1); column-major with R rows has (1, R).
template<typename Dtype>
__global__ void caffe_gpu_csr_gemm_kernel(const CBLAS_TRANSPOSE TransB,
                                          const int M, const int N, const int K,
                                          const Dtype alpha, int nzz,
                                          const Dtype* A, const int* indices,
                                          const int* ptr, const Dtype* B,
                                          const Dtype beta, Dtype* C,
                                          const CBLAS_ORDER orderC) {
  if (orderC == CblasRowMajor) {
    if (TransB == CblasNoTrans) {
      caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, N,
                                     1, beta, C, N, 1);
    } else {
      caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, 1,
                                     K, beta, C, N, 1);
    }
  } else {
    if (TransB == CblasNoTrans) {
      caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, N,
                                     1, beta, C, 1, M);
    } else {
      caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, 1,
                                     K, beta, C, 1, M);
    }
  }
}
// Rank-1 update core: C += alpha * a * b^T where `a` is the sparse column
// slice delimited by ptr[0]..ptr[1] (values A, row indices `indices`) and
// `b` is a dense vector with stride ldb.  Threads tile the (nonzero, colC)
// space via the 2-D grid; ldc1/ldc2 encode C's storage order.
template<typename Dtype>
__device__ void caffe_gpu_csr_rank1_update_kernel_core(const int M, const int N,
                                                       const Dtype alpha,
                                                       const Dtype* A,
                                                       const int* indices,
                                                       const int* ptr,
                                                       const Dtype* B, int ldb,
                                                       Dtype* C, const int ldc1,
                                                       const int ldc2) {
  const int begin = ptr[0];
  const int end = ptr[1];
  // Grid-stride over this slice's nonzeros (x) and output columns (y).
  for (int pos = blockIdx.x * blockDim.x + begin + threadIdx.x; pos < end;
      pos += blockDim.x * gridDim.x) {
    const Dtype valA = A[pos] * alpha;   // hoisted: invariant over colC
    const int offset_part = indices[pos] * ldc1;
    for (int colC = blockIdx.y * blockDim.y + threadIdx.y; colC < N;
        colC += blockDim.y * gridDim.y) {
      const int C_offset = offset_part + colC * ldc2;
      C[C_offset] = C[C_offset] + B[colC * ldb] * valA;
    }
  }
}
// C = alpha A * B^T + C where A and B are vectors.
// A is a sparse vector and B is a dense vector.  This wrapper only resolves
// C's storage order into element strides for the core above.
template<typename Dtype>
__device__ void caffe_gpu_csr_rank1_update_kernel(const int M, const int N,
                                                  const Dtype alpha,
                                                  const Dtype* A,
                                                  const int* indices,
                                                  const int* ptr,
                                                  const Dtype* B, int ldb,
                                                  Dtype* C,
                                                  const CBLAS_ORDER orderC) {
  if (orderC == CblasRowMajor) {
    caffe_gpu_csr_rank1_update_kernel_core(M, N, alpha, A, indices, ptr, B, ldb,
                                           C, N, 1);
  } else {
    caffe_gpu_csr_rank1_update_kernel_core(M, N, alpha, A, indices, ptr, B, ldb,
                                           C, 1, M);
  }
}
// Accumulate K rank-1 updates: C += alpha * sum_i a_i * b_i^T, where the
// sparse slices a_i are delimited by consecutive ptr entries.  The i-loop is
// sequential inside the kernel so the updates into C do not race each other.
// TransB selects whether B's rows are laid out contiguously (stride 1, offset
// N*i) or interleaved (stride K, offset i).
template<typename Dtype>
__global__ void caffe_gpu_csr_rank1_update_kernel_multi(
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const Dtype alpha, const Dtype* A, const int* indices, const int* ptr,
    const Dtype* B, int ldb, Dtype* C, const CBLAS_ORDER orderC) {
  if (TransB == CblasNoTrans) {
    for (int i = 0; i < K; i++) {
      caffe_gpu_csr_rank1_update_kernel(M, N, alpha, A, indices, ptr + i,
                                        B + (N * i), 1, C, orderC);
    }
  } else {
    for (int i = 0; i < K; i++) {
      caffe_gpu_csr_rank1_update_kernel(M, N, alpha, A, indices, ptr + i, B + i,
                                        K, C, orderC);
    }
  }
}
// Sparse(CSR) x dense GEMM, float: C = alpha * op(A_csr) * op(B) + beta * C.
// TransA == NoTrans runs the direct CSR GEMM kernel (one block per (row, col)
// tile).  TransA == Trans instead scales C by beta and applies K sequential
// rank-1 updates (A^T * B as a sum of outer products), sized by the average
// nonzeros per row.
template<>
void caffe_gpu_csr_gemm<float>(const CBLAS_TRANSPOSE TransA,
                               const CBLAS_TRANSPOSE TransB, const int M,
                               const int N, const int K, const float alpha,
                               int nzz, const float* A, const int* indices,
                               const int* ptr, const float* B, const float beta,
                               float* C, const CBLAS_ORDER orderC) {
  if (TransA == CblasNoTrans) {
    dim3 grids(M, N);
    dim3 threads(THREADS_PER_BLOCK_CSR, 1);
    caffe_gpu_csr_gemm_kernel<float><< <grids, threads>>>(TransB, M, N, K,
        alpha, nzz, A, indices, ptr, B, beta, C, orderC);
  } else {
    // scale C by beta
    if (beta != 1.0) {
      CAFFE1_CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle() , M * N, &beta, C, 1));
    }
    const int average_nzz_per_row = nzz/K+1;
    dim3 grids((average_nzz_per_row+64-1)/64, N);
    dim3 threads(64, 1);
    caffe_gpu_csr_rank1_update_kernel_multi<float><< <grids, threads>>>(TransB,
        M, N, K,
        alpha, A, indices, ptr , B, 1, C, orderC);
  }
}
// Sparse(CSR) x dense GEMM, double.  Mirrors the float specialization above:
// direct CSR kernel for NoTrans A, beta-scale plus K rank-1 updates for
// transposed A.
template<>
void caffe_gpu_csr_gemm<double>(const CBLAS_TRANSPOSE TransA,
                                const CBLAS_TRANSPOSE TransB, const int M,
                                const int N, const int K, const double alpha,
                                int nzz, const double* A, const int* indices,
                                const int* ptr, const double* B,
                                const double beta, double* C,
                                const CBLAS_ORDER orderC) {
  if (TransA == CblasNoTrans) {
    dim3 grids(M, N);
    dim3 threads(THREADS_PER_BLOCK_CSR, 1);
    caffe_gpu_csr_gemm_kernel<double><< <grids, threads>>> (TransB, M, N, K,
        alpha, nzz, A, indices, ptr, B, beta, C, orderC);
  } else {
    // scale C by beta
    if (beta != 1.0) {
      CAFFE1_CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle() , M * N, &beta, C, 1));
    }
    const int average_nzz_per_row = nzz/K+1;
    dim3 grids((average_nzz_per_row+64-1)/64, N);
    dim3 threads(64, 1);
    caffe_gpu_csr_rank1_update_kernel_multi<double><< <grids, threads>>>(TransB,
        M, N, K,
        alpha, A, indices, ptr , B, 1, C, orderC);
  }
}
/* Other implementation using cusparse that is very slow at least using it like this
template <>
void caffe_gpu_csr_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, int nzz, const float* A, const int* indices, const int* ptr, const float* B, const float beta,
float* C, const CBLAS_ORDER orderC) {
//std::cout << "M: " << M << " N: " << N << " K: " << K << " NZZ: " << nzz <<"\n" ;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipsparseOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPSPARSE_OPERATION_NON_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE;
hipsparseOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;
float* Bt;
int ldb_t;
bool reuiqre_transpose_B = (cuTransA == HIPSPARSE_OPERATION_TRANSPOSE) && (cuTransB == HIPSPARSE_OPERATION_TRANSPOSE);
if (reuiqre_transpose_B){
//we need to transpose B because this operation is not supported by cusparse (god knows why)
ldb_t = K;
const float zero = 0.0;
const float one = 1.0;
CAFFE1_CUDA_CHECK(hipMalloc((void**)&Bt, sizeof(float)*K*N));
CAFFE1_CUBLAS_CHECK(hipblasSgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_T, K, N, &one, B, ldb, &zero, B, ldb, Bt, ldb_t));
}
int msparse = (TransA == CblasNoTrans) ? M : K;
int ksparse = (TransA == CblasNoTrans) ? K : M;
if (orderC == CblasRowMajor){
float* Ct;
CAFFE1_CUDA_CHECK(hipMalloc((void**)&Ct, sizeof(float)*M*N));
const float zero = 0.0;
const float one = 1.0;
if (reuiqre_transpose_B){
CUSPARSE_CHECK(hipsparseScsrmm2(Caffe::cusparse_handle(), cuTransA, HIPSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &zero, Ct, M));
CAFFE1_CUDA_CHECK(hipFree(Bt));
}else{
CUSPARSE_CHECK(hipsparseScsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &zero, Ct, M));
}
CAFFE1_CUBLAS_CHECK(hipblasSgeam(Caffe::cublas_handle(), HIPBLAS_OP_T , HIPBLAS_OP_N, N, M, &one, Ct, M, &beta, C, N, C, N));
CAFFE1_CUDA_CHECK(hipFree(Ct));
}else{
//this is the default of CUSPARSE by the Matrix B is by default rowmajor
if (reuiqre_transpose_B){
CUSPARSE_CHECK(hipsparseScsrmm2(Caffe::cusparse_handle(), cuTransA, HIPSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &beta, C, M));
CAFFE1_CUDA_CHECK(hipFree(Bt));
}else{
CUSPARSE_CHECK(hipsparseScsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &beta, C, M));
}
}
}
template <>
void caffe_gpu_csr_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, int nzz, const double* A, const int* indices, const int* ptr, const double* B, const double beta,
double* C, const CBLAS_ORDER orderC) {
//std::cout << "M: " << M << "N: " << N << "K: " << K << "NZZ: " << nzz ;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipsparseOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPSPARSE_OPERATION_NON_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE;
hipsparseOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE;
double* Bt;
int ldb_t;
bool reuiqre_transpose_B = (cuTransA == HIPSPARSE_OPERATION_TRANSPOSE) && (cuTransB == HIPSPARSE_OPERATION_TRANSPOSE);
if (reuiqre_transpose_B){
//we need to transpose B because this operation is not supported by cusparse (god knows why)
ldb_t = K;
const double zero = 0.0;
const double one = 1.0;
CAFFE1_CUDA_CHECK(hipMalloc((void**)&Bt, sizeof(double)*K*N));
CAFFE1_CUBLAS_CHECK(hipblasDgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_T, K, N, &one, B, ldb, &zero, B, ldb, Bt, ldb_t));
}
int msparse = (TransA == CblasNoTrans) ? M : K;
int ksparse = (TransA == CblasNoTrans) ? K : M;
if (orderC == CblasRowMajor){
double* Ct;
CAFFE1_CUDA_CHECK(hipMalloc((void**)&Ct, sizeof(double)*M*N));
const double zero = 0.0;
const double one = 1.0;
if (reuiqre_transpose_B){
CUSPARSE_CHECK(hipsparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, HIPSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &zero, Ct, M));
CAFFE1_CUDA_CHECK(hipFree(Bt));
}else{
CUSPARSE_CHECK(hipsparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &zero, Ct, M));
}
CAFFE1_CUBLAS_CHECK(hipblasDgeam(Caffe::cublas_handle(), HIPBLAS_OP_T , HIPBLAS_OP_N, N, M, &one, Ct, M, &beta, C, N, C, N));
CAFFE1_CUDA_CHECK(hipFree(Ct));
}else{
//this is the default of CUSPARSE by the Matrix B is by default rowmajor
if (reuiqre_transpose_B){
CUSPARSE_CHECK(hipsparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, HIPSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &beta, C, M));
CAFFE1_CUDA_CHECK(hipFree(Bt));
}else{
CUSPARSE_CHECK(hipsparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &beta, C, M));
}
}
}
*/
} // namespace caffe
| 7a8ed6a701d8c6b704214dcd86172133b382124a.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
#define THREADS_PER_BLOCK_CSR 32
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CAFFE1_CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CAFFE1_CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CAFFE1_CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CAFFE1_CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CAFFE1_CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CAFFE1_CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CAFFE1_CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CAFFE1_CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CAFFE1_CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CAFFE1_CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CAFFE1_CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CAFFE1_CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CAFFE1_CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CAFFE1_CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CAFFE1_CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CAFFE1_CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CAFFE1_CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CAFFE1_CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CAFFE1_CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CAFFE1_CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CAFFE1_CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CAFFE1_CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CAFFE1_CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
template<typename Dtype>
__device__ void caffe_gpu_csr_gemm_kernel_core(const int M, const int N,
const int K, const Dtype alpha,
int nzz, const Dtype* A,
const int* indices,
const int* ptr, const Dtype* B,
const int ldb1, const int ldb2,
const Dtype beta, Dtype* C,
const int ldc1, const int ldc2) {
__shared__ volatile Dtype sums[THREADS_PER_BLOCK_CSR * 2];
for (int rowA = blockIdx.x; rowA < M; rowA += gridDim.x) {
const int begin = ptr[rowA];
const int end = ptr[rowA + 1];
const int offset_c_part = rowA * ldc1;
for (int colC = blockIdx.y; colC < N; colC += gridDim.y) {
Dtype sum = 0.0;
const int offset_b_part = colC * ldb2;
for (int pos = begin + threadIdx.x; pos < end; pos +=
THREADS_PER_BLOCK_CSR) {
const int colA = indices[pos];
sum += A[pos] * B[colA * ldb1 + offset_b_part];
}
sums[threadIdx.x] = sum;
__syncthreads();
/* hardcoded reduction for 32 threads */
sums[threadIdx.x] += sums[threadIdx.x + 16];
sums[threadIdx.x] += sums[threadIdx.x + 8];
sums[threadIdx.x] += sums[threadIdx.x + 4];
sums[threadIdx.x] += sums[threadIdx.x + 2];
sums[threadIdx.x] += sums[threadIdx.x + 1];
if (threadIdx.x == 0) {
const int offsetC = offset_c_part + colC * ldc2;
C[offsetC] = beta * C[offsetC] + alpha * sums[0];
}
}
}
}
template<typename Dtype>
__global__ void caffe_gpu_csr_gemm_kernel(const CBLAS_TRANSPOSE TransB,
const int M, const int N, const int K,
const Dtype alpha, int nzz,
const Dtype* A, const int* indices,
const int* ptr, const Dtype* B,
const Dtype beta, Dtype* C,
const CBLAS_ORDER orderC) {
if (orderC == CblasRowMajor) {
if (TransB == CblasNoTrans) {
caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, N,
1, beta, C, N, 1);
} else {
caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, 1,
K, beta, C, N, 1);
}
} else {
if (TransB == CblasNoTrans) {
caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, N,
1, beta, C, 1, M);
} else {
caffe_gpu_csr_gemm_kernel_core(M, N, K, alpha, nzz, A, indices, ptr, B, 1,
K, beta, C, 1, M);
}
}
}
template<typename Dtype>
__device__ void caffe_gpu_csr_rank1_update_kernel_core(const int M, const int N,
const Dtype alpha,
const Dtype* A,
const int* indices,
const int* ptr,
const Dtype* B, int ldb,
Dtype* C, const int ldc1,
const int ldc2) {
const int begin = ptr[0];
const int end = ptr[1];
for (int pos = blockIdx.x * blockDim.x + begin + threadIdx.x; pos < end;
pos += blockDim.x * gridDim.x) {
const Dtype valA = A[pos] * alpha;
const int offset_part = indices[pos] * ldc1;
for (int colC = blockIdx.y * blockDim.y + threadIdx.y; colC < N;
colC += blockDim.y * gridDim.y) {
const int C_offset = offset_part + colC * ldc2;
C[C_offset] = C[C_offset] + B[colC * ldb] * valA;
}
}
}
// C = alpha A * B^T + C where A and B are vectors.
// A is a sprase vector and B is a dense vector
template<typename Dtype>
__device__ void caffe_gpu_csr_rank1_update_kernel(const int M, const int N,
const Dtype alpha,
const Dtype* A,
const int* indices,
const int* ptr,
const Dtype* B, int ldb,
Dtype* C,
const CBLAS_ORDER orderC) {
if (orderC == CblasRowMajor) {
caffe_gpu_csr_rank1_update_kernel_core(M, N, alpha, A, indices, ptr, B, ldb,
C, N, 1);
} else {
caffe_gpu_csr_rank1_update_kernel_core(M, N, alpha, A, indices, ptr, B, ldb,
C, 1, M);
}
}
template<typename Dtype>
__global__ void caffe_gpu_csr_rank1_update_kernel_multi(
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const Dtype alpha, const Dtype* A, const int* indices, const int* ptr,
const Dtype* B, int ldb, Dtype* C, const CBLAS_ORDER orderC) {
if (TransB == CblasNoTrans) {
for (int i = 0; i < K; i++) {
caffe_gpu_csr_rank1_update_kernel(M, N, alpha, A, indices, ptr + i,
B + (N * i), 1, C, orderC);
}
} else {
for (int i = 0; i < K; i++) {
caffe_gpu_csr_rank1_update_kernel(M, N, alpha, A, indices, ptr + i, B + i,
K, C, orderC);
}
}
}
template<>
void caffe_gpu_csr_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M,
const int N, const int K, const float alpha,
int nzz, const float* A, const int* indices,
const int* ptr, const float* B, const float beta,
float* C, const CBLAS_ORDER orderC) {
if (TransA == CblasNoTrans) {
dim3 grids(M, N);
dim3 threads(THREADS_PER_BLOCK_CSR, 1);
caffe_gpu_csr_gemm_kernel<float><< <grids, threads>>>(TransB, M, N, K,
alpha, nzz, A, indices, ptr, B, beta, C, orderC);
} else {
// scale C by beta
if (beta != 1.0) {
CAFFE1_CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle() , M * N, &beta, C, 1));
}
const int average_nzz_per_row = nzz/K+1;
dim3 grids((average_nzz_per_row+64-1)/64, N);
dim3 threads(64, 1);
caffe_gpu_csr_rank1_update_kernel_multi<float><< <grids, threads>>>(TransB,
M, N, K,
alpha, A, indices, ptr , B, 1, C, orderC);
}
}
template<>
void caffe_gpu_csr_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M,
const int N, const int K, const double alpha,
int nzz, const double* A, const int* indices,
const int* ptr, const double* B,
const double beta, double* C,
const CBLAS_ORDER orderC) {
if (TransA == CblasNoTrans) {
dim3 grids(M, N);
dim3 threads(THREADS_PER_BLOCK_CSR, 1);
caffe_gpu_csr_gemm_kernel<double><< <grids, threads>>> (TransB, M, N, K,
alpha, nzz, A, indices, ptr, B, beta, C, orderC);
} else {
// scale C by beta
if (beta != 1.0) {
CAFFE1_CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle() , M * N, &beta, C, 1));
}
const int average_nzz_per_row = nzz/K+1;
dim3 grids((average_nzz_per_row+64-1)/64, N);
dim3 threads(64, 1);
caffe_gpu_csr_rank1_update_kernel_multi<double><< <grids, threads>>>(TransB,
M, N, K,
alpha, A, indices, ptr , B, 1, C, orderC);
}
}
/* Other implementation using cusparse that is very slow at least using it like this
template <>
void caffe_gpu_csr_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, int nzz, const float* A, const int* indices, const int* ptr, const float* B, const float beta,
float* C, const CBLAS_ORDER orderC) {
//std::cout << "M: " << M << " N: " << N << " K: " << K << " NZZ: " << nzz <<"\n" ;
int ldb = (TransB == CblasNoTrans) ? N : K;
cusparseOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUSPARSE_OPERATION_NON_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
cusparseOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
float* Bt;
int ldb_t;
bool reuiqre_transpose_B = (cuTransA == CUSPARSE_OPERATION_TRANSPOSE) && (cuTransB == CUSPARSE_OPERATION_TRANSPOSE);
if (reuiqre_transpose_B){
//we need to transpose B because this operation is not supported by cusparse (god knows why)
ldb_t = K;
const float zero = 0.0;
const float one = 1.0;
CAFFE1_CUDA_CHECK(cudaMalloc((void**)&Bt, sizeof(float)*K*N));
CAFFE1_CUBLAS_CHECK(cublasSgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_T, K, N, &one, B, ldb, &zero, B, ldb, Bt, ldb_t));
}
int msparse = (TransA == CblasNoTrans) ? M : K;
int ksparse = (TransA == CblasNoTrans) ? K : M;
if (orderC == CblasRowMajor){
float* Ct;
CAFFE1_CUDA_CHECK(cudaMalloc((void**)&Ct, sizeof(float)*M*N));
const float zero = 0.0;
const float one = 1.0;
if (reuiqre_transpose_B){
CUSPARSE_CHECK(cusparseScsrmm2(Caffe::cusparse_handle(), cuTransA, CUSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &zero, Ct, M));
CAFFE1_CUDA_CHECK(cudaFree(Bt));
}else{
CUSPARSE_CHECK(cusparseScsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &zero, Ct, M));
}
CAFFE1_CUBLAS_CHECK(cublasSgeam(Caffe::cublas_handle(), CUBLAS_OP_T , CUBLAS_OP_N, N, M, &one, Ct, M, &beta, C, N, C, N));
CAFFE1_CUDA_CHECK(cudaFree(Ct));
}else{
//this is the default of CUSPARSE by the Matrix B is by default rowmajor
if (reuiqre_transpose_B){
CUSPARSE_CHECK(cusparseScsrmm2(Caffe::cusparse_handle(), cuTransA, CUSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &beta, C, M));
CAFFE1_CUDA_CHECK(cudaFree(Bt));
}else{
CUSPARSE_CHECK(cusparseScsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &beta, C, M));
}
}
}
template <>
void caffe_gpu_csr_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, int nzz, const double* A, const int* indices, const int* ptr, const double* B, const double beta,
double* C, const CBLAS_ORDER orderC) {
//std::cout << "M: " << M << "N: " << N << "K: " << K << "NZZ: " << nzz ;
int ldb = (TransB == CblasNoTrans) ? N : K;
cusparseOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUSPARSE_OPERATION_NON_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE;
cusparseOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE;
double* Bt;
int ldb_t;
bool reuiqre_transpose_B = (cuTransA == CUSPARSE_OPERATION_TRANSPOSE) && (cuTransB == CUSPARSE_OPERATION_TRANSPOSE);
if (reuiqre_transpose_B){
//we need to transpose B because this operation is not supported by cusparse (god knows why)
ldb_t = K;
const double zero = 0.0;
const double one = 1.0;
CAFFE1_CUDA_CHECK(cudaMalloc((void**)&Bt, sizeof(double)*K*N));
CAFFE1_CUBLAS_CHECK(cublasDgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_T, K, N, &one, B, ldb, &zero, B, ldb, Bt, ldb_t));
}
int msparse = (TransA == CblasNoTrans) ? M : K;
int ksparse = (TransA == CblasNoTrans) ? K : M;
if (orderC == CblasRowMajor){
double* Ct;
CAFFE1_CUDA_CHECK(cudaMalloc((void**)&Ct, sizeof(double)*M*N));
const double zero = 0.0;
const double one = 1.0;
if (reuiqre_transpose_B){
CUSPARSE_CHECK(cusparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, CUSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &zero, Ct, M));
CAFFE1_CUDA_CHECK(cudaFree(Bt));
}else{
CUSPARSE_CHECK(cusparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &zero, Ct, M));
}
CAFFE1_CUBLAS_CHECK(cublasDgeam(Caffe::cublas_handle(), CUBLAS_OP_T , CUBLAS_OP_N, N, M, &one, Ct, M, &beta, C, N, C, N));
CAFFE1_CUDA_CHECK(cudaFree(Ct));
}else{
//this is the default of CUSPARSE by the Matrix B is by default rowmajor
if (reuiqre_transpose_B){
CUSPARSE_CHECK(cusparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, CUSPARSE_OPERATION_NON_TRANSPOSE, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, Bt, ldb_t, &beta, C, M));
CAFFE1_CUDA_CHECK(cudaFree(Bt));
}else{
CUSPARSE_CHECK(cusparseDcsrmm2(Caffe::cusparse_handle(), cuTransA, cuTransB, msparse, N, ksparse,nzz, &alpha, Caffe::cusparse_mat_descr(), A, ptr, indices, B, ldb, &beta, C, M));
}
}
}
*/
} // namespace caffe
|
544d2ad2fc50e97e6cf475cb96d873222f0a723d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#define size 5
#define threads 32
using namespace std;
__global__ void callOperation(int *a, int *b,int *res, int k, int p, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n) {
return;
}
res[tid] = a[tid] + b[tid];
if (res[tid] > k) {
res[tid] = p;
}
}
__global__ void callOperationSharedStatic(int *a, int *b, int *res, int k, int p, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n) {
return;
}
__shared__ int s_a[size], s_b[size], s_res[size];
__shared__ int s_k, s_p;
s_k = k;
s_p = p;
s_a[tid] = a[tid];
s_b[tid] = b[tid];
s_res[tid] = s_a[tid] + s_b[tid];
if (s_res[tid] > s_k) {
s_res[tid] = s_p;
}
res[tid] = s_res[tid];
}
__global__ void callOperationSharedDynamic(int *a, int *b, int *res, int k, int p, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid>= n)
{
return;
}
extern __shared__ int data[];
int *s_a = data;
int *s_b = &s_a[n];
int *s_res = &s_b[n];
__shared__ int s_k, s_p;
s_k = k;
s_p = p;
s_a[tid] = a[tid];
s_b[tid] = b[tid];
s_res[tid] = res[tid];
s_res[tid] = s_a[tid] + s_b[tid];
if (s_res[tid] > s_k)
{
s_res[tid] = s_p;
}
res[tid] = s_res[tid];
}
int main()
{
int *a, *b, *res;
int *d_a, *d_b, *d_res;
int k, p;
cout << "Unesi broj k:" << endl;
cin >> k;
cout << "Unesi broj p:" << endl;
cin >> p;
a = (int*)malloc(size * sizeof(int));
b = (int*)malloc(size * sizeof(int));
res = (int*)malloc(size * sizeof(int));
for (int i = 0; i < size; i++) {
a[i] = size * i;
b[i] = size / 2 * i;
}
cout << "\nNiz A:" << endl;
for (int i = 0; i < size; i++) {
cout << a[i] << endl;
}
cout << "\nNiz B:" << endl;
for (int i = 0; i < size; i++) {
cout << b[i] << endl;
}
hipMalloc((void**)&d_a, size * sizeof(int));
hipMalloc((void**)&d_b, size * sizeof(int));
hipMalloc((void**)&d_res, size * sizeof(int));
hipMemcpy(d_a, a, size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size * sizeof(int), hipMemcpyHostToDevice);
//callOperation << <size / threads + 1, threads >> > (d_a, d_b, d_res, k, p, size);
//callOperationSharedStatic << <size / threads + 1, threads >> > (d_a, d_b, d_res, k, p, size);
callOperationSharedDynamic << <size / threads + 1, threads, size * sizeof(int) + size * sizeof(int) + size * sizeof(int) >> > (d_a, d_b, d_res, k, p, size);
hipMemcpy(res, d_res, size * sizeof(int), hipMemcpyDeviceToHost);
cout << "\nRezultat:" << endl;
for (int i = 0; i < size; i++) {
cout << res[i] << endl;
}
cout << endl;
system("PAUSE");
return 0;
} | 544d2ad2fc50e97e6cf475cb96d873222f0a723d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#define size 5
#define threads 32
using namespace std;
__global__ void callOperation(int *a, int *b,int *res, int k, int p, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n) {
return;
}
res[tid] = a[tid] + b[tid];
if (res[tid] > k) {
res[tid] = p;
}
}
__global__ void callOperationSharedStatic(int *a, int *b, int *res, int k, int p, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n) {
return;
}
__shared__ int s_a[size], s_b[size], s_res[size];
__shared__ int s_k, s_p;
s_k = k;
s_p = p;
s_a[tid] = a[tid];
s_b[tid] = b[tid];
s_res[tid] = s_a[tid] + s_b[tid];
if (s_res[tid] > s_k) {
s_res[tid] = s_p;
}
res[tid] = s_res[tid];
}
__global__ void callOperationSharedDynamic(int *a, int *b, int *res, int k, int p, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid>= n)
{
return;
}
extern __shared__ int data[];
int *s_a = data;
int *s_b = &s_a[n];
int *s_res = &s_b[n];
__shared__ int s_k, s_p;
s_k = k;
s_p = p;
s_a[tid] = a[tid];
s_b[tid] = b[tid];
s_res[tid] = res[tid];
s_res[tid] = s_a[tid] + s_b[tid];
if (s_res[tid] > s_k)
{
s_res[tid] = s_p;
}
res[tid] = s_res[tid];
}
int main()
{
int *a, *b, *res;
int *d_a, *d_b, *d_res;
int k, p;
cout << "Unesi broj k:" << endl;
cin >> k;
cout << "Unesi broj p:" << endl;
cin >> p;
a = (int*)malloc(size * sizeof(int));
b = (int*)malloc(size * sizeof(int));
res = (int*)malloc(size * sizeof(int));
for (int i = 0; i < size; i++) {
a[i] = size * i;
b[i] = size / 2 * i;
}
cout << "\nNiz A:" << endl;
for (int i = 0; i < size; i++) {
cout << a[i] << endl;
}
cout << "\nNiz B:" << endl;
for (int i = 0; i < size; i++) {
cout << b[i] << endl;
}
cudaMalloc((void**)&d_a, size * sizeof(int));
cudaMalloc((void**)&d_b, size * sizeof(int));
cudaMalloc((void**)&d_res, size * sizeof(int));
cudaMemcpy(d_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
//callOperation << <size / threads + 1, threads >> > (d_a, d_b, d_res, k, p, size);
//callOperationSharedStatic << <size / threads + 1, threads >> > (d_a, d_b, d_res, k, p, size);
callOperationSharedDynamic << <size / threads + 1, threads, size * sizeof(int) + size * sizeof(int) + size * sizeof(int) >> > (d_a, d_b, d_res, k, p, size);
cudaMemcpy(res, d_res, size * sizeof(int), cudaMemcpyDeviceToHost);
cout << "\nRezultat:" << endl;
for (int i = 0; i < size; i++) {
cout << res[i] << endl;
}
cout << endl;
system("PAUSE");
return 0;
} |
d0209f1db56e64f442016febc1212dd8df8766e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "example.h"
__global__ void test_kernel(void) {
}
namespace Example {
void example(void)
{
hipLaunchKernelGGL(( test_kernel) , dim3(1), dim3(1), 0, 0, );
cout << "Hello, world"s << endl;
}
}
| d0209f1db56e64f442016febc1212dd8df8766e3.cu | #include "example.h"
__global__ void test_kernel(void) {
}
namespace Example {
void example(void)
{
test_kernel <<<1, 1>>> ();
cout << "Hello, world"s << endl;
}
}
|
48cc1e312a97fd21450fd47d52a87f35e6f050b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <caffepro/layers/diag_operation_layer.h>
#include <caffepro/proto/caffe.pb.h>
#include <caffepro/utils/utils.h>
namespace caffepro {
diag_operation_layer::diag_operation_layer(caffepro_context *context, const LayerParameter ¶m)
: caffepro_layer(context, param) {
attr_.num_inputs_min = attr_.num_inputs_max = 1;
attr_.num_outputs_min = attr_.num_outputs_max = 1;
attr_.set_constraint(
layer_attribute::CF_ALLOW_INPLACE
| layer_attribute::CF_REQUIRE_NDIM_4
| layer_attribute::CF_REQUIRE_FIXEDLEN_DIM // remove it in the future
);
}
diag_operation_layer::~diag_operation_layer() {
release_all();
}
void diag_operation_layer::init() {
check_input();
scale_ = layer_param_.diag_operation_param().scale();
shift_ = layer_param_.diag_operation_param().shift();
}
__global__ static void diag_forward(const int count, const int inner_count, const data_type scale, const data_type shift,
const data_type *in, data_type *out) {
CUDA_KERNEL_LOOP(index, count) {
int i = index % inner_count, n = index / inner_count;
if (i == n) {
out[index] = in[index] * scale + shift;
}
else {
out[index] = in[index];
}
}
}
void diag_operation_layer::on_forward(int device_index) {
auto &input = *inputs_[0]->get(device_index);
int count = input.count();
KERNEL_CALL(diag_forward, count)(count, input.inner_count(), scale_, shift_, input.gpu_data(), outputs_[0]->get(device_index)->mutable_gpu_data());
}
__global__ static void diag_backward(const int count, const int inner_count, const data_type scale,
const data_type *top_diff, data_type *bottom_diff, const data_type scale_targets) {
CUDA_KERNEL_LOOP(index, count) {
int i = index % inner_count, n = index / inner_count;
data_type v = top_diff[index];
if (i == n) {
v *= scale;
}
if (scale_targets == 0) {
bottom_diff[index] = v;
}
else {
bottom_diff[index] = bottom_diff[index] * scale_targets + v;
}
}
}
void diag_operation_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) {
if (should_bp(bp_acts, 0)) {
data_type beta = get_beta(clear_acts_diff, 0);
auto &input = *inputs_[0]->get(device_index);
int count = input.count();
KERNEL_CALL(diag_backward, count)(count, input.inner_count(), scale_, outputs_[0]->get(device_index)->gpu_diff(), input.mutable_gpu_diff(), beta);
}
}
} | 48cc1e312a97fd21450fd47d52a87f35e6f050b8.cu |
#include <caffepro/layers/diag_operation_layer.h>
#include <caffepro/proto/caffe.pb.h>
#include <caffepro/utils/utils.h>
namespace caffepro {
diag_operation_layer::diag_operation_layer(caffepro_context *context, const LayerParameter ¶m)
: caffepro_layer(context, param) {
attr_.num_inputs_min = attr_.num_inputs_max = 1;
attr_.num_outputs_min = attr_.num_outputs_max = 1;
attr_.set_constraint(
layer_attribute::CF_ALLOW_INPLACE
| layer_attribute::CF_REQUIRE_NDIM_4
| layer_attribute::CF_REQUIRE_FIXEDLEN_DIM // remove it in the future
);
}
diag_operation_layer::~diag_operation_layer() {
release_all();
}
void diag_operation_layer::init() {
check_input();
scale_ = layer_param_.diag_operation_param().scale();
shift_ = layer_param_.diag_operation_param().shift();
}
__global__ static void diag_forward(const int count, const int inner_count, const data_type scale, const data_type shift,
const data_type *in, data_type *out) {
CUDA_KERNEL_LOOP(index, count) {
int i = index % inner_count, n = index / inner_count;
if (i == n) {
out[index] = in[index] * scale + shift;
}
else {
out[index] = in[index];
}
}
}
void diag_operation_layer::on_forward(int device_index) {
auto &input = *inputs_[0]->get(device_index);
int count = input.count();
KERNEL_CALL(diag_forward, count)(count, input.inner_count(), scale_, shift_, input.gpu_data(), outputs_[0]->get(device_index)->mutable_gpu_data());
}
__global__ static void diag_backward(const int count, const int inner_count, const data_type scale,
const data_type *top_diff, data_type *bottom_diff, const data_type scale_targets) {
CUDA_KERNEL_LOOP(index, count) {
int i = index % inner_count, n = index / inner_count;
data_type v = top_diff[index];
if (i == n) {
v *= scale;
}
if (scale_targets == 0) {
bottom_diff[index] = v;
}
else {
bottom_diff[index] = bottom_diff[index] * scale_targets + v;
}
}
}
void diag_operation_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) {
if (should_bp(bp_acts, 0)) {
data_type beta = get_beta(clear_acts_diff, 0);
auto &input = *inputs_[0]->get(device_index);
int count = input.count();
KERNEL_CALL(diag_backward, count)(count, input.inner_count(), scale_, outputs_[0]->get(device_index)->gpu_diff(), input.mutable_gpu_diff(), beta);
}
}
} |
012a9509cdae4360ada15df55678939770b353c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
============================================================================
Name : Main.cu
Author : Piotr Lubo
Version :
Copyright :
Description :
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <vector>
#include <string>
#include <iostream>
#include <cstdlib>
#include <iomanip>
#include <unistd.h>
#include "ProblemInstance.h"
#include "Evaluator_hip.cuh"
#include <cfloat>
#include <sys/time.h>
#include <ctime>
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define MAX_BLOCK_SIZE 512
typedef long long int64; typedef unsigned long long uint64;
int main(int argc, char * argv[])
{
hipEvent_t start, stop, startCopy, endCopy, startSingle, endSingle;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&startCopy);
hipEventCreate(&endCopy);
hipEventCreate(&startSingle);
hipEventCreate(&endSingle);
char cwd[1024];
if (getcwd(cwd, sizeof(cwd)) != NULL)
fprintf(stdout, "Current working dir: %s\n", cwd);
string filename="testfile";
ProblemInstance problem(filename);
float finalResult[problem.length];
char* devExpression = NULL;
float* devVectors = NULL;
float* devResult = NULL;
hipEventRecord(startCopy, 0);
problem.CopyToDevice(devExpression, devVectors, devResult);
hipEventRecord(endCopy, 0);
hipEventSynchronize(endCopy);
float elapsedCopy;
hipEventElapsedTime(&elapsedCopy, startCopy, endCopy);
int explen = problem.ExpLength;
int width = (explen+1)/4;
int threads = MAX_BLOCK_SIZE-(MAX_BLOCK_SIZE%width);
int blocks = ((problem.length * width) / threads);
if(problem.length*width%threads!=0)
blocks++;
int aligned = problem.GetNumOfVariables();
if(aligned%4!=0)
aligned = problem.GetNumOfVariables() + 4 - (problem.GetNumOfVariables()%4);
size_t sharedPerBlock = (threads + (aligned * (MAX_BLOCK_SIZE/width))) * sizeof(float)+ ((explen+1) * sizeof(char));
cout<<"Problem length: "<<problem.length<<endl;
cout<<"Array tree length: "<<explen<<endl;
cout<<"Threads per subproblem: "<<width<<endl;
cout<<"Threads per block: "<<threads<<endl;
cout<<"Blocks: "<<blocks<<endl;
cout<<"Bytes of shared memory per block: "<<sharedPerBlock<<endl;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Evaluate), dim3(blocks), dim3(MAX_BLOCK_SIZE), sharedPerBlock, 0, devVectors, devResult, devExpression, problem.length, problem.GetNumOfVariables(), explen);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
hipMemcpy(finalResult, devResult, problem.length*sizeof(float), hipMemcpyDeviceToHost);
struct timeval t5, t6;
gettimeofday(&t5, NULL);
vector<float> cpuResults = problem.EvaluateCpu();
gettimeofday(&t6, NULL);
uint64 diff = ((t6.tv_sec - t5.tv_sec) * 1000) +(t6.tv_usec/1000 - t5.tv_usec/1000);
bool ok = true;
for(int i = 0; i < problem.length; i++)
{
if(!(fabs(cpuResults[i] - finalResult[i]) < FLT_EPSILON))
{
cout<<i<<" "<<cpuResults[i]<<" : "<<finalResult[i]<<endl;
ok = false;
break;
}
}
if(ok)
cout<<"Results ok"<<endl;
else
cout<<"Results not ok"<<endl;
memset(finalResult, 0, problem.length * sizeof(float));
hipFree(devExpression);
hipFree(devVectors);
hipFree(devResult);
cout<<"Copying time: "<<elapsedCopy<<endl;
cout<<"Calculation time: "<<elapsedTime<<endl;
cout<<"Cpu calculation time:"<<diff<<endl;
hipFree(devExpression);
hipFree(devVectors);
hipFree(devResult);
}
| 012a9509cdae4360ada15df55678939770b353c2.cu | /*
============================================================================
Name : Main.cu
Author : Piotr Luboń
Version :
Copyright :
Description :
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include <fstream>
#include <vector>
#include <string>
#include <iostream>
#include <cstdlib>
#include <iomanip>
#include <unistd.h>
#include "ProblemInstance.h"
#include "Evaluator.cuh"
#include <cfloat>
#include <sys/time.h>
#include <ctime>
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define MAX_BLOCK_SIZE 512
typedef long long int64; typedef unsigned long long uint64;
int main(int argc, char * argv[])
{
cudaEvent_t start, stop, startCopy, endCopy, startSingle, endSingle;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&startCopy);
cudaEventCreate(&endCopy);
cudaEventCreate(&startSingle);
cudaEventCreate(&endSingle);
char cwd[1024];
if (getcwd(cwd, sizeof(cwd)) != NULL)
fprintf(stdout, "Current working dir: %s\n", cwd);
string filename="testfile";
ProblemInstance problem(filename);
float finalResult[problem.length];
char* devExpression = NULL;
float* devVectors = NULL;
float* devResult = NULL;
cudaEventRecord(startCopy, 0);
problem.CopyToDevice(devExpression, devVectors, devResult);
cudaEventRecord(endCopy, 0);
cudaEventSynchronize(endCopy);
float elapsedCopy;
cudaEventElapsedTime(&elapsedCopy, startCopy, endCopy);
int explen = problem.ExpLength;
int width = (explen+1)/4;
int threads = MAX_BLOCK_SIZE-(MAX_BLOCK_SIZE%width);
int blocks = ((problem.length * width) / threads);
if(problem.length*width%threads!=0)
blocks++;
int aligned = problem.GetNumOfVariables();
if(aligned%4!=0)
aligned = problem.GetNumOfVariables() + 4 - (problem.GetNumOfVariables()%4);
size_t sharedPerBlock = (threads + (aligned * (MAX_BLOCK_SIZE/width))) * sizeof(float)+ ((explen+1) * sizeof(char));
cout<<"Problem length: "<<problem.length<<endl;
cout<<"Array tree length: "<<explen<<endl;
cout<<"Threads per subproblem: "<<width<<endl;
cout<<"Threads per block: "<<threads<<endl;
cout<<"Blocks: "<<blocks<<endl;
cout<<"Bytes of shared memory per block: "<<sharedPerBlock<<endl;
cudaEventRecord(start, 0);
Evaluate<<<blocks, MAX_BLOCK_SIZE, sharedPerBlock>>>(devVectors, devResult, devExpression, problem.length, problem.GetNumOfVariables(), explen);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy(finalResult, devResult, problem.length*sizeof(float), cudaMemcpyDeviceToHost);
struct timeval t5, t6;
gettimeofday(&t5, NULL);
vector<float> cpuResults = problem.EvaluateCpu();
gettimeofday(&t6, NULL);
uint64 diff = ((t6.tv_sec - t5.tv_sec) * 1000) +(t6.tv_usec/1000 - t5.tv_usec/1000);
bool ok = true;
for(int i = 0; i < problem.length; i++)
{
if(!(fabs(cpuResults[i] - finalResult[i]) < FLT_EPSILON))
{
cout<<i<<" "<<cpuResults[i]<<" : "<<finalResult[i]<<endl;
ok = false;
break;
}
}
if(ok)
cout<<"Results ok"<<endl;
else
cout<<"Results not ok"<<endl;
memset(finalResult, 0, problem.length * sizeof(float));
cudaFree(devExpression);
cudaFree(devVectors);
cudaFree(devResult);
cout<<"Copying time: "<<elapsedCopy<<endl;
cout<<"Calculation time: "<<elapsedTime<<endl;
cout<<"Cpu calculation time:"<<diff<<endl;
cudaFree(devExpression);
cudaFree(devVectors);
cudaFree(devResult);
}
|
ab12943e17c3b201adac7397707508ad6d890bae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i=0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
}
| ab12943e17c3b201adac7397707508ad6d890bae.cu | #include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i=0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
saxpy<<<(N+255)/256, 256>>>(N, 2.0, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
}
|
d3338d0ed2a64037857cde2806bc74a97d25e23b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/native/Pool.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <THH/THHNumerics.cuh>
#include <c10/macros/Macros.h>
#include <ATen/native/hip/LaunchUtils.h>
#include <ATen/hip/HIPApplyUtils.cuh>
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
#define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched
static __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) {
return (size + pad < ((kernel - 1) * dilation + 1)) ? 0 : (size + pad - ((kernel - 1) * dilation + 1)) / stride + 1;
}
static __device__ inline int p_end(int size, int pad, int pooled_size, int stride) {
return min((size + pad) / stride + 1, pooled_size);
}
// kernels borrowed from Caffe
template <typename scalar_t, typename accscalar_t>
__global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, scalar_t* top_data,
int64_t* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height);
int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width);
while(hstart < 0)
hstart += dilation_h;
while(wstart < 0)
wstart += dilation_w;
accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity
int maxidx = hstart * width + wstart;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; h += dilation_h) {
for (int w = wstart; w < wend; w += dilation_w) {
scalar_t val = bottom_data[h * width + w];
if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) {
maxidx = h * width + w;
maxval = ScalarConvert<scalar_t, accscalar_t>::to(val);
}
}
}
top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval);
top_mask[index] = maxidx;
}
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int nbatch,
const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w,
const int in_stride_n, const int in_stride_c,
const int in_stride_h, const int in_stride_w,
const int kernel_stride_C, const int kernel_size_C,
scalar_t* top_data, int64_t* top_mask) {
extern __shared__ int smem[];
int *out_mask_cached = smem;
scalar_t *out_cached = reinterpret_cast<scalar_t*>(&out_mask_cached[kernel_size_C*blockDim.x*blockDim.y*blockDim.z]);
// flattening cta for pre-computation & smem initialization;
int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
int block_size = blockDim.x * blockDim.y * blockDim.z;
// use shared memory to store temporary output value. This is simply to
// reduce register usage.
for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
out_cached[i] = at::numeric_limits<scalar_t>::lower_bound();
out_mask_cached[i] = 0;
}
__syncthreads();
int batch_id = blockIdx.x % nbatch;
int channel_id = blockIdx.x / nbatch;
int channel_offset = threadIdx.x + channel_id * blockDim.x;
top_data = top_data + batch_id * pooled_height * pooled_width * channels;
top_mask = top_mask + batch_id * pooled_height * pooled_width * channels;
bottom_data = bottom_data + batch_id * in_stride_n;
out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x];
out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x];
int oH = (pooled_height + gridDim.z-1) / gridDim.z;
int oW = (pooled_width + gridDim.y-1) / gridDim.y;
int ostartH = threadIdx.z + blockIdx.z*oH;
int oendH = ::min(ostartH+oH, pooled_height);
int ostartW = threadIdx.y + blockIdx.y*oW;
int oendW = ::min(ostartW+oW, pooled_width);
for (int oh = ostartH; oh < oendH; oh+=blockDim.z) {
int hstart = oh * stride_h - pad_h;
int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height);
for (int ow = ostartW; ow < oendW; ow+=blockDim.y) {
int wstart = ow * stride_w - pad_w;
int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width);
while(hstart < 0)
hstart += dilation_h;
while(wstart < 0)
wstart += dilation_w;
for (int ih = hstart; ih < hend; ih++) {
for (int iw = wstart; iw < wend; iw++) {
int cached_index = threadIdx.x;
const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w;
for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) {
scalar_t val = ptr_input[c*in_stride_c];
if ((scalar_cast<accscalar_t>(val) > out_cached[cached_index]) || THCNumerics<scalar_t>::isnan(val)) {
out_cached[cached_index] = scalar_cast<accscalar_t>(val);
out_mask_cached[cached_index] = ih * width + iw;
}
cached_index += blockDim.x;
}
}
}
scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels;
int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels;
int cached_index = threadIdx.x;
for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) {
ptr_output_data[c] = out_cached[cached_index];
ptr_output_mask[c] = out_mask_cached[cached_index];
out_cached[cached_index] = at::numeric_limits<scalar_t>::lower_bound();
out_mask_cached[cached_index] = 0;
cached_index += blockDim.x;
}
}
}
}
static const int BLOCK_THREADS = 256;
template <typename scalar_t, typename accscalar_t>
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4)
#else
C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8)
#endif
__global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff,
const int64_t* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w,
scalar_t* bottom_diff) {
CUDA_KERNEL_LOOP(index, height*width) {
int h = index/width;
int w = index - h * width;
int phstart = p_start(h, pad_h, kernel_h, dilation_h, stride_h);
int phend = p_end(h, pad_h, pooled_height, stride_h);
int pwstart = p_start(w, pad_w, kernel_w, dilation_w, stride_w);
int pwend = p_end(w, pad_w, pooled_width, stride_w);
for (int n = blockIdx.y; n < num; n += gridDim.y)
for (int c = blockIdx.z; c < channels; c+= gridDim.z) {
accscalar_t gradient = accscalar_t(0);
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
top_mask += offset;
if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) {
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * pooled_width + pw]);
}
}
}
} else {
if (top_mask[phstart * pooled_width + pwstart] == h * width + w) {
gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[phstart * pooled_width + pwstart]);
}
}
bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
}
}
}
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void max_pool_backward_nhwc(const int nthreads, const scalar_t* top_diff,
const int64_t* top_mask, const int nbatch, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w,
const int out_stride_c, const int out_stride_h, const int out_stride_w,
const int in_stride_n, const int in_stride_c,
const int in_stride_h, const int in_stride_w,
const int kernel_stride_C, const int kernel_size_C,
scalar_t* bottom_diff) {
extern __shared__ int smem[];
accscalar_t *out_cached = reinterpret_cast<accscalar_t*>(smem);
int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
int block_size = blockDim.x * blockDim.y * blockDim.z;
int batch_id = blockIdx.x % nbatch;
int channel_id = blockIdx.x / nbatch;
int channel_offset = threadIdx.x + channel_id * blockDim.x;
for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
out_cached[i] = accscalar_t(0.0);
}
__syncthreads();
out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x];
bottom_diff = bottom_diff + batch_id * height * width * channels;
top_mask = top_mask + batch_id * pooled_height * pooled_width * channels;
top_diff = top_diff + batch_id * pooled_height * pooled_width * channels;
int iH = (height + gridDim.z-1) / gridDim.z;
int iW = (width + gridDim.y-1) / gridDim.y;
int istartH = threadIdx.z + blockIdx.z*iH;
int iendH = ::min(istartH+iH, height);
int istartW = threadIdx.y + blockIdx.y*iW;
int iendW = ::min(istartW+iW, width);
for (int ih = istartH; ih < iendH; ih+=blockDim.z) {
int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h);
int phend = p_end(ih, pad_h, pooled_height, stride_h);
for (int iw = istartW; iw < iendW; iw+=blockDim.y) {
int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w);
int pwend = p_end(iw, pad_w, pooled_width, stride_w);
int index_shift = ih * width + iw;
if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) {
for(int oh = phstart; oh < phend; ++oh) {
for(int ow = pwstart; ow < pwend; ++ow) {
int cached_index = threadIdx.x;
const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w;
for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) {
if (ptr_top_mask[c*out_stride_c] == index_shift) {
out_cached[cached_index] +=
scalar_cast<accscalar_t>(top_diff[oh * out_stride_h + ow * out_stride_w + c*out_stride_c]);
}
cached_index += blockDim.x;
}
}
}
scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels;
int cached_index = threadIdx.x;
for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) {
ptr_bottom_diff[c] = scalar_cast<scalar_t>(out_cached[cached_index]);
out_cached[cached_index] = accscalar_t(0.0);
cached_index += blockDim.x;
}
} else {
const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w;
scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels;
int cached_index = threadIdx.x;
for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) {
if (ptr_top_mask[c*out_stride_c] == index_shift) {
ptr_bottom_diff[c] =
scalar_cast<scalar_t>(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c*out_stride_c]);
}
cached_index += blockDim.x;
}
}
}
}
}
void max_pool2d_with_indices_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ input_, "input_", 3 };
checkAllSameGPU("max_pool2d_with_indices_out_cuda",
{output_arg, indices_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
"max_pool2d: kernel_size must either be a single int, or a tuple of two ints")
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
// NB: stride default is not expressible as an integer constant, so we accept
// empty stride for this case
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2,
"max_pool2d: stride must either be omitted, a single int, or a tuple of two ints")
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
"max_pool2d: padding must be either be a single int, or a tuple of two ints");
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2,
"max_pool2d: dilation must be either a single int, or a tuple of two ints");
const int dilationH = safe_downcast<int, int64_t>(dilation[0]);
const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]);
const auto memory_format = input_.suggest_memory_format();
if (memory_format == at::MemoryFormat::ChannelsLast) {
TORCH_CHECK(input_.ndimension() == 4,
"non-empty 4D (batch mode) tensor expected for input with channels_last layout");
} else {
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
}
const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1;
const int64_t nInputPlane = input_.size(-3);
const int64_t inputHeight = input_.size(-2);
const int64_t inputWidth = input_.size(-1);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
pool2d_shape_check(
input_,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth);
Tensor input = input_.contiguous(memory_format);
const int64_t in_stride_n = input_.ndimension() == 4 ? input.stride(-4) : 0;
const int64_t in_stride_c = input.stride(-3);
const int64_t in_stride_h = input.stride(-2);
const int64_t in_stride_w = input.stride(-1);
output.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
output.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
indices.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
const int count = safe_downcast<int, int64_t>(output.numel());
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"max_pool2d_with_indices_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool2d_with_indices_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data_ptr<scalar_t>();
scalar_t *input_data = input.data_ptr<scalar_t>();
int64_t *indices_data = indices.data_ptr<int64_t>();
switch (memory_format) {
case MemoryFormat::ChannelsLast: {
const int max_threads = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
int block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size()));
int block_y = std::min<int>(
maxThreadsDim[1], std::min<int>(lastPow2(outputWidth), max_threads / block_x));
int block_z = std::min<int>(
maxThreadsDim[2], std::min<int>(lastPow2(outputHeight), max_threads / block_x / block_y));
block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z));
const dim3 block(block_x, block_y, block_z);
int kernel_stride_C = cuda::ATenCeilDiv(
safe_downcast<int, int64_t>(nInputPlane), block_x * 4);
int kernel_size_C = cuda::ATenCeilDiv(
safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C);
int grid_x = nbatch*kernel_stride_C;
int grid_y = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxGridSize[1],
cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputWidth), block_y*BLOCK_STRIDE));
int grid_z = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxGridSize[2],
cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputHeight), block_z*BLOCK_STRIDE));
const dim3 grid(grid_x, grid_y, grid_z);
size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * (sizeof(int) + sizeof(scalar_t));
AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock);
hipLaunchKernelGGL(( max_pool_forward_nhwc<scalar_t, scalar_t>)
, dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_data, nbatch,
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
in_stride_n, in_stride_c,
in_stride_h, in_stride_w,
kernel_stride_C, kernel_size_C,
output_data, indices_data);
break;
}
case MemoryFormat::Contiguous: {
const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock,
BLOCK_THREADS);
hipLaunchKernelGGL(( max_pool_forward_nchw<scalar_t, scalar_t>)
, dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count, input_data,
nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
output_data, indices_data);
break;
}
default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
});
}
);
AT_CUDA_CHECK(hipGetLastError());
if(input.ndimension() == 3) {
output.resize_({nInputPlane, outputHeight, outputWidth});
}
}
// Backward of max_pool2d_with_indices on HIP/CUDA: routes the incoming
// gradient `gradOutput_` back to the input positions recorded in `indices`
// and writes the result into `gradInput` (resized and restrided to match the
// input). Dispatches an NHWC kernel for channels_last input and an NCHW
// kernel for contiguous input.
//
// Interface is unchanged; the only behavioral fix is the function name
// reported by checkAllSameGPU (it previously reported the forward's name).
void max_pool2d_with_indices_backward_out_cuda_template(
  Tensor& gradInput,
  const Tensor& gradOutput_,
  const Tensor& input_,
  const Tensor& indices,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode)
{
  TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
  TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 };
  TensorArg input_arg{ input_, "input_", 3 };
  TensorArg indices_arg{ indices, "indices", 4 };
  // Bug fix: report this function's own name in device-mismatch errors
  // (was "max_pool2d_with_indices_out_cuda", the forward's name).
  checkAllSameGPU("max_pool2d_with_indices_backward_out_cuda",
    {gradInput_arg, gradOutput_arg, input_arg, indices_arg});
  // #20866, #22032: Guarantee this for the official C++ API?
  TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
    "max_pool2d: kernel_size must either be a single int, or a tuple of two ints")
  // A single int is broadcast to both spatial dimensions.
  const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
  // NB: stride default is not expressible as an integer constant, so we accept
  // empty stride for this case
  TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2,
    "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints")
  const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
  const int dW = stride.empty() ? kW :
    stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
  TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
    "max_pool2d: padding must be either be a single int, or a tuple of two ints");
  const int padH = safe_downcast<int, int64_t>(padding[0]);
  const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
  TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2,
    "max_pool2d: dilation must be either a single int, or a tuple of two ints");
  const int dilationH = safe_downcast<int, int64_t>(dilation[0]);
  const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]);
  const auto memory_format = input_.suggest_memory_format();
  if (memory_format == at::MemoryFormat::ChannelsLast) {
    TORCH_CHECK(input_.ndimension() == 4,
      "non-empty 4D (batch mode) tensor expected for input with channels_last layout");
  } else {
    TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
      "non-empty 3D or 4D (batch mode) tensor expected for input");
  }
  const Tensor input = input_.contiguous(memory_format);
  const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
  const int64_t nInputPlane = input.size(-3);
  const int64_t inputHeight = input.size(-2);
  const int64_t inputWidth = input.size(-1);
  const int64_t in_stride_n = input.ndimension() == 4 ? input.stride(-4) : 0;
  const int64_t in_stride_c = input.stride(-3);
  const int64_t in_stride_h = input.stride(-2);
  const int64_t in_stride_w = input.stride(-1);
  const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
  const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
  max_pool2d_backward_shape_check(
    input_,
    gradOutput_,
    indices,
    nbatch,
    kH, kW, dH, dW, padH, padW, dilationH, dilationW,
    nInputPlane,
    inputHeight, inputWidth,
    outputHeight, outputWidth,
    /*cuda=*/ true);
  const Tensor gradOutput = gradOutput_.contiguous(memory_format);
  const int64_t out_stride_c = gradOutput.stride(-3);
  const int64_t out_stride_h = gradOutput.stride(-2);
  const int64_t out_stride_w = gradOutput.stride(-1);
  gradInput.resize_as_(input);
  gradInput.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
  int64_t count = input.numel();
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
    "max_pool2d_with_indices_out_cuda_frame",
    [&] {
      AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool2d_with_indices_out_cuda_frame", [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
        scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
        int64_t *indices_data = indices.data_ptr<int64_t>();
        switch (memory_format) {
          case MemoryFormat::ChannelsLast: {
            // Heuristic launch config: x covers channels, y covers input
            // width, z covers input height; x is refit once y/z are known.
            const int max_threads = std::min<int>(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
            int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
            int block_x = std::min<int>(
              maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size()));
            int block_y = std::min<int>(
              maxThreadsDim[1], std::min<int>(lastPow2(inputWidth), max_threads / block_x));
            int block_z = std::min<int>(
              maxThreadsDim[2], std::min<int>(lastPow2(inputHeight), max_threads / block_x / block_y));
            block_x = std::min<int>(
              maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z));
            const dim3 block(block_x, block_y, block_z);
            int kernel_stride_C = cuda::ATenCeilDiv(
              safe_downcast<int, int64_t>(nInputPlane), block_x * 4);
            int kernel_size_C = cuda::ATenCeilDiv(
              safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C);
            int grid_x = nbatch*kernel_stride_C;
            int grid_y = std::min<int>(
              at::cuda::getCurrentDeviceProperties()->maxGridSize[1],
              cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputWidth), block_y*BLOCK_STRIDE));
            int grid_z = std::min<int>(
              at::cuda::getCurrentDeviceProperties()->maxGridSize[2],
              cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputHeight), block_z*BLOCK_STRIDE));
            const dim3 grid(grid_x, grid_y, grid_z);
            // One accscalar_t accumulator slot per thread per channel chunk.
            size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * sizeof(accscalar_t);
            AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock);
            // The backward kernel is launched on input instead output.
            // If it is launched on output layer, atomic_add would not provide much benefit on FP16.
            // Please check comments at https://github.com/pytorch/pytorch/pull/34519.
            hipLaunchKernelGGL(( max_pool_backward_nhwc<scalar_t, accscalar_t>)
              , dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
              count,
              gradOutput_data,
              indices_data,
              nbatch,
              nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
              kH, kW, dH, dW, padH, padW, dilationH, dilationW,
              out_stride_c, out_stride_h, out_stride_w,
              in_stride_n, in_stride_c,
              in_stride_h, in_stride_w,
              kernel_stride_C, kernel_size_C,
              gradInput_data);
            break;
          }
          case MemoryFormat::Contiguous: {
            // One thread per input pixel; batches/channels walked via grid.y/z.
            int imgcount = inputWidth * inputHeight;
            dim3 grid;
            const int blocks = (imgcount + BLOCK_THREADS - 1) / BLOCK_THREADS;
            grid.x = blocks;
            grid.y = nbatch;
            uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
            if (maxGridY < grid.y) grid.y = maxGridY;
            grid.z = nInputPlane;
            uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2];
            if (maxGridZ < grid.z) grid.z = maxGridZ;
            hipLaunchKernelGGL(( max_pool_backward_nchw<scalar_t, accscalar_t>)
              , dim3(grid), dim3(BLOCK_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
              count,
              gradOutput_data,
              indices_data,
              nbatch,
              nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
              kH, kW, dH, dW, padH, padW, dilationH, dilationW,
              gradInput_data);
            break;
          }
          default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
        }
      });
    }
  );
  // Surface launch-configuration errors from the asynchronous kernel launch.
  AT_CUDA_CHECK(hipGetLastError());
}
} // namespace
// Out-variant of max_pool2d_with_indices on CUDA: fills `output` and
// `indices` in place via the template above and returns them as a tuple of
// references.
std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda(
  Tensor& output,
  Tensor& indices,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode)
{
  max_pool2d_with_indices_out_cuda_template(
    output, indices, input,
    kernel_size, stride, padding, dilation, ceil_mode);
  return std::forward_as_tuple(output, indices);
}
std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
NoNamesGuard guard;
Tensor output = at::empty({0}, input.options());
Tensor indices = at::empty({0}, input.options().dtype(kLong));
max_pool2d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
guard.reset();
namedinference::propagate_names(output, input);
namedinference::propagate_names(indices, input);
return std::tuple<Tensor, Tensor>(output, indices);
}
// Out-variant of the max_pool2d_with_indices backward on CUDA: writes the
// input gradient into `gradInput` and returns it. `indices` must be the
// argmax tensor produced by the matching forward call.
Tensor& max_pool2d_with_indices_backward_out_cuda(
  Tensor& gradInput,
  const Tensor& gradOutput_,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode,
  const Tensor& indices)
{
  max_pool2d_with_indices_backward_out_cuda_template(
    gradInput, gradOutput_, input, indices,
    kernel_size, stride, padding, dilation, ceil_mode);
  return gradInput;
}
// Functional variant of the max_pool2d_with_indices backward on CUDA:
// allocates a zero-initialized gradient buffer and fills it via the template.
Tensor max_pool2d_with_indices_backward_cuda(
  const Tensor& gradOutput_,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode,
  const Tensor& indices)
{
  // Zero-filled so positions never selected by the forward pass stay zero.
  auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  max_pool2d_with_indices_backward_out_cuda_template(
    gradInput, gradOutput_, input, indices,
    kernel_size, stride, padding, dilation, ceil_mode);
  return gradInput;
}
} // at::native
} // at
| d3338d0ed2a64037857cde2806bc74a97d25e23b.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/native/Pool.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <THC/THCNumerics.cuh>
#include <c10/macros/Macros.h>
#include <ATen/native/cuda/LaunchUtils.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
namespace at {
namespace native {
namespace {
// Device-side integer minimum used by the pooling kernels below.
__device__ inline int min(int a, int b) {
  if (a <= b) {
    return a;
  }
  return b;
}
// Launch-configuration tuning knobs shared by the pooling kernels and the
// host-side launch code below.
#define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit
#define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched
// First pooling-window index, along one spatial dimension, whose (dilated)
// window can cover input position `size`, given padding / kernel extent /
// dilation / stride.
static __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) {
  const int effective_kernel = (kernel - 1) * dilation + 1;
  if (size + pad < effective_kernel) {
    return 0;
  }
  return (size + pad - effective_kernel) / stride + 1;
}
// One-past-the-last pooling-window index, along one spatial dimension, whose
// window can cover input position `size`; clamped to the pooled extent.
static __device__ inline int p_end(int size, int pad, int pooled_size, int stride) {
  const int last_plus_one = (size + pad) / stride + 1;
  return min(last_plus_one, pooled_size);
}
// kernels borrowed from Caffe
// NCHW max-pooling forward kernel (borrowed from Caffe).
// Each iteration of the grid-stride CUDA_KERNEL_LOOP handles one output
// element `index`: it scans the corresponding (dilated) pooling window in
// `bottom_data`, writes the max to `top_data[index]` and the flat argmax
// position (h * width + w within the channel plane) to `top_mask[index]`.
// NaN inputs win the comparison, so NaNs propagate to the output.
template <typename scalar_t, typename accscalar_t>
__global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w,
    const int dilation_h, const int dilation_w, scalar_t* top_data,
    int64_t* top_mask) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int c = (index / pooled_width / pooled_height) % channels;
    int n = index / pooled_width / pooled_height / channels;
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height);
    int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width);
    // Step past the padded region in dilation-sized increments.
    while(hstart < 0)
      hstart += dilation_h;
    while(wstart < 0)
      wstart += dilation_w;
    accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity
    int maxidx = hstart * width + wstart;
    // Bug fix: index through a local pointer instead of advancing
    // `bottom_data` itself. The old `bottom_data += ...` accumulated the
    // offset across grid-stride iterations, so a thread processing more
    // than one output element read from the wrong locations.
    const scalar_t* btm_data = bottom_data + (n * channels + c) * height * width;
    for (int h = hstart; h < hend; h += dilation_h) {
      for (int w = wstart; w < wend; w += dilation_w) {
        scalar_t val = btm_data[h * width + w];
        if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) {
          maxidx = h * width + w;
          maxval = ScalarConvert<scalar_t, accscalar_t>::to(val);
        }
      }
    }
    top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval);
    top_mask[index] = maxidx;
  }
}
// Channels-last (NHWC) max-pooling forward kernel.
// Launch layout (set up by the host code below): blockIdx.x encodes
// batch * channel-chunk (kernel_stride_C chunks per batch), blockIdx.y/z tile
// the output width/height; threadIdx.x walks channels within a chunk.
// Dynamic shared memory holds, per thread, kernel_size_C running-max slots:
// an int argmax array followed by a scalar_t value array, matching the
// host-computed shmem_size of
// kernel_size_C * blockDim volume * (sizeof(int) + sizeof(scalar_t)).
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int nbatch,
const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w,
const int in_stride_n, const int in_stride_c,
const int in_stride_h, const int in_stride_w,
const int kernel_stride_C, const int kernel_size_C,
scalar_t* top_data, int64_t* top_mask) {
// Shared memory layout: [int argmax slots][scalar_t value slots].
extern __shared__ int smem[];
int *out_mask_cached = smem;
scalar_t *out_cached = reinterpret_cast<scalar_t*>(&out_mask_cached[kernel_size_C*blockDim.x*blockDim.y*blockDim.z]);
// flattening cta for pre-computation & smem initialization;
int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
int block_size = blockDim.x * blockDim.y * blockDim.z;
// use shared memory to store temporary output value. This is simply to
// reduce register usage.
for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
out_cached[i] = at::numeric_limits<scalar_t>::lower_bound();
out_mask_cached[i] = 0;
}
// Barrier: all slots must be initialized before any thread uses its own.
__syncthreads();
// Decode batch and channel-chunk from the flattened blockIdx.x.
int batch_id = blockIdx.x % nbatch;
int channel_id = blockIdx.x / nbatch;
int channel_offset = threadIdx.x + channel_id * blockDim.x;
top_data = top_data + batch_id * pooled_height * pooled_width * channels;
top_mask = top_mask + batch_id * pooled_height * pooled_width * channels;
bottom_data = bottom_data + batch_id * in_stride_n;
// Rebase the caches to this thread's private slots (indexed by threadIdx.x).
out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x];
out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x];
// Output tile handled by this block: oH x oW positions, strided by blockDim.
int oH = (pooled_height + gridDim.z-1) / gridDim.z;
int oW = (pooled_width + gridDim.y-1) / gridDim.y;
int ostartH = threadIdx.z + blockIdx.z*oH;
int oendH = ::min(ostartH+oH, pooled_height);
int ostartW = threadIdx.y + blockIdx.y*oW;
int oendW = ::min(ostartW+oW, pooled_width);
for (int oh = ostartH; oh < oendH; oh+=blockDim.z) {
int hstart = oh * stride_h - pad_h;
int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height);
for (int ow = ostartW; ow < oendW; ow+=blockDim.y) {
int wstart = ow * stride_w - pad_w;
int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width);
// Step past the padded region in dilation-sized increments. (hstart is
// clamped here inside the ow loop; after the first iteration it is
// already non-negative, so the re-check is a no-op.)
while(hstart < 0)
hstart += dilation_h;
while(wstart < 0)
wstart += dilation_w;
// Scan the pooling window, tracking per-channel max and argmax in smem.
for (int ih = hstart; ih < hend; ih++) {
for (int iw = wstart; iw < wend; iw++) {
int cached_index = threadIdx.x;
const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w;
for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) {
scalar_t val = ptr_input[c*in_stride_c];
// NaN wins the comparison so NaNs propagate to the output.
if ((scalar_cast<accscalar_t>(val) > out_cached[cached_index]) || THCNumerics<scalar_t>::isnan(val)) {
out_cached[cached_index] = scalar_cast<accscalar_t>(val);
out_mask_cached[cached_index] = ih * width + iw;
}
cached_index += blockDim.x;
}
}
}
// Flush this output position's results to global memory and reset the
// cached slots for the next (oh, ow) position.
scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels;
int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels;
int cached_index = threadIdx.x;
for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) {
ptr_output_data[c] = out_cached[cached_index];
ptr_output_mask[c] = out_mask_cached[cached_index];
out_cached[cached_index] = at::numeric_limits<scalar_t>::lower_bound();
out_mask_cached[cached_index] = 0;
cached_index += blockDim.x;
}
}
}
}
// Thread-block size used by the NCHW forward launch and by the NCHW backward
// kernel's launch bounds.
static const int BLOCK_THREADS = 256;
// NCHW max-pooling backward kernel.
// Thread mapping: each grid-stride iteration handles one input pixel (h, w);
// the blockIdx.y / blockIdx.z loops walk batches and channels. For that
// pixel it sums the gradient of every output element whose argmax (recorded
// in top_mask by the forward pass) points at it, and writes the sum to
// bottom_diff — every visited (n, c, h, w) position is written, zero or not.
template <typename scalar_t, typename accscalar_t>
#if defined (__HIP_PLATFORM_HCC__)
C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4)
#else
C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8)
#endif
__global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff,
    const int64_t* top_mask, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    const int dilation_h, const int dilation_w,
    scalar_t* bottom_diff) {
  CUDA_KERNEL_LOOP(index, height*width) {
    int h = index/width;
    int w = index - h * width;
    // Range of pooling windows that can contain input position (h, w).
    int phstart = p_start(h, pad_h, kernel_h, dilation_h, stride_h);
    int phend = p_end(h, pad_h, pooled_height, stride_h);
    int pwstart = p_start(w, pad_w, kernel_w, dilation_w, stride_w);
    int pwend = p_end(w, pad_w, pooled_width, stride_w);
    for (int n = blockIdx.y; n < num; n += gridDim.y)
      for (int c = blockIdx.z; c < channels; c += gridDim.z) {
        accscalar_t gradient = accscalar_t(0);
        int offset = (n * channels + c) * pooled_height * pooled_width;
        // Bug fix: index through local pointers instead of advancing
        // `top_diff`/`top_mask` in place. The old `top_diff += offset`
        // accumulated offsets across loop iterations, so any thread that
        // processed more than one (n, c) pair (clamped grid.y/z) or more
        // than one pixel (grid-stride loop) read from the wrong locations.
        const scalar_t* top_diff_cur = top_diff + offset;
        const int64_t* top_mask_cur = top_mask + offset;
        if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) {
          // General case: several windows may overlap this pixel.
          for (int ph = phstart; ph < phend; ++ph) {
            for (int pw = pwstart; pw < pwend; ++pw) {
              if (top_mask_cur[ph * pooled_width + pw] == h * width + w) {
                gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff_cur[ph * pooled_width + pw]);
              }
            }
          }
        } else {
          // Fast path: exactly one candidate window.
          if (top_mask_cur[phstart * pooled_width + pwstart] == h * width + w) {
            gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff_cur[phstart * pooled_width + pwstart]);
          }
        }
        bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
      }
  }
}
// Channels-last (NHWC) max-pooling backward kernel, launched over the INPUT
// (see the PR #34519 note at the call site): each thread owns input pixels
// and gathers matching gradients, avoiding atomics on the output side.
// blockIdx.x encodes batch * channel-chunk; blockIdx.y/z tile input
// width/height. Dynamic shared memory holds kernel_size_C accscalar_t
// accumulator slots per thread (matching the host-computed shmem_size).
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS)
__global__ void max_pool_backward_nhwc(const int nthreads, const scalar_t* top_diff,
const int64_t* top_mask, const int nbatch, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w,
const int out_stride_c, const int out_stride_h, const int out_stride_w,
const int in_stride_n, const int in_stride_c,
const int in_stride_h, const int in_stride_w,
const int kernel_stride_C, const int kernel_size_C,
scalar_t* bottom_diff) {
extern __shared__ int smem[];
accscalar_t *out_cached = reinterpret_cast<accscalar_t*>(smem);
int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
int block_size = blockDim.x * blockDim.y * blockDim.z;
// Decode batch and channel-chunk from the flattened blockIdx.x.
int batch_id = blockIdx.x % nbatch;
int channel_id = blockIdx.x / nbatch;
int channel_offset = threadIdx.x + channel_id * blockDim.x;
// Zero all shared accumulator slots before use.
for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) {
out_cached[i] = accscalar_t(0.0);
}
__syncthreads();
// Rebase the cache to this thread's private slots.
out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x];
bottom_diff = bottom_diff + batch_id * height * width * channels;
top_mask = top_mask + batch_id * pooled_height * pooled_width * channels;
top_diff = top_diff + batch_id * pooled_height * pooled_width * channels;
// Input tile handled by this block: iH x iW positions, strided by blockDim.
int iH = (height + gridDim.z-1) / gridDim.z;
int iW = (width + gridDim.y-1) / gridDim.y;
int istartH = threadIdx.z + blockIdx.z*iH;
int iendH = ::min(istartH+iH, height);
int istartW = threadIdx.y + blockIdx.y*iW;
int iendW = ::min(istartW+iW, width);
for (int ih = istartH; ih < iendH; ih+=blockDim.z) {
// Pooling windows that can contain input row ih / column iw.
int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h);
int phend = p_end(ih, pad_h, pooled_height, stride_h);
for (int iw = istartW; iw < iendW; iw+=blockDim.y) {
int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w);
int pwend = p_end(iw, pad_w, pooled_width, stride_w);
// Flat argmax value the forward pass would have stored for this pixel.
int index_shift = ih * width + iw;
if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) {
// General case: accumulate matching gradients in shared memory, then
// write the totals and reset the slots.
for(int oh = phstart; oh < phend; ++oh) {
for(int ow = pwstart; ow < pwend; ++ow) {
int cached_index = threadIdx.x;
const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w;
for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) {
if (ptr_top_mask[c*out_stride_c] == index_shift) {
out_cached[cached_index] +=
scalar_cast<accscalar_t>(top_diff[oh * out_stride_h + ow * out_stride_w + c*out_stride_c]);
}
cached_index += blockDim.x;
}
}
}
scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels;
int cached_index = threadIdx.x;
for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) {
ptr_bottom_diff[c] = scalar_cast<scalar_t>(out_cached[cached_index]);
out_cached[cached_index] = accscalar_t(0.0);
cached_index += blockDim.x;
}
} else {
// Fast path: exactly one candidate window — write matched channels
// directly. NOTE(review): unmatched channels are left untouched here,
// which appears to rely on bottom_diff being pre-zeroed by the caller
// (the functional backward uses at::zeros_like) — confirm for the
// out= variant, where a recycled buffer may hold stale values.
const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w;
scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels;
int cached_index = threadIdx.x;
for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) {
if (ptr_top_mask[c*out_stride_c] == index_shift) {
ptr_bottom_diff[c] =
scalar_cast<scalar_t>(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c*out_stride_c]);
}
cached_index += blockDim.x;
}
}
}
}
}
// CUDA forward for max_pool2d_with_indices: computes the pooled `output` and
// the flat argmax `indices` (h * width + w within each channel plane) that
// the backward pass consumes. Dispatches an NHWC kernel for channels_last
// input and an NCHW kernel for contiguous input; both tensors are resized
// here, and a 3D input is restored to 3D at the end.
void max_pool2d_with_indices_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ input_, "input_", 3 };
checkAllSameGPU("max_pool2d_with_indices_out_cuda",
{output_arg, indices_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
"max_pool2d: kernel_size must either be a single int, or a tuple of two ints")
// A single int is broadcast to both spatial dimensions.
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
// NB: stride default is not expressible as an integer constant, so we accept
// empty stride for this case
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2,
"max_pool2d: stride must either be omitted, a single int, or a tuple of two ints")
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
"max_pool2d: padding must be either be a single int, or a tuple of two ints");
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2,
"max_pool2d: dilation must be either a single int, or a tuple of two ints");
const int dilationH = safe_downcast<int, int64_t>(dilation[0]);
const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]);
const auto memory_format = input_.suggest_memory_format();
if (memory_format == at::MemoryFormat::ChannelsLast) {
TORCH_CHECK(input_.ndimension() == 4,
"non-empty 4D (batch mode) tensor expected for input with channels_last layout");
} else {
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
}
const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1;
const int64_t nInputPlane = input_.size(-3);
const int64_t inputHeight = input_.size(-2);
const int64_t inputWidth = input_.size(-1);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
pool2d_shape_check(
input_,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth);
Tensor input = input_.contiguous(memory_format);
const int64_t in_stride_n = input_.ndimension() == 4 ? input.stride(-4) : 0;
const int64_t in_stride_c = input.stride(-3);
const int64_t in_stride_h = input.stride(-2);
const int64_t in_stride_w = input.stride(-1);
// Always allocate as 4D here; a 3D input is squeezed back at the end.
output.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
output.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
indices.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
const int count = safe_downcast<int, int64_t>(output.numel());
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"max_pool2d_with_indices_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool2d_with_indices_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data_ptr<scalar_t>();
scalar_t *input_data = input.data_ptr<scalar_t>();
int64_t *indices_data = indices.data_ptr<int64_t>();
switch (memory_format) {
case MemoryFormat::ChannelsLast: {
// Heuristic launch config: x covers channels, y covers output width,
// z covers output height; x is refit once y/z are known.
const int max_threads = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
int block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size()));
int block_y = std::min<int>(
maxThreadsDim[1], std::min<int>(lastPow2(outputWidth), max_threads / block_x));
int block_z = std::min<int>(
maxThreadsDim[2], std::min<int>(lastPow2(outputHeight), max_threads / block_x / block_y));
block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z));
const dim3 block(block_x, block_y, block_z);
int kernel_stride_C = cuda::ATenCeilDiv(
safe_downcast<int, int64_t>(nInputPlane), block_x * 4);
int kernel_size_C = cuda::ATenCeilDiv(
safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C);
// NOTE(review): grid_x = nbatch * kernel_stride_C is computed in int and
// is not clamped against maxGridSize[0]; very large batches could
// overflow or exceed the limit — confirm upstream assumptions.
int grid_x = nbatch*kernel_stride_C;
int grid_y = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxGridSize[1],
cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputWidth), block_y*BLOCK_STRIDE));
int grid_z = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxGridSize[2],
cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputHeight), block_z*BLOCK_STRIDE));
const dim3 grid(grid_x, grid_y, grid_z);
// Shared memory: per-thread int argmax slots plus scalar_t value slots
// (must match the layout the NHWC kernel carves out of smem[]).
size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * (sizeof(int) + sizeof(scalar_t));
AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock);
// Note: instantiated with accscalar_t = scalar_t, so the running max is
// kept at input precision.
max_pool_forward_nhwc<scalar_t, scalar_t>
<<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>>(
input_data, nbatch,
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
in_stride_n, in_stride_c,
in_stride_h, in_stride_w,
kernel_stride_C, kernel_size_C,
output_data, indices_data);
break;
}
case MemoryFormat::Contiguous: {
// One thread per output element, grid sized by ceil-division.
const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock,
BLOCK_THREADS);
max_pool_forward_nchw<scalar_t, scalar_t>
<<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count, input_data,
nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
output_data, indices_data);
break;
}
default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
});
}
);
// Surface launch-configuration errors from the asynchronous kernel launch.
AT_CUDA_CHECK(cudaGetLastError());
// Restore the unbatched (3D) shape when the caller passed a 3D input.
if(input.ndimension() == 3) {
output.resize_({nInputPlane, outputHeight, outputWidth});
}
}
void max_pool2d_with_indices_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input_,
const Tensor& indices,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 };
TensorArg input_arg{ input_, "input_", 3 };
TensorArg indices_arg{ indices, "indices", 4 };
checkAllSameGPU("max_pool2d_with_indices_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg, indices_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
"max_pool2d: kernel_size must either be a single int, or a tuple of two ints")
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
// NB: stride default is not expressible as an integer constant, so we accept
// empty stride for this case
TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2,
"max_pool2d: stride must either be omitted, a single int, or a tuple of two ints")
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
"max_pool2d: padding must be either be a single int, or a tuple of two ints");
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2,
"max_pool2d: dilation must be either a single int, or a tuple of two ints");
const int dilationH = safe_downcast<int, int64_t>(dilation[0]);
const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]);
const auto memory_format = input_.suggest_memory_format();
if (memory_format == at::MemoryFormat::ChannelsLast) {
TORCH_CHECK(input_.ndimension() == 4,
"non-empty 4D (batch mode) tensor expected for input with channels_last layout");
} else {
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
}
const Tensor input = input_.contiguous(memory_format);
const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
const int64_t nInputPlane = input.size(-3);
const int64_t inputHeight = input.size(-2);
const int64_t inputWidth = input.size(-1);
const int64_t in_stride_n = input.ndimension() == 4 ? input.stride(-4) : 0;
const int64_t in_stride_c = input.stride(-3);
const int64_t in_stride_h = input.stride(-2);
const int64_t in_stride_w = input.stride(-1);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
max_pool2d_backward_shape_check(
input_,
gradOutput_,
indices,
nbatch,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
/*cuda=*/ true);
const Tensor gradOutput = gradOutput_.contiguous(memory_format);
const int64_t out_stride_c = gradOutput.stride(-3);
const int64_t out_stride_h = gradOutput.stride(-2);
const int64_t out_stride_w = gradOutput.stride(-1);
gradInput.resize_as_(input);
gradInput.unsafeGetTensorImpl()->empty_tensor_restride(memory_format);
int64_t count = input.numel();
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"max_pool2d_with_indices_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "max_pool2d_with_indices_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
int64_t *indices_data = indices.data_ptr<int64_t>();
switch (memory_format) {
case MemoryFormat::ChannelsLast: {
const int max_threads = std::min<int>(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS);
int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim;
int block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size()));
int block_y = std::min<int>(
maxThreadsDim[1], std::min<int>(lastPow2(inputWidth), max_threads / block_x));
int block_z = std::min<int>(
maxThreadsDim[2], std::min<int>(lastPow2(inputHeight), max_threads / block_x / block_y));
block_x = std::min<int>(
maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z));
const dim3 block(block_x, block_y, block_z);
int kernel_stride_C = cuda::ATenCeilDiv(
safe_downcast<int, int64_t>(nInputPlane), block_x * 4);
int kernel_size_C = cuda::ATenCeilDiv(
safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C);
int grid_x = nbatch*kernel_stride_C;
int grid_y = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxGridSize[1],
cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputWidth), block_y*BLOCK_STRIDE));
int grid_z = std::min<int>(
at::cuda::getCurrentDeviceProperties()->maxGridSize[2],
cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputHeight), block_z*BLOCK_STRIDE));
const dim3 grid(grid_x, grid_y, grid_z);
size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * sizeof(accscalar_t);
AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock);
// The backward kernel is launched on input instead output.
// If it is launched on output layer, atomic_add would not provide much benefit on FP16.
// Please check comments at https://github.com/pytorch/pytorch/pull/34519.
max_pool_backward_nhwc<scalar_t, accscalar_t>
<<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>>(
count,
gradOutput_data,
indices_data,
nbatch,
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
out_stride_c, out_stride_h, out_stride_w,
in_stride_n, in_stride_c,
in_stride_h, in_stride_w,
kernel_stride_C, kernel_size_C,
gradInput_data);
break;
}
case MemoryFormat::Contiguous: {
int imgcount = inputWidth * inputHeight;
dim3 grid;
const int blocks = (imgcount + BLOCK_THREADS - 1) / BLOCK_THREADS;
grid.x = blocks;
grid.y = nbatch;
uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
if (maxGridY < grid.y) grid.y = maxGridY;
grid.z = nInputPlane;
uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2];
if (maxGridZ < grid.z) grid.z = maxGridZ;
max_pool_backward_nchw<scalar_t, accscalar_t>
<<<grid, BLOCK_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
gradOutput_data,
indices_data,
nbatch,
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
gradInput_data);
break;
}
default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
});
}
);
AT_CUDA_CHECK(cudaGetLastError());
}
} // namespace
std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda(
  Tensor& output,
  Tensor& indices,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode)
{
  // Out-variant entry point: all real work happens in the shared template,
  // which resizes and fills the caller-provided output/indices tensors.
  max_pool2d_with_indices_out_cuda_template(
      output, indices, input,
      kernel_size, stride, padding, dilation, ceil_mode);
  // forward_as_tuple yields std::tuple<Tensor&, Tensor&>, matching the
  // declared return type without copying.
  return std::forward_as_tuple(output, indices);
}
std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda(
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode)
{
  // Suppress named-tensor propagation while the template resizes/fills the
  // freshly allocated outputs; names are re-attached explicitly afterwards.
  NoNamesGuard guard;
  auto output  = at::empty({0}, input.options());
  auto indices = at::empty({0}, input.options().dtype(kLong));
  max_pool2d_with_indices_out_cuda_template(
      output, indices, input,
      kernel_size, stride, padding, dilation, ceil_mode);
  guard.reset();
  namedinference::propagate_names(output, input);
  namedinference::propagate_names(indices, input);
  return std::make_tuple(std::move(output), std::move(indices));
}
Tensor& max_pool2d_with_indices_backward_out_cuda(
  Tensor& gradInput,
  const Tensor& gradOutput_,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode,
  const Tensor& indices)
{
  // Out-variant backward: delegate straight to the shared template, which
  // writes the result into the caller-provided gradInput.
  max_pool2d_with_indices_backward_out_cuda_template(
      gradInput, gradOutput_, input, indices,
      kernel_size, stride, padding, dilation, ceil_mode);
  return gradInput;
}
Tensor max_pool2d_with_indices_backward_cuda(
  const Tensor& gradOutput_,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  IntArrayRef dilation,
  bool ceil_mode,
  const Tensor& indices)
{
  // Allocate a zero-initialized gradient buffer shaped like the input, then
  // let the shared template accumulate into it.
  auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  max_pool2d_with_indices_backward_out_cuda_template(
      gradInput, gradOutput_, input, indices,
      kernel_size, stride, padding, dilation, ceil_mode);
  return gradInput;
}
} // at::native
} // at
|
185128316bb6b5459359cf41d8a126b1b9732ed1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author raver119@gmail.com
//
#include <array/CudaPointerDeallocator.h>
namespace sd {
// Frees a device allocation previously obtained from the HIP/CUDA runtime.
// NOTE(review): the hipFree return code is ignored here — presumably
// intentional for a best-effort deallocator, but worth confirming.
void CudaPointerDeallocator::release(void *ptr) {
hipFree(ptr);
}
} // namespace sd
| 185128316bb6b5459359cf41d8a126b1b9732ed1.cu | /*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author raver119@gmail.com
//
#include <array/CudaPointerDeallocator.h>
namespace sd {
// Frees a device allocation previously obtained from the CUDA runtime.
// NOTE(review): the cudaFree return code is ignored here — presumably
// intentional for a best-effort deallocator, but worth confirming.
void CudaPointerDeallocator::release(void *ptr) {
cudaFree(ptr);
}
} // namespace sd
|
7232aa0d56689ccd31f944ce786d7cad97ee7c97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <cmath>
#include <iostream>
#include <random/mvg.cuh>
#include <random>
#include "test_utils.h"
// mvg.h takes in matrices that are colomn major (as in fortan)
#define IDX2C(i, j, ld) (j * ld + i)
namespace MLCommon {
namespace Random {
// helper kernels
/// @todo Duplicate called vctwiseAccumulate in utils.h (Kalman Filters,
// i think that is much better to use., more general)
// Column-wise accumulation: every element X[row][col] of the nPoints x dim
// sample matrix is atomically added into x[col]. One thread per element;
// callers must launch at least nPoints*dim threads and zero x beforehand.
template <typename T>
__global__ void En_KF_accumulate(const int nPoints, const int dim, const T* X, T* x)
{
  const int flat = threadIdx.x + blockDim.x * blockIdx.x;
  const int c    = flat % dim;   // column within a sample
  const int r    = flat / dim;   // sample index
  if (c < dim && r < nPoints) {
    raft::myAtomicAdd(x + c, X[flat]);
  }
}
// Divides each of the dim entries of x in place by `divider`
// (used to turn a column sum into a column mean).
template <typename T>
__global__ void En_KF_normalize(const int divider, const int dim, T* x)
{
  const int i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i >= dim) return;
  x[i] = x[i] / divider;
}
// Element-wise mean subtraction: X_diff[row][col] = X[row][col] - x[col].
// One thread per element of the nPoints x dim matrix; X_diff may alias X.
template <typename T>
__global__ void En_KF_dif(const int nPoints, const int dim, const T* X, const T* x, T* X_diff)
{
  const int flat = threadIdx.x + blockDim.x * blockIdx.x;
  const int c    = flat % dim;   // column within a sample
  const int r    = flat / dim;   // sample index
  if (c < dim && r < nPoints) {
    X_diff[flat] = X[flat] - x[c];
  }
}
// for specialising tests
enum Correlation : unsigned char {
CORRELATED, // = 0  -- random off-diagonal covariance entries
UNCORRELATED // off-diagonal covariance entries forced to zero
};
// Parameter pack for one MVG test case.
template <typename T>
struct MVGInputs {
T tolerance;                                      // allowed deviation of sample moments
typename MultiVarGaussian<T>::Decomposer method;  // covariance decomposition to exercise
Correlation corr;                                 // correlated vs. diagonal covariance
int dim, nPoints;                                 // distribution dimension, sample count
unsigned long long int seed;                      // RNG seed for mean/covariance generation
};
// Stream the test configuration so gtest failure messages identify which
// parameter set failed (previously this printed nothing, and `dims` was
// an unused parameter).
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MVGInputs<T>& dims)
{
  os << "{tolerance=" << dims.tolerance << ", dim=" << dims.dim
     << ", nPoints=" << dims.nPoints
     << ", corr=" << static_cast<int>(dims.corr)
     << ", seed=" << dims.seed << "}";
  return os;
}
// Fixture that draws nPoints samples from MultiVarGaussian with a randomly
// generated mean x and SPD covariance P, then estimates the sample mean
// (Rand_mean) and sample covariance (Rand_cov) on the GPU so the tests can
// compare them against the requested moments.
template <typename T>
class MVGTest : public ::testing::TestWithParam<MVGInputs<T>> {
 protected:
  void SetUp() override
  {
    // getting params
    params    = ::testing::TestWithParam<MVGInputs<T>>::GetParam();
    dim       = params.dim;
    nPoints   = params.nPoints;
    method    = params.method;
    corr      = params.corr;
    tolerance = params.tolerance;
    CUBLAS_CHECK(hipblasCreate(&cublasH));
    CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH));
    CUDA_CHECK(hipStreamCreate(&stream));
    // preparing to store stuff
    P = (T*)malloc(sizeof(T) * dim * dim);
    x = (T*)malloc(sizeof(T) * dim);
    X = (T*)malloc(sizeof(T) * dim * nPoints);
    CUDA_CHECK(hipMalloc((void**)&P_d, sizeof(T) * dim * dim));
    CUDA_CHECK(hipMalloc((void**)&X_d, sizeof(T) * nPoints * dim));
    CUDA_CHECK(hipMalloc((void**)&x_d, sizeof(T) * dim));
    CUDA_CHECK(hipMalloc((void**)&Rand_cov, sizeof(T) * dim * dim));
    CUDA_CHECK(hipMalloc((void**)&Rand_mean, sizeof(T) * dim));
    // generating random mean and cov.
    srand(params.seed);
    for (int j = 0; j < dim; j++)
      x[j] = rand() % 100 + 5.0f;
    // for random Cov. martix
    std::default_random_engine generator(params.seed);
    std::uniform_real_distribution<T> distribution(0.0, 1.0);
    // P (developing a +ve definite symm matrix): symmetric with random
    // entries in [0,1) and `dim` added to the diagonal for dominance.
    for (int j = 0; j < dim; j++) {
      for (int i = 0; i < j + 1; i++) {
        T k = distribution(generator);
        if (corr == UNCORRELATED) k = 0.0;
        P[IDX2C(i, j, dim)] = k;
        P[IDX2C(j, i, dim)] = k;
        if (i == j) P[IDX2C(i, j, dim)] += dim;
      }
    }
    // porting inputs to gpu
    raft::update_device(P_d, P, dim * dim, stream);
    raft::update_device(x_d, x, dim, stream);
    // initilizing the mvg
    mvg      = new MultiVarGaussian<T>(dim, method);
    size_t o = mvg->init(cublasH, cusolverH, stream);
    // give the workspace area to mvg
    CUDA_CHECK(hipMalloc((void**)&workspace_d, o));
    mvg->set_workspace(workspace_d);
    // get gaussians in X_d | P_d is destroyed.
    mvg->give_gaussian(nPoints, P_d, X_d, x_d);
    // saving the mean of the randoms in Rand_mean
    //@todo can be swapped with a API that calculates mean
    CUDA_CHECK(hipMemset(Rand_mean, 0, dim * sizeof(T)));
    dim3 block = (64);
    dim3 grid  = (raft::ceildiv(nPoints * dim, (int)block.x));
    hipLaunchKernelGGL(( En_KF_accumulate), dim3(grid), dim3(block), 0, 0, nPoints, dim, X_d, Rand_mean);
    CUDA_CHECK(hipPeekAtLastError());
    grid = (raft::ceildiv(dim, (int)block.x));
    hipLaunchKernelGGL(( En_KF_normalize), dim3(grid), dim3(block), 0, 0, nPoints, dim, Rand_mean);
    CUDA_CHECK(hipPeekAtLastError());
    // storing the error wrt random point mean in X_d
    grid = (raft::ceildiv(dim * nPoints, (int)block.x));
    hipLaunchKernelGGL(( En_KF_dif), dim3(grid), dim3(block), 0, 0, nPoints, dim, X_d, Rand_mean, X_d);
    CUDA_CHECK(hipPeekAtLastError());
    // finding the cov matrix, placing in Rand_cov
    T alfa = 1.0 / (nPoints - 1), beta = 0.0;
    hipblasHandle_t handle;
    CUBLAS_CHECK(hipblasCreate(&handle));
    CUBLAS_CHECK(raft::linalg::cublasgemm(handle,
                                          HIPBLAS_OP_N,
                                          HIPBLAS_OP_T,
                                          dim,
                                          dim,
                                          nPoints,
                                          &alfa,
                                          X_d,
                                          dim,
                                          X_d,
                                          dim,
                                          &beta,
                                          Rand_cov,
                                          dim,
                                          stream));
    // fix: this temporary handle was previously leaked (one per test case)
    CUBLAS_CHECK(hipblasDestroy(handle));
    // restoring cov provided into P_d
    raft::update_device(P_d, P, dim * dim, stream);
  }
  void TearDown() override
  {
    // freeing mallocs (fix: x_d, Rand_cov and Rand_mean were previously leaked)
    CUDA_CHECK(hipFree(P_d));
    CUDA_CHECK(hipFree(X_d));
    CUDA_CHECK(hipFree(x_d));
    CUDA_CHECK(hipFree(Rand_cov));
    CUDA_CHECK(hipFree(Rand_mean));
    CUDA_CHECK(hipFree(workspace_d));
    free(P);
    free(x);
    free(X);
    // deleting mvg
    mvg->deinit();
    delete mvg;
    CUBLAS_CHECK(hipblasDestroy(cublasH));
    CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH));
    CUDA_CHECK(hipStreamDestroy(stream));
  }
 protected:
  MVGInputs<T> params;                             // current test parameters
  T *P, *x, *X, *workspace_d, *P_d, *x_d, *X_d;    // host/device buffers
  int dim, nPoints;
  typename MultiVarGaussian<T>::Decomposer method;
  Correlation corr;
  MultiVarGaussian<T>* mvg = NULL;
  T *Rand_cov, *Rand_mean, tolerance;              // device sample moments + tolerance
  hipblasHandle_t cublasH;
  hipsolverDnHandle_t cusolverH;
  hipStream_t stream;
};  // end of MVGTest class
///@todo find out the reason that Un-correlated covs are giving problems (in qr)
// Declare your inputs
// Each entry: {tolerance, decomposition method, correlation mode, dim, nPoints, seed}
const std::vector<MVGInputs<float>> inputsf = {
{0.3f, MultiVarGaussian<float>::Decomposer::chol_decomp, Correlation::CORRELATED, 5, 30000, 6ULL},
{0.1f,
MultiVarGaussian<float>::Decomposer::chol_decomp,
Correlation::UNCORRELATED,
5,
30000,
6ULL},
{0.25f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::CORRELATED, 5, 30000, 6ULL},
{0.1f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::UNCORRELATED, 5, 30000, 6ULL},
{0.2f, MultiVarGaussian<float>::Decomposer::qr, Correlation::CORRELATED, 5, 30000, 6ULL},
// { 0.2f, MultiVarGaussian<float>::Decomposer::qr,
// Correlation::UNCORRELATED, 5, 30000, 6ULL}
};
// Double-precision cases use a larger dim/nPoints than the float ones.
const std::vector<MVGInputs<double>> inputsd = {
{0.25,
MultiVarGaussian<double>::Decomposer::chol_decomp,
Correlation::CORRELATED,
10,
3000000,
6ULL},
{0.1,
MultiVarGaussian<double>::Decomposer::chol_decomp,
Correlation::UNCORRELATED,
10,
3000000,
6ULL},
{0.25, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::CORRELATED, 10, 3000000, 6ULL},
{0.1, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::UNCORRELATED, 10, 3000000, 6ULL},
{0.2, MultiVarGaussian<double>::Decomposer::qr, Correlation::CORRELATED, 10, 3000000, 6ULL},
// { 0.2, MultiVarGaussian<double>::Decomposer::qr,
// Correlation::UNCORRELATED, 10, 3000000, 6ULL}
};
// make the tests
typedef MVGTest<float> MVGTestF;
typedef MVGTest<double> MVGTestD;
// Sample mean must match the requested mean x_d within tolerance.
TEST_P(MVGTestF, MeanIsCorrectF)
{
EXPECT_TRUE(raft::devArrMatch(x_d, Rand_mean, dim, raft::CompareApprox<float>(tolerance)))
<< " in MeanIsCorrect";
}
// Sample covariance must match the requested covariance P_d within tolerance.
TEST_P(MVGTestF, CovIsCorrectF)
{
EXPECT_TRUE(raft::devArrMatch(P_d, Rand_cov, dim, dim, raft::CompareApprox<float>(tolerance)))
<< " in CovIsCorrect";
}
TEST_P(MVGTestD, MeanIsCorrectD)
{
EXPECT_TRUE(raft::devArrMatch(x_d, Rand_mean, dim, raft::CompareApprox<double>(tolerance)))
<< " in MeanIsCorrect";
}
TEST_P(MVGTestD, CovIsCorrectD)
{
EXPECT_TRUE(raft::devArrMatch(P_d, Rand_cov, dim, dim, raft::CompareApprox<double>(tolerance)))
<< " in CovIsCorrect";
}
// call the tests
INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestD, ::testing::ValuesIn(inputsd));
}; // end of namespace Random
}; // end of namespace MLCommon
| 7232aa0d56689ccd31f944ce786d7cad97ee7c97.cu | /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <cmath>
#include <iostream>
#include <random/mvg.cuh>
#include <random>
#include "test_utils.h"
// mvg.h takes in matrices that are colomn major (as in fortan)
#define IDX2C(i, j, ld) (j * ld + i)
namespace MLCommon {
namespace Random {
// helper kernels
/// @todo Duplicate called vctwiseAccumulate in utils.h (Kalman Filters,
// i think that is much better to use., more general)
// Column-wise accumulation: every element X[row][col] of the nPoints x dim
// sample matrix is atomically added into x[col]. One thread per element;
// callers must launch at least nPoints*dim threads and zero x beforehand.
template <typename T>
__global__ void En_KF_accumulate(const int nPoints, const int dim, const T* X, T* x)
{
  const int flat = threadIdx.x + blockDim.x * blockIdx.x;
  const int c    = flat % dim;   // column within a sample
  const int r    = flat / dim;   // sample index
  if (c < dim && r < nPoints) {
    raft::myAtomicAdd(x + c, X[flat]);
  }
}
// Divides each of the dim entries of x in place by `divider`
// (used to turn a column sum into a column mean).
template <typename T>
__global__ void En_KF_normalize(const int divider, const int dim, T* x)
{
  const int i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i >= dim) return;
  x[i] = x[i] / divider;
}
// Element-wise mean subtraction: X_diff[row][col] = X[row][col] - x[col].
// One thread per element of the nPoints x dim matrix; X_diff may alias X.
template <typename T>
__global__ void En_KF_dif(const int nPoints, const int dim, const T* X, const T* x, T* X_diff)
{
  const int flat = threadIdx.x + blockDim.x * blockIdx.x;
  const int c    = flat % dim;   // column within a sample
  const int r    = flat / dim;   // sample index
  if (c < dim && r < nPoints) {
    X_diff[flat] = X[flat] - x[c];
  }
}
// for specialising tests
enum Correlation : unsigned char {
CORRELATED, // = 0  -- random off-diagonal covariance entries
UNCORRELATED // off-diagonal covariance entries forced to zero
};
// Parameter pack for one MVG test case.
template <typename T>
struct MVGInputs {
T tolerance;                                      // allowed deviation of sample moments
typename MultiVarGaussian<T>::Decomposer method;  // covariance decomposition to exercise
Correlation corr;                                 // correlated vs. diagonal covariance
int dim, nPoints;                                 // distribution dimension, sample count
unsigned long long int seed;                      // RNG seed for mean/covariance generation
};
// Stream the test configuration so gtest failure messages identify which
// parameter set failed (previously this printed nothing, and `dims` was
// an unused parameter).
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MVGInputs<T>& dims)
{
  os << "{tolerance=" << dims.tolerance << ", dim=" << dims.dim
     << ", nPoints=" << dims.nPoints
     << ", corr=" << static_cast<int>(dims.corr)
     << ", seed=" << dims.seed << "}";
  return os;
}
// Fixture that draws nPoints samples from MultiVarGaussian with a randomly
// generated mean x and SPD covariance P, then estimates the sample mean
// (Rand_mean) and sample covariance (Rand_cov) on the GPU so the tests can
// compare them against the requested moments.
template <typename T>
class MVGTest : public ::testing::TestWithParam<MVGInputs<T>> {
 protected:
  void SetUp() override
  {
    // getting params
    params    = ::testing::TestWithParam<MVGInputs<T>>::GetParam();
    dim       = params.dim;
    nPoints   = params.nPoints;
    method    = params.method;
    corr      = params.corr;
    tolerance = params.tolerance;
    CUBLAS_CHECK(cublasCreate(&cublasH));
    CUSOLVER_CHECK(cusolverDnCreate(&cusolverH));
    CUDA_CHECK(cudaStreamCreate(&stream));
    // preparing to store stuff
    P = (T*)malloc(sizeof(T) * dim * dim);
    x = (T*)malloc(sizeof(T) * dim);
    X = (T*)malloc(sizeof(T) * dim * nPoints);
    CUDA_CHECK(cudaMalloc((void**)&P_d, sizeof(T) * dim * dim));
    CUDA_CHECK(cudaMalloc((void**)&X_d, sizeof(T) * nPoints * dim));
    CUDA_CHECK(cudaMalloc((void**)&x_d, sizeof(T) * dim));
    CUDA_CHECK(cudaMalloc((void**)&Rand_cov, sizeof(T) * dim * dim));
    CUDA_CHECK(cudaMalloc((void**)&Rand_mean, sizeof(T) * dim));
    // generating random mean and cov.
    srand(params.seed);
    for (int j = 0; j < dim; j++)
      x[j] = rand() % 100 + 5.0f;
    // for random Cov. martix
    std::default_random_engine generator(params.seed);
    std::uniform_real_distribution<T> distribution(0.0, 1.0);
    // P (developing a +ve definite symm matrix): symmetric with random
    // entries in [0,1) and `dim` added to the diagonal for dominance.
    for (int j = 0; j < dim; j++) {
      for (int i = 0; i < j + 1; i++) {
        T k = distribution(generator);
        if (corr == UNCORRELATED) k = 0.0;
        P[IDX2C(i, j, dim)] = k;
        P[IDX2C(j, i, dim)] = k;
        if (i == j) P[IDX2C(i, j, dim)] += dim;
      }
    }
    // porting inputs to gpu
    raft::update_device(P_d, P, dim * dim, stream);
    raft::update_device(x_d, x, dim, stream);
    // initilizing the mvg
    mvg      = new MultiVarGaussian<T>(dim, method);
    size_t o = mvg->init(cublasH, cusolverH, stream);
    // give the workspace area to mvg
    CUDA_CHECK(cudaMalloc((void**)&workspace_d, o));
    mvg->set_workspace(workspace_d);
    // get gaussians in X_d | P_d is destroyed.
    mvg->give_gaussian(nPoints, P_d, X_d, x_d);
    // saving the mean of the randoms in Rand_mean
    //@todo can be swapped with a API that calculates mean
    CUDA_CHECK(cudaMemset(Rand_mean, 0, dim * sizeof(T)));
    dim3 block = (64);
    dim3 grid  = (raft::ceildiv(nPoints * dim, (int)block.x));
    En_KF_accumulate<<<grid, block>>>(nPoints, dim, X_d, Rand_mean);
    CUDA_CHECK(cudaPeekAtLastError());
    grid = (raft::ceildiv(dim, (int)block.x));
    En_KF_normalize<<<grid, block>>>(nPoints, dim, Rand_mean);
    CUDA_CHECK(cudaPeekAtLastError());
    // storing the error wrt random point mean in X_d
    grid = (raft::ceildiv(dim * nPoints, (int)block.x));
    En_KF_dif<<<grid, block>>>(nPoints, dim, X_d, Rand_mean, X_d);
    CUDA_CHECK(cudaPeekAtLastError());
    // finding the cov matrix, placing in Rand_cov
    T alfa = 1.0 / (nPoints - 1), beta = 0.0;
    cublasHandle_t handle;
    CUBLAS_CHECK(cublasCreate(&handle));
    CUBLAS_CHECK(raft::linalg::cublasgemm(handle,
                                          CUBLAS_OP_N,
                                          CUBLAS_OP_T,
                                          dim,
                                          dim,
                                          nPoints,
                                          &alfa,
                                          X_d,
                                          dim,
                                          X_d,
                                          dim,
                                          &beta,
                                          Rand_cov,
                                          dim,
                                          stream));
    // fix: this temporary handle was previously leaked (one per test case)
    CUBLAS_CHECK(cublasDestroy(handle));
    // restoring cov provided into P_d
    raft::update_device(P_d, P, dim * dim, stream);
  }
  void TearDown() override
  {
    // freeing mallocs (fix: x_d, Rand_cov and Rand_mean were previously leaked)
    CUDA_CHECK(cudaFree(P_d));
    CUDA_CHECK(cudaFree(X_d));
    CUDA_CHECK(cudaFree(x_d));
    CUDA_CHECK(cudaFree(Rand_cov));
    CUDA_CHECK(cudaFree(Rand_mean));
    CUDA_CHECK(cudaFree(workspace_d));
    free(P);
    free(x);
    free(X);
    // deleting mvg
    mvg->deinit();
    delete mvg;
    CUBLAS_CHECK(cublasDestroy(cublasH));
    CUSOLVER_CHECK(cusolverDnDestroy(cusolverH));
    CUDA_CHECK(cudaStreamDestroy(stream));
  }
 protected:
  MVGInputs<T> params;                             // current test parameters
  T *P, *x, *X, *workspace_d, *P_d, *x_d, *X_d;    // host/device buffers
  int dim, nPoints;
  typename MultiVarGaussian<T>::Decomposer method;
  Correlation corr;
  MultiVarGaussian<T>* mvg = NULL;
  T *Rand_cov, *Rand_mean, tolerance;              // device sample moments + tolerance
  cublasHandle_t cublasH;
  cusolverDnHandle_t cusolverH;
  cudaStream_t stream;
};  // end of MVGTest class
///@todo find out the reason that Un-correlated covs are giving problems (in qr)
// Declare your inputs
// Each entry: {tolerance, decomposition method, correlation mode, dim, nPoints, seed}
const std::vector<MVGInputs<float>> inputsf = {
{0.3f, MultiVarGaussian<float>::Decomposer::chol_decomp, Correlation::CORRELATED, 5, 30000, 6ULL},
{0.1f,
MultiVarGaussian<float>::Decomposer::chol_decomp,
Correlation::UNCORRELATED,
5,
30000,
6ULL},
{0.25f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::CORRELATED, 5, 30000, 6ULL},
{0.1f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::UNCORRELATED, 5, 30000, 6ULL},
{0.2f, MultiVarGaussian<float>::Decomposer::qr, Correlation::CORRELATED, 5, 30000, 6ULL},
// { 0.2f, MultiVarGaussian<float>::Decomposer::qr,
// Correlation::UNCORRELATED, 5, 30000, 6ULL}
};
// Double-precision cases use a larger dim/nPoints than the float ones.
const std::vector<MVGInputs<double>> inputsd = {
{0.25,
MultiVarGaussian<double>::Decomposer::chol_decomp,
Correlation::CORRELATED,
10,
3000000,
6ULL},
{0.1,
MultiVarGaussian<double>::Decomposer::chol_decomp,
Correlation::UNCORRELATED,
10,
3000000,
6ULL},
{0.25, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::CORRELATED, 10, 3000000, 6ULL},
{0.1, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::UNCORRELATED, 10, 3000000, 6ULL},
{0.2, MultiVarGaussian<double>::Decomposer::qr, Correlation::CORRELATED, 10, 3000000, 6ULL},
// { 0.2, MultiVarGaussian<double>::Decomposer::qr,
// Correlation::UNCORRELATED, 10, 3000000, 6ULL}
};
// make the tests
typedef MVGTest<float> MVGTestF;
typedef MVGTest<double> MVGTestD;
// Sample mean must match the requested mean x_d within tolerance.
TEST_P(MVGTestF, MeanIsCorrectF)
{
EXPECT_TRUE(raft::devArrMatch(x_d, Rand_mean, dim, raft::CompareApprox<float>(tolerance)))
<< " in MeanIsCorrect";
}
// Sample covariance must match the requested covariance P_d within tolerance.
TEST_P(MVGTestF, CovIsCorrectF)
{
EXPECT_TRUE(raft::devArrMatch(P_d, Rand_cov, dim, dim, raft::CompareApprox<float>(tolerance)))
<< " in CovIsCorrect";
}
TEST_P(MVGTestD, MeanIsCorrectD)
{
EXPECT_TRUE(raft::devArrMatch(x_d, Rand_mean, dim, raft::CompareApprox<double>(tolerance)))
<< " in MeanIsCorrect";
}
TEST_P(MVGTestD, CovIsCorrectD)
{
EXPECT_TRUE(raft::devArrMatch(P_d, Rand_cov, dim, dim, raft::CompareApprox<double>(tolerance)))
<< " in CovIsCorrect";
}
// call the tests
INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestD, ::testing::ValuesIn(inputsd));
}; // end of namespace Random
}; // end of namespace MLCommon
|
176833eeef4b6a4c20dd25e978d04a746b2de348.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// kernel copying everything except the last element
// Compacts a CSR value array by dropping the LAST element of every row
// (the diagonal, for a lower-triangular matrix). One thread per row.
// For row gtidx, the `gtidx` rows before it each dropped one element, so
// source position i maps to destination i - gtidx; the output holds
// nnz - num_rows values.
__global__ void
magma_zpreselect_gpu0(
magma_int_t num_rows,
magmaIndex_ptr row,
magmaDoubleComplex *val,
magmaDoubleComplex *valn)
{
int tidx = threadIdx.x;
int bidx = blockIdx.x;
int gtidx = bidx * blockDim.x + tidx;
if (gtidx < num_rows) {
// copy all but the final entry of this row
for (int i=row[gtidx]; i<row[gtidx+1]-1; i++){
valn[i-gtidx] = val[i];
}
}
}
// kernel copying everything except the first element
// Compacts a CSR value array by dropping the FIRST element of every row
// (the diagonal, for an upper-triangular matrix). One thread per row.
// For source position i in row gtidx, the rows 0..gtidx have each dropped
// one element (including this row's own head), i.e. gtidx+1 elements are
// skipped before i, so the destination index is i - gtidx - 1.
// Fix: the previous mapping `i - gtidx` never wrote output slot 0 and
// wrote one element past the end of the nnz - num_rows output buffer.
__global__ void
magma_zpreselect_gpu1(
magma_int_t num_rows,
magmaIndex_ptr row,
magmaDoubleComplex *val,
magmaDoubleComplex *valn)
{
    int tidx = threadIdx.x;
    int bidx = blockIdx.x;
    int gtidx = bidx * blockDim.x + tidx;
    if (gtidx < num_rows) {
        // copy all but the first entry of this row
        for (int i = row[gtidx] + 1; i < row[gtidx+1]; i++) {
            valn[i - gtidx - 1] = val[i];
        }
    }
}
/***************************************************************************//**
Purpose
-------
This function takes a list of candidates with residuals,
and selects the largest in every row. The output matrix only contains these
largest elements (respectively a zero element if there is no candidate for
a certain row).
Arguments
---------
@param[in]
order magma_int_t
order==0 lower triangular
order==1 upper triangular
@param[in]
A magma_z_matrix*
Matrix where elements are removed.
@param[out]
oneA magma_z_matrix*
Matrix where elements are removed.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
*******************************************************************************/
// Host driver: allocates oneA (nnz - num_rows values) and launches the
// row-compaction kernel that strips one element per row of A.
// order==1 drops the first element of each row (upper triangular),
// otherwise the last element is dropped (lower triangular).
// NOTE(review): only oneA->dval is filled; row/col index arrays are not
// produced here — presumably the caller only needs the values. Confirm.
extern "C" magma_int_t
magma_zpreselect_gpu(
magma_int_t order,
magma_z_matrix *A,
magma_z_matrix *oneA,
magma_queue_t queue )
{
magma_int_t info = 0;
dim3 block(BLOCK_SIZE, 1, 1);
// one thread per row of A
dim3 grid(magma_ceildiv(A->num_rows, BLOCK_SIZE), 1, 1);
oneA->num_rows = A->num_rows;
oneA->num_cols = A->num_cols;
oneA->nnz = A->nnz - A->num_rows;
oneA->storage_type = Magma_CSR;
oneA->memory_location = Magma_DEV;
// CHECK jumps to cleanup on allocation failure, setting info
CHECK( magma_zmalloc( &oneA->dval, oneA->nnz ) );
if( order == 1 ){ // don't copy the first
hipLaunchKernelGGL(( magma_zpreselect_gpu1), dim3(grid), dim3(block), 0, queue->cuda_stream(),
A->num_rows, A->drow, A->dval, oneA->dval );
// Reference CPU implementation kept for documentation:
// #pragma omp parallel for
// for( magma_int_t row=0; row<A->num_rows; row++){
// for( magma_int_t i=A->row[row]+1; i<A->row[row+1]; i++ ){
// oneA->val[ i-row ] = A->val[i];
// }
// }
} else { // don't copy the last
hipLaunchKernelGGL(( magma_zpreselect_gpu0), dim3(grid), dim3(block), 0, queue->cuda_stream(),
A->num_rows, A->drow, A->dval, oneA->dval );
// Reference CPU implementation kept for documentation:
// #pragma omp parallel for
// for( magma_int_t row=0; row<A->num_rows; row++){
// for( magma_int_t i=A->row[row]; i<A->row[row+1]-1; i++ ){
// oneA->val[ i-row ] = A->val[i];
// }
// }
}
cleanup:
return info;
}
| 176833eeef4b6a4c20dd25e978d04a746b2de348.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// kernel copying everything except the last element
// Compacts a CSR value array by dropping the LAST element of every row
// (the diagonal, for a lower-triangular matrix). One thread per row.
// For row gtidx, the `gtidx` rows before it each dropped one element, so
// source position i maps to destination i - gtidx; the output holds
// nnz - num_rows values.
__global__ void
magma_zpreselect_gpu0(
magma_int_t num_rows,
magmaIndex_ptr row,
magmaDoubleComplex *val,
magmaDoubleComplex *valn)
{
int tidx = threadIdx.x;
int bidx = blockIdx.x;
int gtidx = bidx * blockDim.x + tidx;
if (gtidx < num_rows) {
// copy all but the final entry of this row
for (int i=row[gtidx]; i<row[gtidx+1]-1; i++){
valn[i-gtidx] = val[i];
}
}
}
// kernel copying everything except the first element
// Compacts a CSR value array by dropping the FIRST element of every row
// (the diagonal, for an upper-triangular matrix). One thread per row.
// For source position i in row gtidx, the rows 0..gtidx have each dropped
// one element (including this row's own head), i.e. gtidx+1 elements are
// skipped before i, so the destination index is i - gtidx - 1.
// Fix: the previous mapping `i - gtidx` never wrote output slot 0 and
// wrote one element past the end of the nnz - num_rows output buffer.
__global__ void
magma_zpreselect_gpu1(
magma_int_t num_rows,
magmaIndex_ptr row,
magmaDoubleComplex *val,
magmaDoubleComplex *valn)
{
    int tidx = threadIdx.x;
    int bidx = blockIdx.x;
    int gtidx = bidx * blockDim.x + tidx;
    if (gtidx < num_rows) {
        // copy all but the first entry of this row
        for (int i = row[gtidx] + 1; i < row[gtidx+1]; i++) {
            valn[i - gtidx - 1] = val[i];
        }
    }
}
/***************************************************************************//**
Purpose
-------
This function takes a list of candidates with residuals,
and selects the largest in every row. The output matrix only contains these
largest elements (respectively a zero element if there is no candidate for
a certain row).
Arguments
---------
@param[in]
order magma_int_t
order==0 lower triangular
order==1 upper triangular
@param[in]
A magma_z_matrix*
Matrix where elements are removed.
@param[out]
oneA magma_z_matrix*
Matrix where elements are removed.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zaux
*******************************************************************************/
// Host driver: allocates oneA (nnz - num_rows values) and launches the
// row-compaction kernel that strips one element per row of A.
// order==1 drops the first element of each row (upper triangular),
// otherwise the last element is dropped (lower triangular).
// NOTE(review): only oneA->dval is filled; row/col index arrays are not
// produced here — presumably the caller only needs the values. Confirm.
extern "C" magma_int_t
magma_zpreselect_gpu(
magma_int_t order,
magma_z_matrix *A,
magma_z_matrix *oneA,
magma_queue_t queue )
{
magma_int_t info = 0;
dim3 block(BLOCK_SIZE, 1, 1);
// one thread per row of A
dim3 grid(magma_ceildiv(A->num_rows, BLOCK_SIZE), 1, 1);
oneA->num_rows = A->num_rows;
oneA->num_cols = A->num_cols;
oneA->nnz = A->nnz - A->num_rows;
oneA->storage_type = Magma_CSR;
oneA->memory_location = Magma_DEV;
// CHECK jumps to cleanup on allocation failure, setting info
CHECK( magma_zmalloc( &oneA->dval, oneA->nnz ) );
if( order == 1 ){ // don't copy the first
magma_zpreselect_gpu1<<<grid, block, 0, queue->cuda_stream()>>>
( A->num_rows, A->drow, A->dval, oneA->dval );
// Reference CPU implementation kept for documentation:
// #pragma omp parallel for
// for( magma_int_t row=0; row<A->num_rows; row++){
// for( magma_int_t i=A->row[row]+1; i<A->row[row+1]; i++ ){
// oneA->val[ i-row ] = A->val[i];
// }
// }
} else { // don't copy the last
magma_zpreselect_gpu0<<<grid, block, 0, queue->cuda_stream()>>>
( A->num_rows, A->drow, A->dval, oneA->dval );
// Reference CPU implementation kept for documentation:
// #pragma omp parallel for
// for( magma_int_t row=0; row<A->num_rows; row++){
// for( magma_int_t i=A->row[row]; i<A->row[row+1]-1; i++ ){
// oneA->val[ i-row ] = A->val[i];
// }
// }
}
cleanup:
return info;
}
|
db7dc6d79d78b45c39a995d708c6ba4e7d1b451c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
// Sums segments of a 1-D input for sorted segment ids. Blocks are grouped
// per segment; thread 0 seeds the output slot with the segment's first
// element and the remaining elements are accumulated with atomicAdd.
// starts[s]/lengths[s] give the first index and length of segment s.
// NOTE(review): `start`, `finish` and `zIndex` are only initialized by
// thread 0 when segment < numOfClasses, yet the loop after __syncthreads()
// runs unconditionally — presumably the launch guarantees
// segment < numOfClasses for every block; confirm against the caller.
template <typename T, typename I>
static SD_KERNEL void segmentSumLinearKernel(const void* input, const sd::LongType* inputShape, int* starts,
int* lengths, sd::LongType numOfClasses, void* output,
const sd::LongType* outputShape) {
// NOTE(review): `val` is declared but never used.
__shared__ T* val;
__shared__ sd::LongType xLen, zLen, segment, zIndex;
__shared__ const T* x;
__shared__ T* z;
__shared__ int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
// map this block to a segment (ceil-div spreads blocks over segments)
threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
segment = blockIdx.x / threadsPerSegment;
x = reinterpret_cast<const T*>(input);
z = reinterpret_cast<T*>(output);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
start = starts[segment];
finish = start + lengths[segment];
// seed with the first element so the atomics below only add the rest
z[zIndex] = x[shape::getIndexOffset(start, inputShape)];
}
}
__syncthreads();
// accumulate the remaining elements of the segment (start+1 .. finish-1)
for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
sd::math::atomics::sd_atomicAdd(&z[zIndex], x[xIndex]);
}
}
// -------------------------------------------------------------------------------------------------------------- //
// Sums segments of a 1-D input for UNSORTED segment ids: one block per
// segment (segment = blockIdx.x); every thread scans the whole index
// array and atomically adds the elements whose id matches this segment.
// Empty segments (lengths[segment] == 0) produce 0.
template <typename T, typename I>
static SD_KERNEL void unsortedSegmentSumLinearKernel(const void* input, const sd::LongType* inputShape,
const void* indices, const sd::LongType* indicesShape, int* starts,
int* lengths, sd::LongType numOfClasses, void* output,
const sd::LongType* outputShape) {
// NOTE(review): `val` is declared but never used.
__shared__ T* val;
__shared__ sd::LongType xLen, zLen, segment, zIndex;
__shared__ const T* x;
__shared__ T* z;
__shared__ const I* y; // int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
segment = blockIdx.x;
x = reinterpret_cast<const T*>(input);
z = reinterpret_cast<T*>(output);
y = reinterpret_cast<const I*>(indices);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
zIndex = shape::getIndexOffset(segment, outputShape);
// seed with the segment's first element; empty segments get 0
if (lengths[segment] > 0)
z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)];
else
z[zIndex] = 0; // DataTypeUtils::max<T>();
}
__syncthreads();
// full scan: add every element whose segment id matches, skipping the
// element already used to seed the output above
if (lengths[segment] > 0)
for (auto e = threadIdx.x; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
auto yIndex = shape::getIndexOffset(e, indicesShape);
if (y[yIndex] == segment && e != starts[segment]) {
sd::math::atomics::sd_atomicAdd(&z[zIndex], x[xIndex]);
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentSum kernel
// TAD (tensor-along-dimension) variant of segment sum: one block per outer (dim-0) row
// of the input. indices[blockIdx.x] names the destination segment; each element of the
// row is atomically accumulated into the matching output TAD, so the caller must have
// zeroed (or pre-seeded) the output buffer beforehand.
// NOTE(review): `blockIdx.x <= total` looks like an off-by-one (`<` expected); it is
// harmless while gridDim.x == sizeAt(inputShape, 0) as launched by the wrappers in this
// file, but confirm before reusing with a larger grid.
template <typename T, typename I>
static SD_KERNEL void segmentSumTadKernel(const void* inputBuf, const sd::LongType* inputShape,
                                          const sd::LongType* inputTads, const sd::LongType* inputTadOffsets,
                                          const I* indices, int* starts, int* lengths, sd::LongType numOfClasses,
                                          void* outputBuf, const sd::LongType* outputShape,
                                          const sd::LongType* outputTads, const sd::LongType* outputTadOffsets) {
  __shared__ T* val;  // unused scratch slot, kept as-is
  __shared__ sd::LongType len, zIndex, total;
  __shared__ T* z;
  __shared__ int start, finish;
  if (threadIdx.x == 0) {
    auto segment = indices[blockIdx.x];  // / threadsPerSegment;
    z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
    len = shape::length(inputTads);
    start = starts[segment];
    finish = start + lengths[segment];
    total = shape::sizeAt(inputShape, 0);
  }
  __syncthreads();
  auto idx = blockIdx.x;
  if (blockIdx.x <= total) {
    auto x = reinterpret_cast<const T*>(inputBuf) + inputTadOffsets[idx];
    if (blockIdx.x == start) {
      // Row that opens its segment: accumulate unconditionally.
      for (auto e = threadIdx.x; e < len; e += blockDim.x) {
        auto xIndex = shape::getIndexOffset(e, inputTads);
        auto zIndex = shape::getIndexOffset(e, outputTads);
        sd::math::atomics::sd_atomicAdd(&z[zIndex], x[xIndex]);
      }
    } else {
      // Remaining rows: accumulate only when their segment is non-empty.
      for (auto e = threadIdx.x; e < len; e += blockDim.x) {
        auto xIndex = shape::getIndexOffset(e, inputTads);
        auto zIndex = shape::getIndexOffset(e, outputTads);
        if (lengths[indices[idx]]) sd::math::atomics::sd_atomicAdd(&z[zIndex], x[xIndex]);
      }
    }
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Host dispatcher for the sorted segment-sum forward pass (one (T, I) instantiation).
// Derives numClasses from the last index (indices are assumed sorted ascending), builds
// per-segment start/length tables on device via fillUpSegments, then launches either the
// linear kernel (vector input) or the TAD kernel (rank > 1, segments along dimension 0).
template <typename T, typename I>
static void segmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
  auto stream = context->getCudaStream();
  sd::LongType numClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1;
  NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
  NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
  // Begin offsets start "past the end" so fillUpSegments can min-reduce them into place.
  classesRangesBegs.assign(indices->lengthOf());
  classesRangesLens.assign(0);
  dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);  // NOTE(review): unused below; launch configs are inlined
  fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
  int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
  int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
  if (input->isVector()) {
    // NOTE(review): block dim == input length; may exceed the device's per-block thread
    // limit for long vectors - confirm upstream bounds the input size.
    hipLaunchKernelGGL(( segmentSumLinearKernel<T, I>), dim3(numClasses), dim3(input->lengthOf()), numClasses * 32 + 32, *stream,
        input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(),
        output->specialShapeInfo());
  } else {
    // Segments run along dimension 0: build TAD packs over the remaining dimensions.
    std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
    auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
    auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
    auto inputTads = packX.specialShapeInfo();
    auto inputTadOffsets = packX.specialOffsets();
    auto outputTads = packZ.specialShapeInfo();
    auto outputTadOffsets = packZ.specialOffsets();
    hipLaunchKernelGGL(( segmentSumTadKernel<T, I>), dim3(input->sizeAt(0)), dim3(512), 2048, *stream,
        input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets,
        reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(),
        output->specialShapeInfo(), outputTads, outputTadOffsets);
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry for sorted segment sum: syncs special buffers, zeroes the output (the TAD
// kernel accumulates into it with atomics), then dispatches on (numeric, index) dtypes.
void segmentSumFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
  NDArray::prepareSpecialUse({output}, {input, indices});
  output->nullify();
  BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentSumFunctor_, (context, input, indices, output),
                        SD_NUMERIC_TYPES, SD_INDEXING_TYPES);
  NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Host dispatcher for the unsorted segment-sum forward pass (indices in any order).
// numOfClasses is supplied by the caller instead of being derived from the indices.
template <typename T, typename I>
static void unsortedSegmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices,
                                       sd::LongType numOfClasses, NDArray* output) {
  auto stream = context->getCudaStream();
  // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
  NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
  NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
  // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
  // classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes);
  // Begin offsets start "past the end" so fillUpSegments can min-reduce them into place.
  classesRangesBegs.assign(indices->lengthOf());
  classesRangesLens.assign(0);
  dim3 dims(numOfClasses, indices->lengthOf(), (numOfClasses + 1) * 64);
  // int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
  fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
  int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
  int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
  if (input->isVector()) {
    hipLaunchKernelGGL(( unsortedSegmentSumLinearKernel<T, I>), dim3(dims.x), dim3(dims.y), dims.z, *stream,
        input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(),
        begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
  } else {
    // TAD path accumulates with atomics, so the output must be zeroed first.
    output->assign(0);
    std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
    auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
    auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
    auto inputTads = packX.specialShapeInfo();
    auto inputTadOffsets = packX.specialOffsets();
    auto outputTads = packZ.specialShapeInfo();
    auto outputTadOffsets = packZ.specialOffsets();
    // One block per input row along dimension 0.
    dims.x = input->sizeAt(0);
    hipLaunchKernelGGL(( segmentSumTadKernel<T, I>), dim3(dims.x), dim3(dims.y), dims.z, *stream,
        input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets,
        reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(),
        output->specialShapeInfo(), outputTads, outputTadOffsets);
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry for unsorted segment sum: syncs special buffers, zeroes the output, then
// dispatches on (numeric, index) dtypes.
void unsortedSegmentSumFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses,
                               NDArray* output) {
  NDArray::prepareSpecialUse({output}, {input, indices});
  output->nullify();
  BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSumFunctor_,
                        (context, input, indices, numOfClasses, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES);
  NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Backpropagate ops
// -------------------------------------------------------------------------------------------------------------- //
// Sorted sum backpropagate
// Segment-sum backprop for vector inputs: since d(sum)/dx == 1, every input position
// simply receives the upstream gradient of its own segment. Grid-stride loop, so any
// launch configuration is correct.
template <typename T, typename I>
static SD_KERNEL void segmentSumBPLinearKernel(const void* inputBuf, const sd::LongType* inputShape, const void* eps,
                                               const sd::LongType* epsShape, const void* indicesBuf,
                                               const sd::LongType* indicesShape, void* outputBuf,
                                               const sd::LongType* outputShape) {
  auto x = reinterpret_cast<const T*>(inputBuf);
  auto y = reinterpret_cast<const I*>(indicesBuf);
  auto z = reinterpret_cast<T*>(outputBuf);
  auto gradOut = reinterpret_cast<const T*>(eps);
  __shared__ sd::LongType xLen, gradLen;
  if (threadIdx.x == 0) {
    xLen = shape::length(inputShape);
    gradLen = shape::length(epsShape);
  }
  __syncthreads();
  auto start = blockIdx.x * blockDim.x + threadIdx.x;
  auto step = gridDim.x * blockDim.x;
  for (auto e = start; e < xLen; e += step) {
    auto zOffset = shape::getIndexOffset(e, outputShape);
    auto xOffset = shape::getIndexOffset(e, inputShape);
    auto yOffset = shape::getIndexOffset(e, indicesShape);
    auto classIndex = y[yOffset];
    // Broadcast the segment's gradient to each of its input elements.
    auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
    z[zOffset] = gradOut[gradOffsetO];
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Segment-sum backprop, TAD variant: for every index entry i (strided over blocks), the
// gradient TAD of segment y[i] is copied into output TAD i.
// NOTE(review): the inner copy indexes rows with a plain `e`, i.e. it assumes both the
// gradOut and output TADs are contiguous with unit stride - confirm for exotic layouts.
template <typename T, typename I>
static SD_KERNEL void segmentSumBPTadKernel(const void* inputBuf, const sd::LongType* inputShape, const void* eps,
                                            const sd::LongType* epsShape, const void* indicesBuf,
                                            const sd::LongType* indicesShape, void* outputBuf,
                                            const sd::LongType* outputShape, const sd::LongType* inputTad,
                                            const sd::LongType* inputOffsets, const sd::LongType* gradOutTad,
                                            const sd::LongType* gradOutOffsets, const sd::LongType* outTad,
                                            const sd::LongType* outOffsets) {
  __shared__ const T* x;
  __shared__ const T* gradOut;
  __shared__ const I* y;
  __shared__ T* z;
  __shared__ sd::LongType xLen, yLen, gradLen, currentLen;
  if (threadIdx.x == 0) {
    xLen = shape::length(inputShape);
    x = reinterpret_cast<const T*>(inputBuf);
    y = reinterpret_cast<const I*>(indicesBuf);
    z = reinterpret_cast<T*>(outputBuf);
    yLen = shape::length(indicesShape);
    gradOut = reinterpret_cast<const T*>(eps);
    gradLen = shape::length(epsShape);
    currentLen = shape::length(outTad);
  }
  __syncthreads();
  for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
    auto yIndex = shape::getIndexOffset(i, indicesShape);
    auto segment = y[yIndex];
    auto currentOut = z + outOffsets[i];
    auto outGrad = gradOut + gradOutOffsets[segment];
    // Element-wise copy of the segment's gradient row into output row i.
    for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
      currentOut[e] = outGrad[e];
    }
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Typed dispatch of sorted segment-sum backprop: vector inputs use the linear kernel,
// higher ranks use the TAD kernel over dimension 0.
// NOTE(review): the block dimension is input->lengthOf(); for long inputs this exceeds
// the per-block thread limit - confirm callers bound the input length, or clamp here.
template <typename T, typename I>
sd::Status segmentSumFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
                                NDArray* output) {
  auto stream = context->getCudaStream();
  NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
  if (input->isVector()) {
    sd::LongType loop_size = input->lengthOf();
    auto numOfClasses = gradOut->lengthOf(); // indices->e<sd::LongType>(loop_size - 1);
    hipLaunchKernelGGL(( segmentSumBPLinearKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream,
        input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
        indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
  } else {
    std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
    auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
    auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
    auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
    auto inputTads = packX.specialShapeInfo();
    auto inputTadOffsets = packX.specialOffsets();
    auto outputTads = packZ.specialShapeInfo();
    auto outputTadOffsets = packZ.specialOffsets();
    auto gradOutTads = packGradOut.specialShapeInfo();
    auto gradOutTadOffsets = packGradOut.specialOffsets();
    hipLaunchKernelGGL(( segmentSumBPTadKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream,
        input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
        indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
        inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets);
  }
  NDArray::registerSpecialUse({output}, {input, indices, gradOut});
  return sd::Status::OK;
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry for sorted segment-sum backprop: dispatches on (float, index) dtypes.
// Fix: the selector is invoked without an embedded `return` so that registerSpecialUse()
// is always executed (mirroring prepareSpecialUse above) and every control path returns
// a value - previously the `return`-in-macro form skipped registerSpecialUse and let the
// function fall off the end (undefined behavior) for unmatched dtype combinations.
sd::Status segmentSumFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
                               NDArray* output) {
  NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
  BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), segmentSumFunctorBP_,
                        (context, input, indices, gradOut, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES);
  NDArray::registerSpecialUse({output}, {input, indices, gradOut});
  return sd::Status::OK;
}
// Typed dispatch of unsorted segment-sum backprop. Identical to segmentSumFunctorBP_
// because the gradient of a sum does not depend on index ordering.
// NOTE(review): the numOfClasses parameter is shadowed by the local derived from gradOut
// in the vector branch and is otherwise unused - kept for interface parity.
template <typename T, typename I>
static sd::Status unsortedSegmentSumFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices,
                                               NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) {
  auto stream = context->getCudaStream();
  NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
  if (input->isVector()) {
    sd::LongType loop_size = input->lengthOf();
    auto numOfClasses = gradOut->lengthOf(); // indices->e<sd::LongType>(loop_size - 1);
    // NOTE(review): block dim == input length may exceed the device limit - confirm bounds.
    hipLaunchKernelGGL(( segmentSumBPLinearKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream,
        input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
        indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
  } else {
    std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
    auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
    auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
    auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
    auto inputTads = packX.specialShapeInfo();
    auto inputTadOffsets = packX.specialOffsets();
    auto outputTads = packZ.specialShapeInfo();
    auto outputTadOffsets = packZ.specialOffsets();
    auto gradOutTads = packGradOut.specialShapeInfo();
    auto gradOutTadOffsets = packGradOut.specialOffsets();
    hipLaunchKernelGGL(( segmentSumBPTadKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream,
        input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
        indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
        inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets);
  }
  NDArray::registerSpecialUse({output}, {input, indices, gradOut});
  return sd::Status::OK;
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry for unsorted segment-sum backprop: dispatches on (float, index) dtypes.
// Fix: the selector is invoked without an embedded `return` so registerSpecialUse()
// always runs and every control path returns a value - previously unmatched dtype
// combinations fell off the end of this non-void function (undefined behavior).
sd::Status unsortedSegmentSumFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
                                       sd::LongType numOfClasses, NDArray* output) {
  NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
  BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), unsortedSegmentSumFunctorBP_,
                        (context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES);
  NDArray::registerSpecialUse({output}, {input, indices, gradOut});
  return sd::Status::OK;
}
} // namespace helpers
} // namespace ops
} // namespace sd
| db7dc6d79d78b45c39a995d708c6ba4e7d1b451c.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <array/NDArrayFactory.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
// Sorted segment sum over a vector input. Blocks are partitioned across segments; thread
// 0 resolves the block's segment, seeds z[segment] with the segment's first element, then
// all threads atomically add the remaining elements in [start + 1, finish).
// NOTE(review): when segment >= numOfClasses, shared start/finish/zIndex are read after
// the barrier without being initialized - confirm launches never over-provision blocks.
template <typename T, typename I>
static SD_KERNEL void segmentSumLinearKernel(const void* input, const sd::LongType* inputShape, int* starts,
                                             int* lengths, sd::LongType numOfClasses, void* output,
                                             const sd::LongType* outputShape) {
  __shared__ T* val;  // unused scratch slot, kept as-is
  __shared__ sd::LongType xLen, zLen, segment, zIndex;
  __shared__ const T* x;
  __shared__ T* z;
  __shared__ int threadsPerSegment, start, finish;
  if (threadIdx.x == 0) {
    threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
    segment = blockIdx.x / threadsPerSegment;
    x = reinterpret_cast<const T*>(input);
    z = reinterpret_cast<T*>(output);
    xLen = shape::length(inputShape);
    zLen = shape::length(outputShape);
    if (segment < numOfClasses) {
      zIndex = shape::getIndexOffset(segment, outputShape);
      start = starts[segment];
      finish = start + lengths[segment];
      // val[segment] = ;
      // Seed with the first element; the loop below begins at start + 1.
      z[zIndex] = x[shape::getIndexOffset(start, inputShape)];
    }
  }
  __syncthreads();
  for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
    auto xIndex = shape::getIndexOffset(e, inputShape);
    sd::math::atomics::sd_atomicAdd(&z[zIndex], x[xIndex]);
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Unsorted segment sum over a vector ("linear") input. One block handles one segment
// (blockIdx.x == segment id). Thread 0 seeds the output slot with the segment's first
// element (0 for an empty segment); then every thread scans the whole index array and
// atomically adds each element mapped to this segment, skipping the seed element.
// starts/lengths are the per-segment offsets/counts produced by fillUpSegments.
// NOTE(review): assumes the grid has exactly numOfClasses blocks - confirm at call site.
template <typename T, typename I>
static SD_KERNEL void unsortedSegmentSumLinearKernel(const void* input, const sd::LongType* inputShape,
                                                     const void* indices, const sd::LongType* indicesShape, int* starts,
                                                     int* lengths, sd::LongType numOfClasses, void* output,
                                                     const sd::LongType* outputShape) {
  __shared__ T* val;  // unused scratch slot, kept as-is
  __shared__ sd::LongType xLen, zLen, segment, zIndex;
  __shared__ const T* x;
  __shared__ T* z;
  __shared__ const I* y;  // int threadsPerSegment, start, finish;
  if (threadIdx.x == 0) {
    segment = blockIdx.x;
    x = reinterpret_cast<const T*>(input);
    z = reinterpret_cast<T*>(output);
    y = reinterpret_cast<const I*>(indices);
    xLen = shape::length(inputShape);
    zLen = shape::length(outputShape);
    zIndex = shape::getIndexOffset(segment, outputShape);
    // Seed with the first element so the loop below can skip it (avoids double counting).
    if (lengths[segment] > 0)
      z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)];
    else
      z[zIndex] = 0;  // DataTypeUtils::max<T>();
  }
  __syncthreads();
  if (lengths[segment] > 0)
    for (auto e = threadIdx.x; e < xLen; e += blockDim.x) {
      auto xIndex = shape::getIndexOffset(e, inputShape);
      auto yIndex = shape::getIndexOffset(e, indicesShape);
      // e != starts[segment]: that element was already written as the seed by thread 0.
      if (y[yIndex] == segment && e != starts[segment]) {
        sd::math::atomics::sd_atomicAdd(&z[zIndex], x[xIndex]);
      }
    }
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentSum kernel
// TAD (tensor-along-dimension) variant of segment sum: one block per outer (dim-0) row
// of the input. indices[blockIdx.x] names the destination segment; each element of the
// row is atomically accumulated into the matching output TAD, so the caller must have
// zeroed (or pre-seeded) the output buffer beforehand.
// NOTE(review): `blockIdx.x <= total` looks like an off-by-one (`<` expected); it is
// harmless while gridDim.x == sizeAt(inputShape, 0) as launched by the wrappers in this
// file, but confirm before reusing with a larger grid.
template <typename T, typename I>
static SD_KERNEL void segmentSumTadKernel(const void* inputBuf, const sd::LongType* inputShape,
                                          const sd::LongType* inputTads, const sd::LongType* inputTadOffsets,
                                          const I* indices, int* starts, int* lengths, sd::LongType numOfClasses,
                                          void* outputBuf, const sd::LongType* outputShape,
                                          const sd::LongType* outputTads, const sd::LongType* outputTadOffsets) {
  __shared__ T* val;  // unused scratch slot, kept as-is
  __shared__ sd::LongType len, zIndex, total;
  __shared__ T* z;
  __shared__ int start, finish;
  if (threadIdx.x == 0) {
    auto segment = indices[blockIdx.x];  // / threadsPerSegment;
    z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
    len = shape::length(inputTads);
    start = starts[segment];
    finish = start + lengths[segment];
    total = shape::sizeAt(inputShape, 0);
  }
  __syncthreads();
  auto idx = blockIdx.x;
  if (blockIdx.x <= total) {
    auto x = reinterpret_cast<const T*>(inputBuf) + inputTadOffsets[idx];
    if (blockIdx.x == start) {
      // Row that opens its segment: accumulate unconditionally.
      for (auto e = threadIdx.x; e < len; e += blockDim.x) {
        auto xIndex = shape::getIndexOffset(e, inputTads);
        auto zIndex = shape::getIndexOffset(e, outputTads);
        sd::math::atomics::sd_atomicAdd(&z[zIndex], x[xIndex]);
      }
    } else {
      // Remaining rows: accumulate only when their segment is non-empty.
      for (auto e = threadIdx.x; e < len; e += blockDim.x) {
        auto xIndex = shape::getIndexOffset(e, inputTads);
        auto zIndex = shape::getIndexOffset(e, outputTads);
        if (lengths[indices[idx]]) sd::math::atomics::sd_atomicAdd(&z[zIndex], x[xIndex]);
      }
    }
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Host dispatcher for the sorted segment-sum forward pass (one (T, I) instantiation).
// Derives numClasses from the last index (indices are assumed sorted ascending), builds
// per-segment start/length tables on device via fillUpSegments, then launches either the
// linear kernel (vector input) or the TAD kernel (rank > 1, segments along dimension 0).
template <typename T, typename I>
static void segmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
  auto stream = context->getCudaStream();
  sd::LongType numClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1;
  NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
  NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
  // Begin offsets start "past the end" so fillUpSegments can min-reduce them into place.
  classesRangesBegs.assign(indices->lengthOf());
  classesRangesLens.assign(0);
  dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);  // NOTE(review): unused below; launch configs are inlined
  fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
  int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
  int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
  if (input->isVector()) {
    // NOTE(review): block dim == input length; may exceed the device's per-block thread
    // limit for long vectors - confirm upstream bounds the input size.
    segmentSumLinearKernel<T, I><<<numClasses, input->lengthOf(), numClasses * 32 + 32, *stream>>>(
        input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(),
        output->specialShapeInfo());
  } else {
    // Segments run along dimension 0: build TAD packs over the remaining dimensions.
    std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
    auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
    auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
    auto inputTads = packX.specialShapeInfo();
    auto inputTadOffsets = packX.specialOffsets();
    auto outputTads = packZ.specialShapeInfo();
    auto outputTadOffsets = packZ.specialOffsets();
    segmentSumTadKernel<T, I><<<input->sizeAt(0), 512, 2048, *stream>>>(
        input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets,
        reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(),
        output->specialShapeInfo(), outputTads, outputTadOffsets);
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry for sorted segment sum: syncs special buffers, zeroes the output (the TAD
// kernel accumulates into it with atomics), then dispatches on (numeric, index) dtypes.
void segmentSumFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
  NDArray::prepareSpecialUse({output}, {input, indices});
  output->nullify();
  BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentSumFunctor_, (context, input, indices, output),
                        SD_NUMERIC_TYPES, SD_INDEXING_TYPES);
  NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Host dispatcher for the unsorted segment-sum forward pass (indices in any order).
// numOfClasses is supplied by the caller instead of being derived from the indices.
template <typename T, typename I>
static void unsortedSegmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices,
                                       sd::LongType numOfClasses, NDArray* output) {
  auto stream = context->getCudaStream();
  // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
  NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
  NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
  // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
  // classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes);
  // Begin offsets start "past the end" so fillUpSegments can min-reduce them into place.
  classesRangesBegs.assign(indices->lengthOf());
  classesRangesLens.assign(0);
  dim3 dims(numOfClasses, indices->lengthOf(), (numOfClasses + 1) * 64);
  // int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
  fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
  int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
  int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
  if (input->isVector()) {
    unsortedSegmentSumLinearKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>(
        input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(),
        begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
  } else {
    // TAD path accumulates with atomics, so the output must be zeroed first.
    output->assign(0);
    std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
    auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
    auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
    auto inputTads = packX.specialShapeInfo();
    auto inputTadOffsets = packX.specialOffsets();
    auto outputTads = packZ.specialShapeInfo();
    auto outputTadOffsets = packZ.specialOffsets();
    // One block per input row along dimension 0.
    dims.x = input->sizeAt(0);
    segmentSumTadKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>(
        input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets,
        reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(),
        output->specialShapeInfo(), outputTads, outputTadOffsets);
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry for unsorted segment sum: syncs special buffers, zeroes the output, then
// dispatches on (numeric, index) dtypes.
void unsortedSegmentSumFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses,
                               NDArray* output) {
  NDArray::prepareSpecialUse({output}, {input, indices});
  output->nullify();
  BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSumFunctor_,
                        (context, input, indices, numOfClasses, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES);
  NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Backpropagate ops
// -------------------------------------------------------------------------------------------------------------- //
// Sorted sum backpropagate
// Segment-sum backprop for vector inputs: since d(sum)/dx == 1, every input position
// simply receives the upstream gradient of its own segment. Grid-stride loop, so any
// launch configuration is correct.
template <typename T, typename I>
static SD_KERNEL void segmentSumBPLinearKernel(const void* inputBuf, const sd::LongType* inputShape, const void* eps,
                                               const sd::LongType* epsShape, const void* indicesBuf,
                                               const sd::LongType* indicesShape, void* outputBuf,
                                               const sd::LongType* outputShape) {
  auto x = reinterpret_cast<const T*>(inputBuf);
  auto y = reinterpret_cast<const I*>(indicesBuf);
  auto z = reinterpret_cast<T*>(outputBuf);
  auto gradOut = reinterpret_cast<const T*>(eps);
  __shared__ sd::LongType xLen, gradLen;
  if (threadIdx.x == 0) {
    xLen = shape::length(inputShape);
    gradLen = shape::length(epsShape);
  }
  __syncthreads();
  auto start = blockIdx.x * blockDim.x + threadIdx.x;
  auto step = gridDim.x * blockDim.x;
  for (auto e = start; e < xLen; e += step) {
    auto zOffset = shape::getIndexOffset(e, outputShape);
    auto xOffset = shape::getIndexOffset(e, inputShape);
    auto yOffset = shape::getIndexOffset(e, indicesShape);
    auto classIndex = y[yOffset];
    // Broadcast the segment's gradient to each of its input elements.
    auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
    z[zOffset] = gradOut[gradOffsetO];
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Segment-sum backprop, TAD variant: for every index entry i (strided over blocks), the
// gradient TAD of segment y[i] is copied into output TAD i.
// NOTE(review): the inner copy indexes rows with a plain `e`, i.e. it assumes both the
// gradOut and output TADs are contiguous with unit stride - confirm for exotic layouts.
template <typename T, typename I>
static SD_KERNEL void segmentSumBPTadKernel(const void* inputBuf, const sd::LongType* inputShape, const void* eps,
                                            const sd::LongType* epsShape, const void* indicesBuf,
                                            const sd::LongType* indicesShape, void* outputBuf,
                                            const sd::LongType* outputShape, const sd::LongType* inputTad,
                                            const sd::LongType* inputOffsets, const sd::LongType* gradOutTad,
                                            const sd::LongType* gradOutOffsets, const sd::LongType* outTad,
                                            const sd::LongType* outOffsets) {
  __shared__ const T* x;
  __shared__ const T* gradOut;
  __shared__ const I* y;
  __shared__ T* z;
  __shared__ sd::LongType xLen, yLen, gradLen, currentLen;
  if (threadIdx.x == 0) {
    xLen = shape::length(inputShape);
    x = reinterpret_cast<const T*>(inputBuf);
    y = reinterpret_cast<const I*>(indicesBuf);
    z = reinterpret_cast<T*>(outputBuf);
    yLen = shape::length(indicesShape);
    gradOut = reinterpret_cast<const T*>(eps);
    gradLen = shape::length(epsShape);
    currentLen = shape::length(outTad);
  }
  __syncthreads();
  for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
    auto yIndex = shape::getIndexOffset(i, indicesShape);
    auto segment = y[yIndex];
    auto currentOut = z + outOffsets[i];
    auto outGrad = gradOut + gradOutOffsets[segment];
    // Element-wise copy of the segment's gradient row into output row i.
    for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
      currentOut[e] = outGrad[e];
    }
  }
}
// -------------------------------------------------------------------------------------------------------------- //
// Typed dispatch of sorted segment-sum backprop: vector inputs use the linear kernel,
// higher ranks use the TAD kernel over dimension 0.
// NOTE(review): the block dimension is input->lengthOf(); for long inputs this exceeds
// the per-block thread limit - confirm callers bound the input length, or clamp here.
template <typename T, typename I>
sd::Status segmentSumFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
                                NDArray* output) {
  auto stream = context->getCudaStream();
  NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
  if (input->isVector()) {
    sd::LongType loop_size = input->lengthOf();
    auto numOfClasses = gradOut->lengthOf();  // indices->e<sd::LongType>(loop_size - 1);
    segmentSumBPLinearKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(
        input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
        indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
  } else {
    std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
    auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
    auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
    auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
    auto inputTads = packX.specialShapeInfo();
    auto inputTadOffsets = packX.specialOffsets();
    auto outputTads = packZ.specialShapeInfo();
    auto outputTadOffsets = packZ.specialOffsets();
    auto gradOutTads = packGradOut.specialShapeInfo();
    auto gradOutTadOffsets = packGradOut.specialOffsets();
    segmentSumBPTadKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(
        input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
        indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
        inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets);
  }
  NDArray::registerSpecialUse({output}, {input, indices, gradOut});
  return sd::Status::OK;
}
// -------------------------------------------------------------------------------------------------------------- //
// Public entry for sorted segment-sum backprop: dispatches on (float, index) dtypes.
// Fix: the selector is invoked without an embedded `return` so that registerSpecialUse()
// is always executed (mirroring prepareSpecialUse above) and every control path returns
// a value - previously the `return`-in-macro form skipped registerSpecialUse and let the
// function fall off the end (undefined behavior) for unmatched dtype combinations.
sd::Status segmentSumFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
                               NDArray* output) {
  NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
  BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), segmentSumFunctorBP_,
                        (context, input, indices, gradOut, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES);
  NDArray::registerSpecialUse({output}, {input, indices, gradOut});
  return sd::Status::OK;
}
// Typed dispatch of unsorted segment-sum backprop. Identical to segmentSumFunctorBP_
// because the gradient of a sum does not depend on index ordering.
// NOTE(review): the numOfClasses parameter is shadowed by the local derived from gradOut
// in the vector branch and is otherwise unused - kept for interface parity.
template <typename T, typename I>
static sd::Status unsortedSegmentSumFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices,
                                               NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) {
  auto stream = context->getCudaStream();
  NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
  if (input->isVector()) {
    sd::LongType loop_size = input->lengthOf();
    auto numOfClasses = gradOut->lengthOf();  // indices->e<sd::LongType>(loop_size - 1);
    // NOTE(review): block dim == input length may exceed the device limit - confirm bounds.
    segmentSumBPLinearKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(
        input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
        indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
  } else {
    std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
    auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
    auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
    auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
    auto inputTads = packX.specialShapeInfo();
    auto inputTadOffsets = packX.specialOffsets();
    auto outputTads = packZ.specialShapeInfo();
    auto outputTadOffsets = packZ.specialOffsets();
    auto gradOutTads = packGradOut.specialShapeInfo();
    auto gradOutTadOffsets = packGradOut.specialOffsets();
    segmentSumBPTadKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(
        input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
        indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
        inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets);
  }
  NDArray::registerSpecialUse({output}, {input, indices, gradOut});
  return sd::Status::OK;
}
// -------------------------------------------------------------------------------------------------------------- //
// Backward pass wrapper for unsorted segment-sum: dispatches on the output
// (float) and index (integer) dtypes to unsortedSegmentSumFunctorBP_<T, I>.
// The selector is invoked with a leading `return`, so control normally
// leaves through the macro expansion.
sd::Status unsortedSegmentSumFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut,
sd::LongType numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSumFunctorBP_,
(context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
4fcf8021d39eeee405f52ca87daa43800ebefd67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/kernels/funcs/math_function.h"
using paddle::platform::float16;
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
namespace paddle {
namespace operators {
// CUDA: index helpers
#define idx4_4(index, d1, d2, d3, d4) (index % d4)
#define idx4_3(index, d1, d2, d3, d4) ((index / d4) % d3)
#define idx4_2(index, d1, d2, d3, d4) ((index / d4 / d3) % d2)
#define idx4_1(index, d1, d2, d3, d4) ((index / d4 / d3 / d2) % d1)
// Tolerant scalar comparisons used throughout this op: values within 1e-4
// of each other are treated as equal. min/max are namespace-local helpers.
template <typename T>
__device__ bool GT_E(T a, T b) {
  // a >= b, up to the 1e-4 tolerance
  if (a > b) {
    return true;
  }
  return Eigen::numext::abs(a - b) < 1e-4;
}
template <typename T>
__device__ bool LT_E(T a, T b) {
  // a <= b, up to the 1e-4 tolerance
  if (a < b) {
    return true;
  }
  return Eigen::numext::abs(a - b) < 1e-4;
}
template <typename T>
__device__ bool GT(T a, T b) {
  // strictly greater: a must exceed b by more than the tolerance
  return (a - b) > 1e-4;
}
template <typename T>
__device__ T max(T a, T b) {
  if (a > b) {
    return a;
  }
  return b;
}
template <typename T>
__device__ T min(T a, T b) {
  if (a < b) {
    return a;
  }
  return b;
}
/*
* check if (x, y) is in the boundary of roi
*/
template <typename T>
__device__ bool in_quad(T x, T y, T roi_x[], T roi_y[]) {
// Point-in-quadrilateral test for the quad with vertices
// (roi_x[i], roi_y[i]), i = 0..3, listed in order around the boundary.
// Phase 1: return true if (x, y) lies on any edge (within 1e-4 tolerance).
for (int i = 0; i < 4; i++) {
T start_w = roi_x[i];
T start_h = roi_y[i];
T end_w = roi_x[(i + 1) % 4];
T end_h = roi_y[(i + 1) % 4];
if (fabs(start_h - end_h) < 1e-4) {
// Horizontal edge: on-edge iff y matches and x is within the segment.
if (fabs(y - start_h) < 1e-4 && fabs(y - end_h) < 1e-4 &&
GT_E<T>(x, min<T>(start_w, end_w)) &&
LT_E<T>(x, max<T>(start_w, end_w))) {
return true;
}
} else {
// Non-horizontal edge: x-coordinate where the edge crosses height y.
T intersec_x =
(y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w;
if (fabs(intersec_x - x) < 1e-4 && GT_E(y, min<T>(start_h, end_h)) &&
LT_E<T>(y, max<T>(start_h, end_h))) {
return true;
}
}
}
// Phase 2: ray casting — count edges crossed by the horizontal ray going
// right from (x, y); an odd count means the point is inside the quad.
int n_cross = 0;
for (int i = 0; i < 4; i++) {
T start_w = roi_x[i];
T start_h = roi_y[i];
T end_w = roi_x[(i + 1) % 4];
T end_h = roi_y[(i + 1) % 4];
if (fabs(start_h - end_h) < 1e-4) {
// Horizontal edges never cross a horizontal ray.
continue;
}
if (LT_E<T>(y, min<T>(start_h, end_h)) ||
GT<T>(y, max<T>(start_h, end_h))) {
continue;
}
T intersec_x =
(y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w;
if (fabs(intersec_x - x) < 1e-4) {
return true;
}
if (GT<T>(intersec_x, x)) {
n_cross++;
}
}
return (n_cross % 2 == 1);
}
/**
* Perform bilinear interpolation in the input feature map.
*/
template <typename T>
__device__ void bilinear_interpolate(const T* in_data, const int channels,
const int width, const int height,
int in_n, int in_c, T in_w, T in_h, T* val,
int out_idx, int* out2in_idx,
T* out2in_w) {
// Bilinearly samples in_data (NCHW layout, per the base_idx arithmetic
// below) at fractional position (in_w, in_h) of image in_n, channel in_c,
// writing the result to val[0]. Also records the 4 flat input indices and
// their weights into out2in_idx / out2in_w (4 slots per output element,
// starting at out_idx * 4) so the backward pass can scatter gradients
// without recomputing the weights.
// Deal with cases that source coords are out of feature map boundary
if (GT_E<T>(-0.5, in_w) || GT_E<T>(in_w, width - 0.5) ||
GT_E<T>(-0.5, in_h) || GT_E<T>(in_h, height - 0.5)) {
val[0] = 0.0;
return;
}
// Clamp slightly-negative coords (within the -0.5 tolerance) to 0.
if (GT_E<T>(0, in_w)) {
in_w = 0;
}
if (GT_E<T>(0, in_h)) {
in_h = 0;
}
int in_w_floor = floor(in_w);
int in_h_floor = floor(in_h);
int in_w_ceil;
int in_h_ceil;
// Clamp to the last row/column so the ceil neighbour stays in bounds.
if (GT_E<T>(in_w_floor, width - 1)) {
in_w_ceil = in_w_floor = width - 1;
in_w = static_cast<T>(in_w_floor);
} else {
in_w_ceil = in_w_floor + 1;
}
if (GT_E<T>(in_h_floor, height - 1)) {
in_h_ceil = in_h_floor = height - 1;
in_h = static_cast<T>(in_h_floor);
} else {
in_h_ceil = in_h_floor + 1;
}
// Fractional offsets from the floor corner; the four products below are
// the standard bilinear weights (they sum to 1).
T w_floor = in_w - in_w_floor;
T h_floor = in_h - in_h_floor;
T w_ceil = 1 - w_floor;
T h_ceil = 1 - h_floor;
const T* data = in_data + (in_n * channels + in_c) * height * width;
// Do bilinear interpolation
T v1 = data[in_h_floor * width + in_w_floor];
T v2 = data[in_h_ceil * width + in_w_floor];
T v3 = data[in_h_ceil * width + in_w_ceil];
T v4 = data[in_h_floor * width + in_w_ceil];
T w1 = w_ceil * h_ceil;
T w2 = w_ceil * h_floor;
T w3 = w_floor * h_floor;
T w4 = w_floor * h_ceil;
val[0] = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4;
// Record the (flat index, weight) pairs for the backward pass.
int base_idx = (in_n * channels + in_c) * height * width;
out2in_idx[out_idx * 4] = base_idx + in_h_floor * width + in_w_floor;
out2in_idx[out_idx * 4 + 1] = base_idx + in_h_ceil * width + in_w_floor;
out2in_idx[out_idx * 4 + 2] = base_idx + in_h_ceil * width + in_w_ceil;
out2in_idx[out_idx * 4 + 3] = base_idx + in_h_floor * width + in_w_ceil;
out2in_w[out_idx * 4] = w1;
out2in_w[out_idx * 4 + 1] = w2;
out2in_w[out_idx * 4 + 2] = w3;
out2in_w[out_idx * 4 + 3] = w4;
}
/**
* Get the source coordinates in the input feature map.
*
* (u, v, w)^matrix = T * (out_w, out_h, 1)^matrix
*
* in_w = u / w
* in_h = v / w
*
*/
/**
 * Map output pixel (out_w, out_h) back to input coordinates through the
 * row-major 3x3 perspective matrix:
 *   (u, v, w)^T = matrix * (out_w, out_h, 1)^T;  in_w = u/w, in_h = v/w.
 */
template <typename T>
__device__ void get_source_coords(T matrix[], int out_w, int out_h, T* in_w,
                                  T* in_h) {
  const T denom = matrix[6] * out_w + matrix[7] * out_h + matrix[8];
  const T numer_w = matrix[0] * out_w + matrix[1] * out_h + matrix[2];
  const T numer_h = matrix[3] * out_w + matrix[4] * out_h + matrix[5];
  *in_w = numer_w / denom;
  *in_h = numer_h / denom;
}
/**
* Get the matrix of perspective transform.
*
* dx1 = x1 - x2
* dx2 = x3 - x2
* dx3 = x0 - x1 + x2 - x3
* dy1 = y1 - y2
* dy2 = y3 - y2
* dy3 = y0 - y1 + y2 - y3
*
* a11 = (x1 - x0 + a31 * (w - 1) * x1) / (w - 1)
* a12 = (x3 - x0 + a32 * (h - 1) * x3) / (h - 1)
* a13 = x0
* a21 = (y1 - y0 + a31 * (w - 1) * y1) / (w - 1)
* a22 = (y3 - y0 + a32 * (h - 1) * y3) / (h - 1)
* a23 = y0
* a31 = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) / (w - 1)
* a32 = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) / (h - 1)
* a33 = 1
*
*/
template <typename T>
__device__ void get_transform_matrix(const int transformed_width,
const int transformed_height, T roi_x[],
T roi_y[], T matrix[]) {
// Builds the row-major 3x3 perspective matrix (a11..a33 in matrix[0..8])
// mapping output coords onto the quad (roi_x[i], roi_y[i]); the formulas
// are spelled out in the block comment above this function.
T x0 = roi_x[0];
T x1 = roi_x[1];
T x2 = roi_x[2];
T x3 = roi_x[3];
T y0 = roi_y[0];
T y1 = roi_y[1];
T y2 = roi_y[2];
T y3 = roi_y[3];
// Estimate the height and width of RoI
T len1 = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1));
T len2 = sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2));
T len3 = sqrt((x2 - x3) * (x2 - x3) + (y2 - y3) * (y2 - y3));
T len4 = sqrt((x3 - x0) * (x3 - x0) + (y3 - y0) * (y3 - y0));
T estimated_height = (len2 + len4) / 2.0;
T estimated_width = (len1 + len3) / 2.0;
// Get the normalized height and normalized width
// Preserve the RoI aspect ratio, clamping width into [2, transformed_width]
// and height to at least 2 so the (dim - 1) divisors below stay non-zero.
int normalized_height = max(2, transformed_height);
int normalized_width =
round(estimated_width * (normalized_height - 1) / estimated_height) + 1;
normalized_width = max(2, min(normalized_width, transformed_width));
T dx1 = x1 - x2;
T dx2 = x3 - x2;
T dx3 = x0 - x1 + x2 - x3;
T dy1 = y1 - y2;
T dy2 = y3 - y2;
T dy3 = y0 - y1 + y2 - y3;
// The 1e-5 term guards against a zero denominator for degenerate quads.
matrix[6] = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1 + 1e-5) /
(normalized_width - 1);
matrix[7] = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1 + 1e-5) /
(normalized_height - 1);
matrix[8] = 1;
matrix[3] = (y1 - y0 + matrix[6] * (normalized_width - 1) * y1) /
(normalized_width - 1);
matrix[4] = (y3 - y0 + matrix[7] * (normalized_height - 1) * y3) /
(normalized_height - 1);
matrix[5] = y0;
matrix[0] = (x1 - x0 + matrix[6] * (normalized_width - 1) * x1) /
(normalized_width - 1);
matrix[1] = (x3 - x0 + matrix[7] * (normalized_height - 1) * x3) /
(normalized_height - 1);
matrix[2] = x0;
}
/**
 * Forward kernel: one thread per output element (n, c, out_h, out_w).
 * Builds the ROI's perspective matrix, maps the output pixel back into the
 * source image, and either bilinearly samples the input (mask = 1) or
 * writes 0 (mask = 0) when the source point falls outside the quad or the
 * image. The per-ROI matrix is also written to transform_matrix (9 values
 * per ROI, redundantly by every thread of that ROI).
 * NOTE(review): rois_data is indexed with stride 8 (four corner points),
 * yet bottom_rois[0] is also read as roi_batch_ind, which is never used —
 * the source image is taken from roi2image_data instead. Confirm the ROI
 * memory layout against the CPU kernel.
 */
template <typename T>
__global__ void RoiTransformKernel(const float* input_data,
const float* rois_data,
const int* roi2image_data, int num_rois,
int in_height, int in_width, int channels,
int transformed_height,
int transformed_width, float spatial_scale,
T* output_data, int* out2in_idx, T* out2in_w,
int* mask, T* transform_matrix) {
int output_size =
num_rois * transformed_height * transformed_width * channels;
CUDA_KERNEL_LOOP(index, output_size) {
// (n, c, out_h, out_w) is an element in the transformed output
int out_w = idx4_4(index, num_rois, channels, transformed_height,
transformed_width);
int out_h = idx4_3(index, num_rois, channels, transformed_height,
transformed_width);
int c = idx4_2(index, num_rois, channels, transformed_height,
transformed_width);
int n = idx4_1(index, num_rois, channels, transformed_height,
transformed_width);
auto bottom_rois = rois_data + n * 8;
int roi_batch_ind = bottom_rois[0];
T roi_x[4];
T roi_y[4];
// Scale the four ROI corner points into feature-map coordinates.
for (int k = 0; k < 4; ++k) {
roi_x[k] = bottom_rois[2 * k] * spatial_scale;
roi_y[k] = bottom_rois[2 * k + 1] * spatial_scale;
}
// Get transform matrix
T matrix[9];
get_transform_matrix<T>(transformed_width, transformed_height, roi_x, roi_y,
matrix);
for (int i = 0; i < 9; i++) {
transform_matrix[n * 9 + i] = matrix[i];
}
// Get source coords
T in_w;
T in_h;
get_source_coords<T>(matrix, out_w, out_h, &in_w, &in_h);
if (in_quad<T>(in_w, in_h, roi_x, roi_y)) {
if (GT_E<T>(-0.5, in_w) ||
GT_E<T>(in_w, static_cast<T>(in_width - 0.5)) ||
GT_E<T>(-0.5, in_h) ||
GT_E<T>(in_h, static_cast<T>(in_height - 0.5))) {
// Skip if source coords is not in input image
output_data[index] = 0.0;
mask[(n * transformed_height + out_h) * transformed_width + out_w] = 0;
} else {
// Perform bilinear interpolation
int in_n = roi2image_data[n];
bilinear_interpolate<T>(input_data, channels, in_width, in_height, in_n,
c, in_w, in_h, output_data + index, index,
out2in_idx, out2in_w);
mask[(n * transformed_height + out_h) * transformed_width + out_w] = 1;
}
} else {
// Skip if source coords is not in quad
output_data[index] = 0.0;
mask[(n * transformed_height + out_h) * transformed_width + out_w] = 0;
}
}
}
template <typename T>
class CUDAROIPerspectiveTransformOpKernel : public framework::OpKernel<T> {
public:
// Forward op: warps each ROI quad in X (NCHW) to a fixed-size output via
// a perspective transform. Besides Out it fills Out2InIdx/Out2InWeights
// (per-output bilinear source indices and weights, consumed by the grad
// kernel), Mask (1 where the output pixel maps inside the ROI and image),
// and TransformMatrix (9 values per ROI).
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<framework::Tensor>("X");
auto* rois = ctx.Input<framework::LoDTensor>("ROIs");
auto* out = ctx.Output<framework::Tensor>("Out");
auto* out2in_idx = ctx.Output<framework::Tensor>("Out2InIdx");
auto* out2in_w = ctx.Output<framework::Tensor>("Out2InWeights");
auto* mask = ctx.Output<framework::Tensor>("Mask");
auto* out_transform_matrix =
ctx.Output<framework::Tensor>("TransformMatrix");
int* mask_data = mask->mutable_data<int>(ctx.GetPlace());
// Four (index, weight) slots per output element for the backward scatter.
int* out2in_idx_data =
out2in_idx->mutable_data<int>({out->numel(), 4}, ctx.GetPlace());
T* out2in_w_data =
out2in_w->mutable_data<T>({out->numel(), 4}, ctx.GetPlace());
// -1 marks unused slots; the grad kernel skips indices < 0, so weights in
// those slots are never read.
phi::funcs::SetConstant<platform::CUDADeviceContext, int> init;
init(ctx.cuda_device_context(), out2in_idx, static_cast<int>(-1));
auto transformed_height = ctx.Attr<int>("transformed_height");
auto transformed_width = ctx.Attr<int>("transformed_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int in_height = in_dims[2];
int in_width = in_dims[3];
int rois_num = rois->dims()[0];
const T* input_data = in->data<T>();
T* output_data = out->mutable_data<T>(ctx.GetPlace());
const T* rois_data = rois->data<T>();
// Build the ROI -> source-image map on the host from the LoD, then copy
// it to the device.
framework::Tensor roi2image;
framework::Tensor roi2image_dev;
roi2image.Resize({rois_num});
int* roi2image_data = roi2image.mutable_data<int>(platform::CPUPlace());
auto lod = rois->lod().back();
for (size_t i = 0; i < lod.size() - 1; ++i) {
for (size_t j = lod[i]; j < lod[i + 1]; ++j) {
roi2image_data[j] = i;
}
}
paddle::framework::TensorCopySync(roi2image, ctx.GetPlace(),
&roi2image_dev);
int out_size = rois_num * transformed_height * transformed_width * channels;
auto stream = ctx.cuda_device_context().stream();
// Ceil-divide so every output element gets a thread.
int block = 512;
int grid = (out_size + block - 1) / block;
// Get transform matrix
T* matrix =
out_transform_matrix->mutable_data<T>({rois_num, 9}, ctx.GetPlace());
hipLaunchKernelGGL(( RoiTransformKernel<T>), dim3(grid), dim3(block), 0, stream,
input_data, rois_data, roi2image_dev.data<int>(), rois_num, in_height,
in_width, channels, transformed_height, transformed_width,
spatial_scale, output_data, out2in_idx_data, out2in_w_data, mask_data,
matrix);
}
};
template <typename T>
__device__ T get_feature_gradient(T xs, T ys, int w, int h, const int width,
const int height) {
// Returns the bilinear weight that integer grid point (w, h) contributes
// to a sample taken at fractional position (xs, ys); 0 if the sample is
// outside the feature map or (w, h) is not one of its four neighbours.
// The clamping below mirrors bilinear_interpolate so forward and backward
// weights agree.
if (GT_E<T>(-0.5, xs) || GT_E<T>(xs, width - 0.5) || GT_E<T>(-0.5, ys) ||
GT_E<T>(ys, height - 0.5)) {
return 0;
}
if (GT_E<T>(0, xs)) {
xs = 0;
}
if (GT_E<T>(0, ys)) {
ys = 0;
}
int xs_floor = floor(xs);
int ys_floor = floor(ys);
int xs_ceil;
int ys_ceil;
if (GT_E<T>(xs_floor, width - 1)) {
xs_ceil = xs_floor = width - 1;
xs = static_cast<T>(xs_floor);
} else {
xs_ceil = xs_floor + 1;
}
if (GT_E(ys_floor, height - 1)) {
ys_ceil = ys_floor = height - 1;
ys = static_cast<T>(ys_floor);
} else {
ys_ceil = ys_floor + 1;
}
T weight = 0;
// (w + 1 - xs) etc. are the 1D bilinear hat weights in x and y.
if (w == xs_floor) {
if (h == ys_floor) {
weight = (w + 1 - xs) * (h + 1 - ys);
} else if (h == ys_ceil) {
weight = (w + 1 - xs) * (ys + 1 - h);
}
} else if (w == xs_ceil) {
if (h == ys_floor) {
weight = (xs + 1 - w) * (h + 1 - ys);
} else if (h == ys_ceil) {
weight = (xs + 1 - w) * (ys + 1 - h);
}
}
return weight;
}
/**
 * Backward kernel: scatters output gradients into the input gradient.
 * The forward pass recorded up to 4 (flat input index, weight) pairs per
 * output element; slots left at -1 are unused. One thread handles one
 * (output element, slot) pair, hence out_size * 4 work items.
 */
template <typename T>
__global__ void RoiTransformGradKernel(int out_size, const int* out2in_idx_data,
                                       const T* out2in_w_data,
                                       const T* out_grad_data,
                                       T* in_grad_data) {
  CUDA_KERNEL_LOOP(index, out_size * 4) {
    const int src = out2in_idx_data[index];
    if (src < 0) continue;  // slot unused (host initialized indices to -1)
    // Atomic: several output elements may map to the same input pixel.
    atomicAdd(&in_grad_data[src],
              out2in_w_data[index] * out_grad_data[index / 4]);
  }
}
template <typename T>
class CUDAROIPerspectiveTransformGradOpKernel : public framework::OpKernel<T> {
public:
// Backward op: zero-fills X@GRAD, then scatters Out@GRAD into it through
// the 4 (input index, weight) pairs per output element recorded by the
// forward pass in Out2InIdx / Out2InWeights.
void Compute(const framework::ExecutionContext& ctx) const override {
auto* out2in_idx = ctx.Input<framework::LoDTensor>("Out2InIdx");
auto* out2in_w = ctx.Input<framework::LoDTensor>("Out2InWeights");
auto* out_grad =
ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* in_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
T* in_grad_data = in_grad->mutable_data<T>(ctx.GetPlace());
// The kernel accumulates atomically, so the buffer must start at zero.
phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(ctx.cuda_device_context(), in_grad, static_cast<T>(0));
const T* out_grad_data = out_grad->data<T>();
const int* out2in_idx_data = out2in_idx->data<int>();
const T* out2in_w_data = out2in_w->data<T>();
int out_size = out_grad->numel();
auto stream = ctx.cuda_device_context().stream();
// One thread per (output element, slot) pair: out_size * 4 work items.
int block = 512;
int grid = (out_size * 4 + block - 1) / block;
hipLaunchKernelGGL(( RoiTransformGradKernel<T>), dim3(grid), dim3(block), 0, stream,
out_size, out2in_idx_data, out2in_w_data, out_grad_data, in_grad_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(roi_perspective_transform,
ops::CUDAROIPerspectiveTransformOpKernel<float>);
REGISTER_OP_CUDA_KERNEL(roi_perspective_transform_grad,
ops::CUDAROIPerspectiveTransformGradOpKernel<float>);
| 4fcf8021d39eeee405f52ca87daa43800ebefd67.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/phi/kernels/funcs/math_function.h"
using paddle::platform::float16;
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
namespace paddle {
namespace operators {
// CUDA: index helpers
#define idx4_4(index, d1, d2, d3, d4) (index % d4)
#define idx4_3(index, d1, d2, d3, d4) ((index / d4) % d3)
#define idx4_2(index, d1, d2, d3, d4) ((index / d4 / d3) % d2)
#define idx4_1(index, d1, d2, d3, d4) ((index / d4 / d3 / d2) % d1)
// Tolerant scalar comparisons used throughout this op: values within 1e-4
// of each other are treated as equal. min/max are namespace-local helpers.
template <typename T>
__device__ bool GT_E(T a, T b) {
  // a >= b, up to the 1e-4 tolerance
  if (a > b) {
    return true;
  }
  return Eigen::numext::abs(a - b) < 1e-4;
}
template <typename T>
__device__ bool LT_E(T a, T b) {
  // a <= b, up to the 1e-4 tolerance
  if (a < b) {
    return true;
  }
  return Eigen::numext::abs(a - b) < 1e-4;
}
template <typename T>
__device__ bool GT(T a, T b) {
  // strictly greater: a must exceed b by more than the tolerance
  return (a - b) > 1e-4;
}
template <typename T>
__device__ T max(T a, T b) {
  if (a > b) {
    return a;
  }
  return b;
}
template <typename T>
__device__ T min(T a, T b) {
  if (a < b) {
    return a;
  }
  return b;
}
/*
* check if (x, y) is in the boundary of roi
*/
template <typename T>
__device__ bool in_quad(T x, T y, T roi_x[], T roi_y[]) {
// Point-in-quadrilateral test for the quad with vertices
// (roi_x[i], roi_y[i]), i = 0..3, listed in order around the boundary.
// Phase 1: return true if (x, y) lies on any edge (within 1e-4 tolerance).
for (int i = 0; i < 4; i++) {
T start_w = roi_x[i];
T start_h = roi_y[i];
T end_w = roi_x[(i + 1) % 4];
T end_h = roi_y[(i + 1) % 4];
if (fabs(start_h - end_h) < 1e-4) {
// Horizontal edge: on-edge iff y matches and x is within the segment.
if (fabs(y - start_h) < 1e-4 && fabs(y - end_h) < 1e-4 &&
GT_E<T>(x, min<T>(start_w, end_w)) &&
LT_E<T>(x, max<T>(start_w, end_w))) {
return true;
}
} else {
// Non-horizontal edge: x-coordinate where the edge crosses height y.
T intersec_x =
(y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w;
if (fabs(intersec_x - x) < 1e-4 && GT_E(y, min<T>(start_h, end_h)) &&
LT_E<T>(y, max<T>(start_h, end_h))) {
return true;
}
}
}
// Phase 2: ray casting — count edges crossed by the horizontal ray going
// right from (x, y); an odd count means the point is inside the quad.
int n_cross = 0;
for (int i = 0; i < 4; i++) {
T start_w = roi_x[i];
T start_h = roi_y[i];
T end_w = roi_x[(i + 1) % 4];
T end_h = roi_y[(i + 1) % 4];
if (fabs(start_h - end_h) < 1e-4) {
// Horizontal edges never cross a horizontal ray.
continue;
}
if (LT_E<T>(y, min<T>(start_h, end_h)) ||
GT<T>(y, max<T>(start_h, end_h))) {
continue;
}
T intersec_x =
(y - start_h) * (end_w - start_w) / (end_h - start_h) + start_w;
if (fabs(intersec_x - x) < 1e-4) {
return true;
}
if (GT<T>(intersec_x, x)) {
n_cross++;
}
}
return (n_cross % 2 == 1);
}
/**
* Perform bilinear interpolation in the input feature map.
*/
template <typename T>
__device__ void bilinear_interpolate(const T* in_data, const int channels,
const int width, const int height,
int in_n, int in_c, T in_w, T in_h, T* val,
int out_idx, int* out2in_idx,
T* out2in_w) {
// Bilinearly samples in_data (NCHW layout, per the base_idx arithmetic
// below) at fractional position (in_w, in_h) of image in_n, channel in_c,
// writing the result to val[0]. Also records the 4 flat input indices and
// their weights into out2in_idx / out2in_w (4 slots per output element,
// starting at out_idx * 4) so the backward pass can scatter gradients
// without recomputing the weights.
// Deal with cases that source coords are out of feature map boundary
if (GT_E<T>(-0.5, in_w) || GT_E<T>(in_w, width - 0.5) ||
GT_E<T>(-0.5, in_h) || GT_E<T>(in_h, height - 0.5)) {
val[0] = 0.0;
return;
}
// Clamp slightly-negative coords (within the -0.5 tolerance) to 0.
if (GT_E<T>(0, in_w)) {
in_w = 0;
}
if (GT_E<T>(0, in_h)) {
in_h = 0;
}
int in_w_floor = floor(in_w);
int in_h_floor = floor(in_h);
int in_w_ceil;
int in_h_ceil;
// Clamp to the last row/column so the ceil neighbour stays in bounds.
if (GT_E<T>(in_w_floor, width - 1)) {
in_w_ceil = in_w_floor = width - 1;
in_w = static_cast<T>(in_w_floor);
} else {
in_w_ceil = in_w_floor + 1;
}
if (GT_E<T>(in_h_floor, height - 1)) {
in_h_ceil = in_h_floor = height - 1;
in_h = static_cast<T>(in_h_floor);
} else {
in_h_ceil = in_h_floor + 1;
}
// Fractional offsets from the floor corner; the four products below are
// the standard bilinear weights (they sum to 1).
T w_floor = in_w - in_w_floor;
T h_floor = in_h - in_h_floor;
T w_ceil = 1 - w_floor;
T h_ceil = 1 - h_floor;
const T* data = in_data + (in_n * channels + in_c) * height * width;
// Do bilinear interpolation
T v1 = data[in_h_floor * width + in_w_floor];
T v2 = data[in_h_ceil * width + in_w_floor];
T v3 = data[in_h_ceil * width + in_w_ceil];
T v4 = data[in_h_floor * width + in_w_ceil];
T w1 = w_ceil * h_ceil;
T w2 = w_ceil * h_floor;
T w3 = w_floor * h_floor;
T w4 = w_floor * h_ceil;
val[0] = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4;
// Record the (flat index, weight) pairs for the backward pass.
int base_idx = (in_n * channels + in_c) * height * width;
out2in_idx[out_idx * 4] = base_idx + in_h_floor * width + in_w_floor;
out2in_idx[out_idx * 4 + 1] = base_idx + in_h_ceil * width + in_w_floor;
out2in_idx[out_idx * 4 + 2] = base_idx + in_h_ceil * width + in_w_ceil;
out2in_idx[out_idx * 4 + 3] = base_idx + in_h_floor * width + in_w_ceil;
out2in_w[out_idx * 4] = w1;
out2in_w[out_idx * 4 + 1] = w2;
out2in_w[out_idx * 4 + 2] = w3;
out2in_w[out_idx * 4 + 3] = w4;
}
/**
* Get the source coordinates in the input feature map.
*
* (u, v, w)^matrix = T * (out_w, out_h, 1)^matrix
*
* in_w = u / w
* in_h = v / w
*
*/
/**
 * Map output pixel (out_w, out_h) back to input coordinates through the
 * row-major 3x3 perspective matrix:
 *   (u, v, w)^T = matrix * (out_w, out_h, 1)^T;  in_w = u/w, in_h = v/w.
 */
template <typename T>
__device__ void get_source_coords(T matrix[], int out_w, int out_h, T* in_w,
                                  T* in_h) {
  const T denom = matrix[6] * out_w + matrix[7] * out_h + matrix[8];
  const T numer_w = matrix[0] * out_w + matrix[1] * out_h + matrix[2];
  const T numer_h = matrix[3] * out_w + matrix[4] * out_h + matrix[5];
  *in_w = numer_w / denom;
  *in_h = numer_h / denom;
}
/**
* Get the matrix of perspective transform.
*
* dx1 = x1 - x2
* dx2 = x3 - x2
* dx3 = x0 - x1 + x2 - x3
* dy1 = y1 - y2
* dy2 = y3 - y2
* dy3 = y0 - y1 + y2 - y3
*
* a11 = (x1 - x0 + a31 * (w - 1) * x1) / (w - 1)
* a12 = (x3 - x0 + a32 * (h - 1) * x3) / (h - 1)
* a13 = x0
* a21 = (y1 - y0 + a31 * (w - 1) * y1) / (w - 1)
* a22 = (y3 - y0 + a32 * (h - 1) * y3) / (h - 1)
* a23 = y0
* a31 = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) / (w - 1)
* a32 = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) / (h - 1)
* a33 = 1
*
*/
template <typename T>
__device__ void get_transform_matrix(const int transformed_width,
const int transformed_height, T roi_x[],
T roi_y[], T matrix[]) {
// Builds the row-major 3x3 perspective matrix (a11..a33 in matrix[0..8])
// mapping output coords onto the quad (roi_x[i], roi_y[i]); the formulas
// are spelled out in the block comment above this function.
T x0 = roi_x[0];
T x1 = roi_x[1];
T x2 = roi_x[2];
T x3 = roi_x[3];
T y0 = roi_y[0];
T y1 = roi_y[1];
T y2 = roi_y[2];
T y3 = roi_y[3];
// Estimate the height and width of RoI
T len1 = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1));
T len2 = sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2));
T len3 = sqrt((x2 - x3) * (x2 - x3) + (y2 - y3) * (y2 - y3));
T len4 = sqrt((x3 - x0) * (x3 - x0) + (y3 - y0) * (y3 - y0));
T estimated_height = (len2 + len4) / 2.0;
T estimated_width = (len1 + len3) / 2.0;
// Get the normalized height and normalized width
// Preserve the RoI aspect ratio, clamping width into [2, transformed_width]
// and height to at least 2 so the (dim - 1) divisors below stay non-zero.
int normalized_height = max(2, transformed_height);
int normalized_width =
round(estimated_width * (normalized_height - 1) / estimated_height) + 1;
normalized_width = max(2, min(normalized_width, transformed_width));
T dx1 = x1 - x2;
T dx2 = x3 - x2;
T dx3 = x0 - x1 + x2 - x3;
T dy1 = y1 - y2;
T dy2 = y3 - y2;
T dy3 = y0 - y1 + y2 - y3;
// The 1e-5 term guards against a zero denominator for degenerate quads.
matrix[6] = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1 + 1e-5) /
(normalized_width - 1);
matrix[7] = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1 + 1e-5) /
(normalized_height - 1);
matrix[8] = 1;
matrix[3] = (y1 - y0 + matrix[6] * (normalized_width - 1) * y1) /
(normalized_width - 1);
matrix[4] = (y3 - y0 + matrix[7] * (normalized_height - 1) * y3) /
(normalized_height - 1);
matrix[5] = y0;
matrix[0] = (x1 - x0 + matrix[6] * (normalized_width - 1) * x1) /
(normalized_width - 1);
matrix[1] = (x3 - x0 + matrix[7] * (normalized_height - 1) * x3) /
(normalized_height - 1);
matrix[2] = x0;
}
/**
 * Forward kernel: one thread per output element (n, c, out_h, out_w).
 * Builds the ROI's perspective matrix, maps the output pixel back into the
 * source image, and either bilinearly samples the input (mask = 1) or
 * writes 0 (mask = 0) when the source point falls outside the quad or the
 * image. The per-ROI matrix is also written to transform_matrix (9 values
 * per ROI, redundantly by every thread of that ROI).
 * NOTE(review): rois_data is indexed with stride 8 (four corner points),
 * yet bottom_rois[0] is also read as roi_batch_ind, which is never used —
 * the source image is taken from roi2image_data instead. Confirm the ROI
 * memory layout against the CPU kernel.
 */
template <typename T>
__global__ void RoiTransformKernel(const float* input_data,
const float* rois_data,
const int* roi2image_data, int num_rois,
int in_height, int in_width, int channels,
int transformed_height,
int transformed_width, float spatial_scale,
T* output_data, int* out2in_idx, T* out2in_w,
int* mask, T* transform_matrix) {
int output_size =
num_rois * transformed_height * transformed_width * channels;
CUDA_KERNEL_LOOP(index, output_size) {
// (n, c, out_h, out_w) is an element in the transformed output
int out_w = idx4_4(index, num_rois, channels, transformed_height,
transformed_width);
int out_h = idx4_3(index, num_rois, channels, transformed_height,
transformed_width);
int c = idx4_2(index, num_rois, channels, transformed_height,
transformed_width);
int n = idx4_1(index, num_rois, channels, transformed_height,
transformed_width);
auto bottom_rois = rois_data + n * 8;
int roi_batch_ind = bottom_rois[0];
T roi_x[4];
T roi_y[4];
// Scale the four ROI corner points into feature-map coordinates.
for (int k = 0; k < 4; ++k) {
roi_x[k] = bottom_rois[2 * k] * spatial_scale;
roi_y[k] = bottom_rois[2 * k + 1] * spatial_scale;
}
// Get transform matrix
T matrix[9];
get_transform_matrix<T>(transformed_width, transformed_height, roi_x, roi_y,
matrix);
for (int i = 0; i < 9; i++) {
transform_matrix[n * 9 + i] = matrix[i];
}
// Get source coords
T in_w;
T in_h;
get_source_coords<T>(matrix, out_w, out_h, &in_w, &in_h);
if (in_quad<T>(in_w, in_h, roi_x, roi_y)) {
if (GT_E<T>(-0.5, in_w) ||
GT_E<T>(in_w, static_cast<T>(in_width - 0.5)) ||
GT_E<T>(-0.5, in_h) ||
GT_E<T>(in_h, static_cast<T>(in_height - 0.5))) {
// Skip if source coords is not in input image
output_data[index] = 0.0;
mask[(n * transformed_height + out_h) * transformed_width + out_w] = 0;
} else {
// Perform bilinear interpolation
int in_n = roi2image_data[n];
bilinear_interpolate<T>(input_data, channels, in_width, in_height, in_n,
c, in_w, in_h, output_data + index, index,
out2in_idx, out2in_w);
mask[(n * transformed_height + out_h) * transformed_width + out_w] = 1;
}
} else {
// Skip if source coords is not in quad
output_data[index] = 0.0;
mask[(n * transformed_height + out_h) * transformed_width + out_w] = 0;
}
}
}
template <typename T>
class CUDAROIPerspectiveTransformOpKernel : public framework::OpKernel<T> {
public:
// Forward op: warps each ROI quad in X (NCHW) to a fixed-size output via
// a perspective transform. Besides Out it fills Out2InIdx/Out2InWeights
// (per-output bilinear source indices and weights, consumed by the grad
// kernel), Mask (1 where the output pixel maps inside the ROI and image),
// and TransformMatrix (9 values per ROI).
void Compute(const framework::ExecutionContext& ctx) const override {
auto* in = ctx.Input<framework::Tensor>("X");
auto* rois = ctx.Input<framework::LoDTensor>("ROIs");
auto* out = ctx.Output<framework::Tensor>("Out");
auto* out2in_idx = ctx.Output<framework::Tensor>("Out2InIdx");
auto* out2in_w = ctx.Output<framework::Tensor>("Out2InWeights");
auto* mask = ctx.Output<framework::Tensor>("Mask");
auto* out_transform_matrix =
ctx.Output<framework::Tensor>("TransformMatrix");
int* mask_data = mask->mutable_data<int>(ctx.GetPlace());
// Four (index, weight) slots per output element for the backward scatter.
int* out2in_idx_data =
out2in_idx->mutable_data<int>({out->numel(), 4}, ctx.GetPlace());
T* out2in_w_data =
out2in_w->mutable_data<T>({out->numel(), 4}, ctx.GetPlace());
// -1 marks unused slots; the grad kernel skips indices < 0, so weights in
// those slots are never read.
phi::funcs::SetConstant<platform::CUDADeviceContext, int> init;
init(ctx.cuda_device_context(), out2in_idx, static_cast<int>(-1));
auto transformed_height = ctx.Attr<int>("transformed_height");
auto transformed_width = ctx.Attr<int>("transformed_width");
auto spatial_scale = ctx.Attr<float>("spatial_scale");
auto in_dims = in->dims();
int batch_size = in_dims[0];
int channels = in_dims[1];
int in_height = in_dims[2];
int in_width = in_dims[3];
int rois_num = rois->dims()[0];
const T* input_data = in->data<T>();
T* output_data = out->mutable_data<T>(ctx.GetPlace());
const T* rois_data = rois->data<T>();
// Build the ROI -> source-image map on the host from the LoD, then copy
// it to the device.
framework::Tensor roi2image;
framework::Tensor roi2image_dev;
roi2image.Resize({rois_num});
int* roi2image_data = roi2image.mutable_data<int>(platform::CPUPlace());
auto lod = rois->lod().back();
for (size_t i = 0; i < lod.size() - 1; ++i) {
for (size_t j = lod[i]; j < lod[i + 1]; ++j) {
roi2image_data[j] = i;
}
}
paddle::framework::TensorCopySync(roi2image, ctx.GetPlace(),
&roi2image_dev);
int out_size = rois_num * transformed_height * transformed_width * channels;
auto stream = ctx.cuda_device_context().stream();
// Ceil-divide so every output element gets a thread.
int block = 512;
int grid = (out_size + block - 1) / block;
// Get transform matrix
T* matrix =
out_transform_matrix->mutable_data<T>({rois_num, 9}, ctx.GetPlace());
RoiTransformKernel<T><<<grid, block, 0, stream>>>(
input_data, rois_data, roi2image_dev.data<int>(), rois_num, in_height,
in_width, channels, transformed_height, transformed_width,
spatial_scale, output_data, out2in_idx_data, out2in_w_data, mask_data,
matrix);
}
};
template <typename T>
__device__ T get_feature_gradient(T xs, T ys, int w, int h, const int width,
const int height) {
// Returns the bilinear weight that integer grid point (w, h) contributes
// to a sample taken at fractional position (xs, ys); 0 if the sample is
// outside the feature map or (w, h) is not one of its four neighbours.
// The clamping below mirrors bilinear_interpolate so forward and backward
// weights agree.
if (GT_E<T>(-0.5, xs) || GT_E<T>(xs, width - 0.5) || GT_E<T>(-0.5, ys) ||
GT_E<T>(ys, height - 0.5)) {
return 0;
}
if (GT_E<T>(0, xs)) {
xs = 0;
}
if (GT_E<T>(0, ys)) {
ys = 0;
}
int xs_floor = floor(xs);
int ys_floor = floor(ys);
int xs_ceil;
int ys_ceil;
if (GT_E<T>(xs_floor, width - 1)) {
xs_ceil = xs_floor = width - 1;
xs = static_cast<T>(xs_floor);
} else {
xs_ceil = xs_floor + 1;
}
if (GT_E(ys_floor, height - 1)) {
ys_ceil = ys_floor = height - 1;
ys = static_cast<T>(ys_floor);
} else {
ys_ceil = ys_floor + 1;
}
T weight = 0;
// (w + 1 - xs) etc. are the 1D bilinear hat weights in x and y.
if (w == xs_floor) {
if (h == ys_floor) {
weight = (w + 1 - xs) * (h + 1 - ys);
} else if (h == ys_ceil) {
weight = (w + 1 - xs) * (ys + 1 - h);
}
} else if (w == xs_ceil) {
if (h == ys_floor) {
weight = (xs + 1 - w) * (h + 1 - ys);
} else if (h == ys_ceil) {
weight = (xs + 1 - w) * (ys + 1 - h);
}
}
return weight;
}
/**
 * Backward kernel: scatters output gradients into the input gradient.
 * The forward pass recorded up to 4 (flat input index, weight) pairs per
 * output element; slots left at -1 are unused. One thread handles one
 * (output element, slot) pair, hence out_size * 4 work items.
 */
template <typename T>
__global__ void RoiTransformGradKernel(int out_size, const int* out2in_idx_data,
                                       const T* out2in_w_data,
                                       const T* out_grad_data,
                                       T* in_grad_data) {
  CUDA_KERNEL_LOOP(index, out_size * 4) {
    const int src = out2in_idx_data[index];
    if (src < 0) continue;  // slot unused (host initialized indices to -1)
    // Atomic: several output elements may map to the same input pixel.
    atomicAdd(&in_grad_data[src],
              out2in_w_data[index] * out_grad_data[index / 4]);
  }
}
template <typename T>
class CUDAROIPerspectiveTransformGradOpKernel : public framework::OpKernel<T> {
public:
// Backward op: zero-fills X@GRAD, then scatters Out@GRAD into it through
// the 4 (input index, weight) pairs per output element recorded by the
// forward pass in Out2InIdx / Out2InWeights.
void Compute(const framework::ExecutionContext& ctx) const override {
auto* out2in_idx = ctx.Input<framework::LoDTensor>("Out2InIdx");
auto* out2in_w = ctx.Input<framework::LoDTensor>("Out2InWeights");
auto* out_grad =
ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* in_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
T* in_grad_data = in_grad->mutable_data<T>(ctx.GetPlace());
// The kernel accumulates atomically, so the buffer must start at zero.
phi::funcs::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(ctx.cuda_device_context(), in_grad, static_cast<T>(0));
const T* out_grad_data = out_grad->data<T>();
const int* out2in_idx_data = out2in_idx->data<int>();
const T* out2in_w_data = out2in_w->data<T>();
int out_size = out_grad->numel();
auto stream = ctx.cuda_device_context().stream();
// One thread per (output element, slot) pair: out_size * 4 work items.
int block = 512;
int grid = (out_size * 4 + block - 1) / block;
RoiTransformGradKernel<T><<<grid, block, 0, stream>>>(
out_size, out2in_idx_data, out2in_w_data, out_grad_data, in_grad_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(roi_perspective_transform,
ops::CUDAROIPerspectiveTransformOpKernel<float>);
REGISTER_OP_CUDA_KERNEL(roi_perspective_transform_grad,
ops::CUDAROIPerspectiveTransformGradOpKernel<float>);
|
158a7d90d1c151228f0168dc456b2c7b3bcab0a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// function to add the elements of two arrays
// CUDA Kernel function to add the elements of two arrays on the GPU
// Element-wise vector add: y[i] = x[i] + y[i] for every i in [0, n).
// Written for a single-block launch: each thread starts at its lane id
// (threadIdx.x) and strides by the block width until the array is covered.
__global__ void add(int n, float *x, float *y) {
  const int first = threadIdx.x;
  const int step = blockDim.x;
  for (int i = first; i < n; i += step) {
    y[i] = x[i] + y[i];
  }
}
// Unified-memory "hello world": fills two 1M-element vectors on the host,
// adds them on the GPU, then verifies every element equals 3.0f.
int main(void) {
  int N = 1<<20; // 1M elements
  float *x; // = new float[N];
  float *y; // = new float[N];
  // Managed (unified) memory is reachable from both host and device.
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));
  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  int blockSize = 256;
  //int numBlocks = (N + blockSize - 1) / blockSize;
  // NOTE(review): launches a single block (tutorial stage — the multi-block
  // line above is deliberately commented out); the kernel's blockDim-stride
  // loop still covers all N elements. Runtime calls are unchecked — confirm
  // that is acceptable for this demo.
  hipLaunchKernelGGL(( add), dim3(1), dim3(blockSize), 0, 0, N, x, y);
  // Wait for GPU to finish before accessing on host
  hipDeviceSynchronize();
  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;
  // Free memory
  hipFree(x);
  hipFree(y);
  return 0;
}
| 158a7d90d1c151228f0168dc456b2c7b3bcab0a1.cu | #include <iostream>
#include <math.h>
// function to add the elements of two arrays
// CUDA Kernel function to add the elements of two arrays on the GPU
// Element-wise vector add: y[i] = x[i] + y[i] for every i in [0, n).
// Written for a single-block launch: each thread starts at its lane id
// (threadIdx.x) and strides by the block width until the array is covered.
__global__ void add(int n, float *x, float *y) {
  const int first = threadIdx.x;
  const int step = blockDim.x;
  for (int i = first; i < n; i += step) {
    y[i] = x[i] + y[i];
  }
}
// Unified-memory "hello world": fills two 1M-element vectors on the host,
// adds them on the GPU, then verifies every element equals 3.0f.
int main(void) {
  int N = 1<<20; // 1M elements
  float *x; // = new float[N];
  float *y; // = new float[N];
  // Managed (unified) memory is reachable from both host and device.
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));
  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  int blockSize = 256;
  //int numBlocks = (N + blockSize - 1) / blockSize;
  // NOTE(review): launches a single block (tutorial stage — the multi-block
  // line above is deliberately commented out); the kernel's blockDim-stride
  // loop still covers all N elements. Runtime calls are unchecked — confirm
  // that is acceptable for this demo.
  add<<<1, blockSize>>>(N, x, y);
  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();
  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;
  // Free memory
  cudaFree(x);
  cudaFree(y);
  return 0;
}
|
c2ac861b6e792527cc36d306e332ec98c2212f84.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "median_reduce_shuffle_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: sweeps all 20 block shapes over the first
// argv[1] matrix sizes, timing 1000 launches of median_reduce_shuffle_gpu per
// configuration and printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] lines.
int main(int argc, char **argv) {
  hipSetDevice(0);
  // argv[1] = number of entries of matrices_ to sweep (no argc guard — the
  // generating harness always supplies it)
  char* p;int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      // NOTE(review): these allocations are XSIZE*YSIZE BYTES, not
      // XSIZE*YSIZE*sizeof(float) elements — likely undersized for float
      // buffers; confirm against the kernel's access pattern. Nothing is
      // freed inside the sweep either.
      const float *d_in = NULL;
      hipMalloc(&d_in, XSIZE*YSIZE);
      float *d_out = NULL;
      hipMalloc(&d_out, XSIZE*YSIZE);
      float *d_random_numbers = NULL;
      hipMalloc(&d_random_numbers, XSIZE*YSIZE);
      int n_in = 1;
      // round the problem size up to a whole number of blocks
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0)
      {
        iXSIZE++;
      }
      while(iYSIZE%BLOCKY!=0)
      {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      // hipFree(0) forces lazy context creation; first launch is a warm-up
      hipFree(0);hipLaunchKernelGGL((
      median_reduce_shuffle_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_out,d_random_numbers,n_in);
      hipDeviceSynchronize();
      // ten more un-timed warm-up launches (never synchronized)
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
      median_reduce_shuffle_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_out,d_random_numbers,n_in);
      }
      // NOTE(review): no hipDeviceSynchronize() before reading the clock —
      // this times asynchronous launch overhead, not kernel runtime; confirm
      // that is the intended metric.
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
      median_reduce_shuffle_gpu), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_out,d_random_numbers,n_in);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
}} | c2ac861b6e792527cc36d306e332ec98c2212f84.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "median_reduce_shuffle_gpu.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: sweeps all 20 block shapes over the first
// argv[1] matrix sizes, timing 1000 launches of median_reduce_shuffle_gpu per
// configuration and printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] lines.
int main(int argc, char **argv) {
  cudaSetDevice(0);
  // argv[1] = number of entries of matrices_ to sweep (no argc guard — the
  // generating harness always supplies it)
  char* p;int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      // NOTE(review): these allocations are XSIZE*YSIZE BYTES, not
      // XSIZE*YSIZE*sizeof(float) elements — likely undersized for float
      // buffers; confirm against the kernel's access pattern. Nothing is
      // freed inside the sweep either.
      const float *d_in = NULL;
      cudaMalloc(&d_in, XSIZE*YSIZE);
      float *d_out = NULL;
      cudaMalloc(&d_out, XSIZE*YSIZE);
      float *d_random_numbers = NULL;
      cudaMalloc(&d_random_numbers, XSIZE*YSIZE);
      int n_in = 1;
      // round the problem size up to a whole number of blocks
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0)
      {
        iXSIZE++;
      }
      while(iYSIZE%BLOCKY!=0)
      {
        iYSIZE++;
      }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      // cudaFree(0) forces lazy context creation; first launch is a warm-up
      cudaFree(0);
      median_reduce_shuffle_gpu<<<gridBlock,threadBlock>>>(d_in,d_out,d_random_numbers,n_in);
      cudaDeviceSynchronize();
      // ten more un-timed warm-up launches (never synchronized)
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
      median_reduce_shuffle_gpu<<<gridBlock,threadBlock>>>(d_in,d_out,d_random_numbers,n_in);
      }
      // NOTE(review): no cudaDeviceSynchronize() before reading the clock —
      // this times asynchronous launch overhead, not kernel runtime; confirm
      // that is the intended metric.
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
      median_reduce_shuffle_gpu<<<gridBlock,threadBlock>>>(d_in,d_out,d_random_numbers,n_in);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
}} |
5c61baae08d6388b7760dfb0016cdda3e8cff2e4.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/hip/HIPBlas.h>
namespace at { namespace native {
// The functions below are thin forwarding wrappers to the legacy TH CUDA
// implementations: each broadcasts the bias tensor (`self`) to the output
// shape via expand_size, then dispatches.

// out = beta * self + alpha * (batch1 @ batch2), batched.
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
  return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
// Same as baddbmm_cuda, writing into `result`.
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
  return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
// out = beta * self + alpha * (mat1 @ mat2).
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
  return legacy::cuda::_th_addmm(b_self, mat1, mat2, beta, alpha);
}
// Same as addmm_cuda, writing into `result`.
Tensor& addmm_cuda_out(Tensor &result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
  return legacy::cuda::_th_addmm_out(result, b_self, mat1, mat2, beta, alpha);
}
// In-place baddbmm: `self` doubles as the output buffer.
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
// out = beta * self + alpha * sum over batches of (batch1[b] @ batch2[b]).
Tensor addbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
  return legacy::cuda::_th_addbmm(b_self, batch1, batch2, beta, alpha);
}
// out = beta * self + alpha * sum over batches of (batch1[b] @ batch2[b]),
// written into `result`.
// Fix: the broadcast bias (`b_self`) was computed but the un-expanded `self`
// was forwarded to the TH implementation; pass `b_self` instead, matching
// addbmm_cuda and every other *_out wrapper in this file.
Tensor& addbmm_cuda_out(Tensor& result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm_out");
  return legacy::cuda::_th_addbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
// Batched matrix multiply: result = batch1 @ batch2. Resizes `result` to
// (b, m, n) before dispatching to the TH implementation.
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
  result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
  return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
// Allocating variant: bmm_out_cuda performs the actual resize.
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
  Tensor result = at::empty({0}, self.options());
  return native::bmm_out_cuda(result, self, mat2);
}
// Returns a tensor whose layout cuBLAS can consume directly and reports (via
// transpose_tensor) whether it must be treated as transposed:
//   - column-major (stride[0] == 1, stride[1] != 0): pass through as-is;
//   - row-major    (stride[1] == 1, stride[0] != 0): pass through, transposed;
//   - anything else: contiguous clone, flagged transposed.
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
  IntArrayRef strides = tensor.strides();
  if ((strides[0] == 1) && (strides[1] != 0)) {
    transpose_tensor = false;
    return tensor;
  }
  if ((strides[1] == 1) && (strides[0] != 0)) {
    transpose_tensor = true;
    return tensor;
  }
  transpose_tensor = true;
  return tensor.clone(at::MemoryFormat::Contiguous);
}
// Computes result = beta * self + alpha * (mat1 @ mat2) with one cuBLAS GEMM.
// cuBLAS is column-major; row-major tensors are handled by swapping the
// operands and flipping the transpose flags instead of copying data.
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  TORCH_CHECK(
    (mat1.dim() == 2) && (mat2.dim() == 2) &&
    (self.dim() == 2) && (result.dim() == 2),
    "tensors must be 2-D"
  );
  IntArrayRef mat1_sizes = mat1.sizes();
  IntArrayRef mat2_sizes = mat2.sizes();
  IntArrayRef self_sizes = self.sizes();
  TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
  TORCH_CHECK(self_sizes[0] == mat1_sizes[0], "self dim 0 must match mat1 dim 0");
  TORCH_CHECK(self_sizes[1] == mat2_sizes[1], "self dim 1 must match mat2 dim 1");
  // If self and result either point to the same data or if beta is zero,
  // we can avoid copying self into result. Otherwise, we need to copy.
  if (beta.to<double>() != 0.0) {
    if ((result.data_ptr() != self.data_ptr()) || (result.strides() != self.strides())) {
      result.copy_(self);
    }
  }
  // Degenerate output (0 rows or 0 columns): nothing to compute.
  IntArrayRef result_sizes = result.sizes();
  if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
    return result;
  }
  bool transpose_result;
  Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
  bool transpose_mat1;
  bool transpose_mat2;
  // If the result must be transposed, compute (mat2^T @ mat1^T)^T instead:
  // swap the operands here and flip their transpose flags below.
  Tensor mat1_ = transpose_result ? mat2 : mat1;
  Tensor mat2_ = transpose_result ? mat1 : mat2;
  mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
  mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
  if (transpose_result) {
    transpose_mat1 = !transpose_mat1;
    transpose_mat2 = !transpose_mat2;
    mat1_sizes = mat1_.sizes();
    mat2_sizes = mat2_.sizes();
  }
  // GEMM problem shape (m x k) @ (k x n) and leading dimensions, adjusted
  // for the possible operand swap above.
  int64_t m = mat1_sizes[transpose_result ? 1 : 0];
  int64_t k = mat1_sizes[transpose_result ? 0 : 1];
  int64_t n = mat2_sizes[transpose_result ? 0 : 1];
  int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
  int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
  int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
  at::ScalarType scalar_type = self.scalar_type();
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
    scalar_t alpha_val = alpha.to<scalar_t>();
    scalar_t beta_val = beta.to<scalar_t>();
    scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
    scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
    scalar_t* result_ptr = result_.data_ptr<scalar_t>();
    at::cuda::blas::gemm<scalar_t>(
      transpose_mat1 ? 't' : 'n',
      transpose_mat2 ? 't' : 'n',
      m, n, k,
      alpha_val,
      mat1_ptr, mat1_ld,
      mat2_ptr, mat2_ld,
      beta_val,
      result_ptr, result_ld
    );
  });
  // prepare_matrix_for_cublas may have produced a clone; copy back if so.
  if (result.data_ptr() != result_.data_ptr()) {
    result.copy_(result_);
  }
  return result;
}
// result = self @ mat2, implemented as addmm with beta = 0 so `result`'s
// uninitialized contents are never read as a bias.
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
  result.resize_({ self.size(0), mat2.size(1) });
  return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
// Allocating variant of mm_out_cuda.
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
  Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
  return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
} }
| 5c61baae08d6388b7760dfb0016cdda3e8cff2e4.cu | #include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/cuda/CUDABlas.h>
namespace at { namespace native {
// The functions below are thin forwarding wrappers to the legacy TH CUDA
// implementations: each broadcasts the bias tensor (`self`) to the output
// shape via expand_size, then dispatches.

// out = beta * self + alpha * (batch1 @ batch2), batched.
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
  return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
// Same as baddbmm_cuda, writing into `result`.
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
  return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
// out = beta * self + alpha * (mat1 @ mat2).
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
  return legacy::cuda::_th_addmm(b_self, mat1, mat2, beta, alpha);
}
// Same as addmm_cuda, writing into `result`.
Tensor& addmm_cuda_out(Tensor &result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
  return legacy::cuda::_th_addmm_out(result, b_self, mat1, mat2, beta, alpha);
}
// In-place baddbmm: `self` doubles as the output buffer.
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
// out = beta * self + alpha * sum over batches of (batch1[b] @ batch2[b]).
Tensor addbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
  return legacy::cuda::_th_addbmm(b_self, batch1, batch2, beta, alpha);
}
// out = beta * self + alpha * sum over batches of (batch1[b] @ batch2[b]),
// written into `result`.
// Fix: the broadcast bias (`b_self`) was computed but the un-expanded `self`
// was forwarded to the TH implementation; pass `b_self` instead, matching
// addbmm_cuda and every other *_out wrapper in this file.
Tensor& addbmm_cuda_out(Tensor& result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
  Tensor b_self;
  std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm_out");
  return legacy::cuda::_th_addbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
// Batched matrix multiply: result = batch1 @ batch2. Resizes `result` to
// (b, m, n) before dispatching to the TH implementation.
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
  result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
  return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
// Allocating variant: bmm_out_cuda performs the actual resize.
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
  Tensor result = at::empty({0}, self.options());
  return native::bmm_out_cuda(result, self, mat2);
}
// Returns a tensor whose layout cuBLAS can consume directly and reports (via
// transpose_tensor) whether it must be treated as transposed:
//   - column-major (stride[0] == 1, stride[1] != 0): pass through as-is;
//   - row-major    (stride[1] == 1, stride[0] != 0): pass through, transposed;
//   - anything else: contiguous clone, flagged transposed.
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
  IntArrayRef strides = tensor.strides();
  if ((strides[0] == 1) && (strides[1] != 0)) {
    transpose_tensor = false;
    return tensor;
  }
  if ((strides[1] == 1) && (strides[0] != 0)) {
    transpose_tensor = true;
    return tensor;
  }
  transpose_tensor = true;
  return tensor.clone(at::MemoryFormat::Contiguous);
}
// Computes result = beta * self + alpha * (mat1 @ mat2) with one cuBLAS GEMM.
// cuBLAS is column-major; row-major tensors are handled by swapping the
// operands and flipping the transpose flags instead of copying data.
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
  TORCH_CHECK(
    (mat1.dim() == 2) && (mat2.dim() == 2) &&
    (self.dim() == 2) && (result.dim() == 2),
    "tensors must be 2-D"
  );
  IntArrayRef mat1_sizes = mat1.sizes();
  IntArrayRef mat2_sizes = mat2.sizes();
  IntArrayRef self_sizes = self.sizes();
  TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
  TORCH_CHECK(self_sizes[0] == mat1_sizes[0], "self dim 0 must match mat1 dim 0");
  TORCH_CHECK(self_sizes[1] == mat2_sizes[1], "self dim 1 must match mat2 dim 1");
  // If self and result either point to the same data or if beta is zero,
  // we can avoid copying self into result. Otherwise, we need to copy.
  if (beta.to<double>() != 0.0) {
    if ((result.data_ptr() != self.data_ptr()) || (result.strides() != self.strides())) {
      result.copy_(self);
    }
  }
  // Degenerate output (0 rows or 0 columns): nothing to compute.
  IntArrayRef result_sizes = result.sizes();
  if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
    return result;
  }
  bool transpose_result;
  Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
  bool transpose_mat1;
  bool transpose_mat2;
  // If the result must be transposed, compute (mat2^T @ mat1^T)^T instead:
  // swap the operands here and flip their transpose flags below.
  Tensor mat1_ = transpose_result ? mat2 : mat1;
  Tensor mat2_ = transpose_result ? mat1 : mat2;
  mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
  mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
  if (transpose_result) {
    transpose_mat1 = !transpose_mat1;
    transpose_mat2 = !transpose_mat2;
    mat1_sizes = mat1_.sizes();
    mat2_sizes = mat2_.sizes();
  }
  // GEMM problem shape (m x k) @ (k x n) and leading dimensions, adjusted
  // for the possible operand swap above.
  int64_t m = mat1_sizes[transpose_result ? 1 : 0];
  int64_t k = mat1_sizes[transpose_result ? 0 : 1];
  int64_t n = mat2_sizes[transpose_result ? 0 : 1];
  int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
  int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
  int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
  at::ScalarType scalar_type = self.scalar_type();
  AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
    scalar_t alpha_val = alpha.to<scalar_t>();
    scalar_t beta_val = beta.to<scalar_t>();
    scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
    scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
    scalar_t* result_ptr = result_.data_ptr<scalar_t>();
    at::cuda::blas::gemm<scalar_t>(
      transpose_mat1 ? 't' : 'n',
      transpose_mat2 ? 't' : 'n',
      m, n, k,
      alpha_val,
      mat1_ptr, mat1_ld,
      mat2_ptr, mat2_ld,
      beta_val,
      result_ptr, result_ld
    );
  });
  // prepare_matrix_for_cublas may have produced a clone; copy back if so.
  if (result.data_ptr() != result_.data_ptr()) {
    result.copy_(result_);
  }
  return result;
}
// result = self @ mat2, implemented as addmm with beta = 0 so `result`'s
// uninitialized contents are never read as a bias.
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
  result.resize_({ self.size(0), mat2.size(1) });
  return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
// Allocating variant of mm_out_cuda.
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
  Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
  return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
} }
|
8bf1a86d016986c5ed588b9512a0d705cce05c60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cmath>
#include <iostream>
#include <random/mvg.cuh>
#include <random>
#include "test_utils.h"
// mvg.h takes in matrices that are colomn major (as in fortan)
#define IDX2C(i, j, ld) (j * ld + i)
namespace MLCommon {
namespace Random {
// helper kernels
/// @todo Duplicate called vctwiseAccumulate in utils.h (Kalman Filters,
// i think that is much better to use., more general)
// Column-wise accumulation: sums the nPoints x dim sample matrix X into the
// dim-length vector x. X is indexed as idx = row * dim + col; myAtomicAdd is
// needed because every row contributes to the same per-column accumulator.
template <typename T>
__global__ void En_KF_accumulate(const int nPoints, const int dim, const T *X,
                                 T *x) {
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  int col = idx % dim;
  int row = idx / dim;
  if (col < dim && row < nPoints) myAtomicAdd(x + col, X[idx]);
}
// Divides each of the dim entries of x by `divider` (turns a sum into a
// mean).
template <typename T>
__global__ void En_KF_normalize(const int divider, const int dim, T *x) {
  int xi = threadIdx.x + blockDim.x * blockIdx.x;
  if (xi < dim) x[xi] = x[xi] / divider;
}
// X_diff[row][col] = X[row][col] - x[col]: subtracts the per-column mean
// from every sample (same row-major indexing as En_KF_accumulate).
template <typename T>
__global__ void En_KF_dif(const int nPoints, const int dim, const T *X,
                          const T *x, T *X_diff) {
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  int col = idx % dim;
  int row = idx / dim;
  if (col < dim && row < nPoints) X_diff[idx] = X[idx] - x[col];
}
// for specialising tests
// Whether the randomly generated covariance matrix carries non-zero
// off-diagonal entries.
enum Correlation : unsigned char {
  CORRELATED,  // = 0
  UNCORRELATED
};
// Per-test-case parameters for the multivariate-gaussian sampler tests.
template <typename T>
struct MVGInputs {
  T tolerance;  // comparison tolerance for the mean/covariance checks
  typename MultiVarGaussian<T>::Decomposer method;  // decomposition to test
  Correlation corr;
  int dim, nPoints;  // gaussian dimensionality / number of samples drawn
  unsigned long long int seed;
};
// gtest requires a stream operator for parameterized input types; this one
// intentionally prints nothing.
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MVGInputs<T> &dims) {
  return os;
}
// Fixture: draws nPoints samples from MultiVarGaussian with a random mean x
// and random positive-definite covariance P, then computes the sample mean
// (Rand_mean) and sample covariance (Rand_cov) on the GPU so the tests can
// compare them against the requested moments.
// Fixes vs. previous revision: the SetUp-local cuBLAS handle is now
// destroyed (it leaked a BLAS context per test case), and x_d / Rand_cov /
// Rand_mean are now freed in TearDown (they leaked device memory).
template <typename T>
class MVGTest : public ::testing::TestWithParam<MVGInputs<T>> {
 protected:
  void SetUp() override {
    // getting params
    params = ::testing::TestWithParam<MVGInputs<T>>::GetParam();
    dim = params.dim;
    nPoints = params.nPoints;
    method = params.method;
    corr = params.corr;
    tolerance = params.tolerance;
    CUBLAS_CHECK(hipblasCreate(&cublasH));
    CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH));
    CUDA_CHECK(hipStreamCreate(&stream));
    // host/device buffers for the mean, covariance and sample matrix
    P = (T *)malloc(sizeof(T) * dim * dim);
    x = (T *)malloc(sizeof(T) * dim);
    X = (T *)malloc(sizeof(T) * dim * nPoints);
    CUDA_CHECK(hipMalloc((void **)&P_d, sizeof(T) * dim * dim));
    CUDA_CHECK(hipMalloc((void **)&X_d, sizeof(T) * nPoints * dim));
    CUDA_CHECK(hipMalloc((void **)&x_d, sizeof(T) * dim));
    CUDA_CHECK(hipMalloc((void **)&Rand_cov, sizeof(T) * dim * dim));
    CUDA_CHECK(hipMalloc((void **)&Rand_mean, sizeof(T) * dim));
    // generating random mean and cov.
    srand(params.seed);
    for (int j = 0; j < dim; j++) x[j] = rand() % 100 + 5.0f;
    // for random Cov. martix
    std::default_random_engine generator(params.seed);
    std::uniform_real_distribution<T> distribution(0.0, 1.0);
    // P: symmetric with a dominated diagonal => positive definite
    for (int j = 0; j < dim; j++) {
      for (int i = 0; i < j + 1; i++) {
        T k = distribution(generator);
        if (corr == UNCORRELATED) k = 0.0;
        P[IDX2C(i, j, dim)] = k;
        P[IDX2C(j, i, dim)] = k;
        if (i == j) P[IDX2C(i, j, dim)] += dim;
      }
    }
    // porting inputs to gpu
    updateDevice(P_d, P, dim * dim, stream);
    updateDevice(x_d, x, dim, stream);
    // initilizing the mvg
    mvg = new MultiVarGaussian<T>(dim, method);
    size_t o = mvg->init(cublasH, cusolverH, stream);
    // give the workspace area to mvg
    CUDA_CHECK(hipMalloc((void **)&workspace_d, o));
    mvg->set_workspace(workspace_d);
    // get gaussians in X_d | P_d is destroyed.
    mvg->give_gaussian(nPoints, P_d, X_d, x_d);
    // saving the mean of the randoms in Rand_mean
    //@todo can be swapped with a API that calculates mean
    CUDA_CHECK(hipMemset(Rand_mean, 0, dim * sizeof(T)));
    dim3 block = (64);
    dim3 grid = (ceildiv(nPoints * dim, (int)block.x));
    hipLaunchKernelGGL(( En_KF_accumulate), dim3(grid), dim3(block), 0, 0, nPoints, dim, X_d, Rand_mean);
    CUDA_CHECK(hipPeekAtLastError());
    grid = (ceildiv(dim, (int)block.x));
    hipLaunchKernelGGL(( En_KF_normalize), dim3(grid), dim3(block), 0, 0, nPoints, dim, Rand_mean);
    CUDA_CHECK(hipPeekAtLastError());
    // storing the error wrt random point mean in X_d
    grid = (ceildiv(dim * nPoints, (int)block.x));
    hipLaunchKernelGGL(( En_KF_dif), dim3(grid), dim3(block), 0, 0, nPoints, dim, X_d, Rand_mean, X_d);
    CUDA_CHECK(hipPeekAtLastError());
    // finding the cov matrix, placing in Rand_cov:
    // Rand_cov = X_diff * X_diff^T / (nPoints - 1)
    T alfa = 1.0 / (nPoints - 1), beta = 0.0;
    hipblasHandle_t handle;
    CUBLAS_CHECK(hipblasCreate(&handle));
    CUBLAS_CHECK(LinAlg::cublasgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, dim, dim,
                                    nPoints, &alfa, X_d, dim, X_d, dim, &beta,
                                    Rand_cov, dim, stream));
    // destroy the SetUp-local handle (previously leaked per test case)
    CUBLAS_CHECK(hipblasDestroy(handle));
    // restoring cov provided into P_d
    updateDevice(P_d, P, dim * dim, stream);
  }
  void TearDown() override {
    // freeing mallocs — every device buffer allocated in SetUp
    CUDA_CHECK(hipFree(P_d));
    CUDA_CHECK(hipFree(X_d));
    CUDA_CHECK(hipFree(x_d));
    CUDA_CHECK(hipFree(Rand_cov));
    CUDA_CHECK(hipFree(Rand_mean));
    CUDA_CHECK(hipFree(workspace_d));
    free(P);
    free(x);
    free(X);
    // deleting mvg
    mvg->deinit();
    delete mvg;
    CUBLAS_CHECK(hipblasDestroy(cublasH));
    CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH));
    CUDA_CHECK(hipStreamDestroy(stream));
  }
 protected:
  MVGInputs<T> params;
  T *P, *x, *X, *workspace_d, *P_d, *x_d, *X_d;
  int dim, nPoints;
  typename MultiVarGaussian<T>::Decomposer method;
  Correlation corr;
  MultiVarGaussian<T> *mvg = NULL;
  T *Rand_cov, *Rand_mean, tolerance;
  hipblasHandle_t cublasH;
  hipsolverDnHandle_t cusolverH;
  hipStream_t stream;
};  // end of MVGTest class
///@todo find out the reason that Un-correlated covs are giving problems (in qr)
// Declare your inputs
// Test matrix: {tolerance, decomposer, correlation, dim, nPoints, seed}
const std::vector<MVGInputs<float>> inputsf = {
  {0.3f, MultiVarGaussian<float>::Decomposer::chol_decomp,
   Correlation::CORRELATED, 5, 30000, 6ULL},
  {0.1f, MultiVarGaussian<float>::Decomposer::chol_decomp,
   Correlation::UNCORRELATED, 5, 30000, 6ULL},
  {0.25f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::CORRELATED,
   5, 30000, 6ULL},
  {0.1f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::UNCORRELATED,
   5, 30000, 6ULL},
  {0.2f, MultiVarGaussian<float>::Decomposer::qr, Correlation::CORRELATED, 5,
   30000, 6ULL},
  // { 0.2f, MultiVarGaussian<float>::Decomposer::qr,
  // Correlation::UNCORRELATED, 5, 30000, 6ULL}
};
// Double-precision cases use a larger sample count for tighter statistics.
const std::vector<MVGInputs<double>> inputsd = {
  {0.25, MultiVarGaussian<double>::Decomposer::chol_decomp,
   Correlation::CORRELATED, 10, 3000000, 6ULL},
  {0.1, MultiVarGaussian<double>::Decomposer::chol_decomp,
   Correlation::UNCORRELATED, 10, 3000000, 6ULL},
  {0.25, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::CORRELATED,
   10, 3000000, 6ULL},
  {0.1, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::UNCORRELATED,
   10, 3000000, 6ULL},
  {0.2, MultiVarGaussian<double>::Decomposer::qr, Correlation::CORRELATED, 10,
   3000000, 6ULL},
  // { 0.2, MultiVarGaussian<double>::Decomposer::qr,
  // Correlation::UNCORRELATED, 10, 3000000, 6ULL}
};
// make the tests
typedef MVGTest<float> MVGTestF;
typedef MVGTest<double> MVGTestD;
// Sample mean must match the requested mean within the per-case tolerance.
TEST_P(MVGTestF, MeanIsCorrectF) {
  EXPECT_TRUE(devArrMatch(x_d, Rand_mean, dim, CompareApprox<float>(tolerance)))
    << " in MeanIsCorrect";
}
// Sample covariance must match the requested covariance P.
TEST_P(MVGTestF, CovIsCorrectF) {
  EXPECT_TRUE(
    devArrMatch(P_d, Rand_cov, dim, dim, CompareApprox<float>(tolerance)))
    << " in CovIsCorrect";
}
TEST_P(MVGTestD, MeanIsCorrectD) {
  EXPECT_TRUE(
    devArrMatch(x_d, Rand_mean, dim, CompareApprox<double>(tolerance)))
    << " in MeanIsCorrect";
}
TEST_P(MVGTestD, CovIsCorrectD) {
  EXPECT_TRUE(
    devArrMatch(P_d, Rand_cov, dim, dim, CompareApprox<double>(tolerance)))
    << " in CovIsCorrect";
}
// call the tests
INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestD, ::testing::ValuesIn(inputsd));
}; // end of namespace Random
}; // end of namespace MLCommon
| 8bf1a86d016986c5ed588b9512a0d705cce05c60.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <cmath>
#include <iostream>
#include <random/mvg.cuh>
#include <random>
#include "test_utils.h"
// mvg.h takes in matrices that are colomn major (as in fortan)
#define IDX2C(i, j, ld) (j * ld + i)
namespace MLCommon {
namespace Random {
// helper kernels
/// @todo Duplicate called vctwiseAccumulate in utils.h (Kalman Filters,
// i think that is much better to use., more general)
// Column-wise accumulation: sums the nPoints x dim sample matrix X into the
// dim-length vector x. X is indexed as idx = row * dim + col; myAtomicAdd is
// needed because every row contributes to the same per-column accumulator.
template <typename T>
__global__ void En_KF_accumulate(const int nPoints, const int dim, const T *X,
                                 T *x) {
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  int col = idx % dim;
  int row = idx / dim;
  if (col < dim && row < nPoints) myAtomicAdd(x + col, X[idx]);
}
// Divides each of the dim entries of x by `divider` (turns a sum into a
// mean).
template <typename T>
__global__ void En_KF_normalize(const int divider, const int dim, T *x) {
  int xi = threadIdx.x + blockDim.x * blockIdx.x;
  if (xi < dim) x[xi] = x[xi] / divider;
}
// X_diff[row][col] = X[row][col] - x[col]: subtracts the per-column mean
// from every sample (same row-major indexing as En_KF_accumulate).
template <typename T>
__global__ void En_KF_dif(const int nPoints, const int dim, const T *X,
                          const T *x, T *X_diff) {
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  int col = idx % dim;
  int row = idx / dim;
  if (col < dim && row < nPoints) X_diff[idx] = X[idx] - x[col];
}
// for specialising tests
// Whether the randomly generated covariance matrix carries non-zero
// off-diagonal entries.
enum Correlation : unsigned char {
  CORRELATED,  // = 0
  UNCORRELATED
};
// Per-test-case parameters for the multivariate-gaussian sampler tests.
template <typename T>
struct MVGInputs {
  T tolerance;  // comparison tolerance for the mean/covariance checks
  typename MultiVarGaussian<T>::Decomposer method;  // decomposition to test
  Correlation corr;
  int dim, nPoints;  // gaussian dimensionality / number of samples drawn
  unsigned long long int seed;
};
// gtest requires a stream operator for parameterized input types; this one
// intentionally prints nothing.
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MVGInputs<T> &dims) {
  return os;
}
// Fixture: draws nPoints samples from MultiVarGaussian with a random mean x
// and random positive-definite covariance P, then computes the sample mean
// (Rand_mean) and sample covariance (Rand_cov) on the GPU so the tests can
// compare them against the requested moments.
// Fixes vs. previous revision: the SetUp-local cuBLAS handle is now
// destroyed (it leaked a BLAS context per test case), and x_d / Rand_cov /
// Rand_mean are now freed in TearDown (they leaked device memory).
template <typename T>
class MVGTest : public ::testing::TestWithParam<MVGInputs<T>> {
 protected:
  void SetUp() override {
    // getting params
    params = ::testing::TestWithParam<MVGInputs<T>>::GetParam();
    dim = params.dim;
    nPoints = params.nPoints;
    method = params.method;
    corr = params.corr;
    tolerance = params.tolerance;
    CUBLAS_CHECK(cublasCreate(&cublasH));
    CUSOLVER_CHECK(cusolverDnCreate(&cusolverH));
    CUDA_CHECK(cudaStreamCreate(&stream));
    // host/device buffers for the mean, covariance and sample matrix
    P = (T *)malloc(sizeof(T) * dim * dim);
    x = (T *)malloc(sizeof(T) * dim);
    X = (T *)malloc(sizeof(T) * dim * nPoints);
    CUDA_CHECK(cudaMalloc((void **)&P_d, sizeof(T) * dim * dim));
    CUDA_CHECK(cudaMalloc((void **)&X_d, sizeof(T) * nPoints * dim));
    CUDA_CHECK(cudaMalloc((void **)&x_d, sizeof(T) * dim));
    CUDA_CHECK(cudaMalloc((void **)&Rand_cov, sizeof(T) * dim * dim));
    CUDA_CHECK(cudaMalloc((void **)&Rand_mean, sizeof(T) * dim));
    // generating random mean and cov.
    srand(params.seed);
    for (int j = 0; j < dim; j++) x[j] = rand() % 100 + 5.0f;
    // for random Cov. martix
    std::default_random_engine generator(params.seed);
    std::uniform_real_distribution<T> distribution(0.0, 1.0);
    // P: symmetric with a dominated diagonal => positive definite
    for (int j = 0; j < dim; j++) {
      for (int i = 0; i < j + 1; i++) {
        T k = distribution(generator);
        if (corr == UNCORRELATED) k = 0.0;
        P[IDX2C(i, j, dim)] = k;
        P[IDX2C(j, i, dim)] = k;
        if (i == j) P[IDX2C(i, j, dim)] += dim;
      }
    }
    // porting inputs to gpu
    updateDevice(P_d, P, dim * dim, stream);
    updateDevice(x_d, x, dim, stream);
    // initilizing the mvg
    mvg = new MultiVarGaussian<T>(dim, method);
    size_t o = mvg->init(cublasH, cusolverH, stream);
    // give the workspace area to mvg
    CUDA_CHECK(cudaMalloc((void **)&workspace_d, o));
    mvg->set_workspace(workspace_d);
    // get gaussians in X_d | P_d is destroyed.
    mvg->give_gaussian(nPoints, P_d, X_d, x_d);
    // saving the mean of the randoms in Rand_mean
    //@todo can be swapped with a API that calculates mean
    CUDA_CHECK(cudaMemset(Rand_mean, 0, dim * sizeof(T)));
    dim3 block = (64);
    dim3 grid = (ceildiv(nPoints * dim, (int)block.x));
    En_KF_accumulate<<<grid, block>>>(nPoints, dim, X_d, Rand_mean);
    CUDA_CHECK(cudaPeekAtLastError());
    grid = (ceildiv(dim, (int)block.x));
    En_KF_normalize<<<grid, block>>>(nPoints, dim, Rand_mean);
    CUDA_CHECK(cudaPeekAtLastError());
    // storing the error wrt random point mean in X_d
    grid = (ceildiv(dim * nPoints, (int)block.x));
    En_KF_dif<<<grid, block>>>(nPoints, dim, X_d, Rand_mean, X_d);
    CUDA_CHECK(cudaPeekAtLastError());
    // finding the cov matrix, placing in Rand_cov:
    // Rand_cov = X_diff * X_diff^T / (nPoints - 1)
    T alfa = 1.0 / (nPoints - 1), beta = 0.0;
    cublasHandle_t handle;
    CUBLAS_CHECK(cublasCreate(&handle));
    CUBLAS_CHECK(LinAlg::cublasgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, dim, dim,
                                    nPoints, &alfa, X_d, dim, X_d, dim, &beta,
                                    Rand_cov, dim, stream));
    // destroy the SetUp-local handle (previously leaked per test case)
    CUBLAS_CHECK(cublasDestroy(handle));
    // restoring cov provided into P_d
    updateDevice(P_d, P, dim * dim, stream);
  }
  void TearDown() override {
    // freeing mallocs — every device buffer allocated in SetUp
    CUDA_CHECK(cudaFree(P_d));
    CUDA_CHECK(cudaFree(X_d));
    CUDA_CHECK(cudaFree(x_d));
    CUDA_CHECK(cudaFree(Rand_cov));
    CUDA_CHECK(cudaFree(Rand_mean));
    CUDA_CHECK(cudaFree(workspace_d));
    free(P);
    free(x);
    free(X);
    // deleting mvg
    mvg->deinit();
    delete mvg;
    CUBLAS_CHECK(cublasDestroy(cublasH));
    CUSOLVER_CHECK(cusolverDnDestroy(cusolverH));
    CUDA_CHECK(cudaStreamDestroy(stream));
  }
 protected:
  MVGInputs<T> params;
  T *P, *x, *X, *workspace_d, *P_d, *x_d, *X_d;
  int dim, nPoints;
  typename MultiVarGaussian<T>::Decomposer method;
  Correlation corr;
  MultiVarGaussian<T> *mvg = NULL;
  T *Rand_cov, *Rand_mean, tolerance;
  cublasHandle_t cublasH;
  cusolverDnHandle_t cusolverH;
  cudaStream_t stream;
};  // end of MVGTest class
///@todo find out the reason that Un-correlated covs are giving problems (in qr)
// Declare your inputs
// Field order mirrors the members consumed by MVGTest::SetUp:
// {tolerance, decomposition method, correlation mode, dim, nPoints,
//  <last field presumably an RNG seed -- TODO confirm against MVGInputs>}.
const std::vector<MVGInputs<float>> inputsf = {
  {0.3f, MultiVarGaussian<float>::Decomposer::chol_decomp,
   Correlation::CORRELATED, 5, 30000, 6ULL},
  {0.1f, MultiVarGaussian<float>::Decomposer::chol_decomp,
   Correlation::UNCORRELATED, 5, 30000, 6ULL},
  {0.25f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::CORRELATED,
   5, 30000, 6ULL},
  {0.1f, MultiVarGaussian<float>::Decomposer::jacobi, Correlation::UNCORRELATED,
   5, 30000, 6ULL},
  {0.2f, MultiVarGaussian<float>::Decomposer::qr, Correlation::CORRELATED, 5,
   30000, 6ULL},
  // qr + UNCORRELATED disabled -- see the @todo above
  // { 0.2f, MultiVarGaussian<float>::Decomposer::qr,
  // Correlation::UNCORRELATED, 5, 30000, 6ULL}
};
const std::vector<MVGInputs<double>> inputsd = {
  {0.25, MultiVarGaussian<double>::Decomposer::chol_decomp,
   Correlation::CORRELATED, 10, 3000000, 6ULL},
  {0.1, MultiVarGaussian<double>::Decomposer::chol_decomp,
   Correlation::UNCORRELATED, 10, 3000000, 6ULL},
  {0.25, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::CORRELATED,
   10, 3000000, 6ULL},
  {0.1, MultiVarGaussian<double>::Decomposer::jacobi, Correlation::UNCORRELATED,
   10, 3000000, 6ULL},
  {0.2, MultiVarGaussian<double>::Decomposer::qr, Correlation::CORRELATED, 10,
   3000000, 6ULL},
  // qr + UNCORRELATED disabled -- see the @todo above
  // { 0.2, MultiVarGaussian<double>::Decomposer::qr,
  // Correlation::UNCORRELATED, 10, 3000000, 6ULL}
};
// make the tests
typedef MVGTest<float> MVGTestF;
typedef MVGTest<double> MVGTestD;
// Each test compares the empirical mean / covariance of the generated
// samples (Rand_mean / Rand_cov, computed in SetUp) against the requested
// mean x_d / covariance P_d, within the per-case tolerance.
TEST_P(MVGTestF, MeanIsCorrectF) {
  EXPECT_TRUE(devArrMatch(x_d, Rand_mean, dim, CompareApprox<float>(tolerance)))
    << " in MeanIsCorrect";
}
TEST_P(MVGTestF, CovIsCorrectF) {
  EXPECT_TRUE(
    devArrMatch(P_d, Rand_cov, dim, dim, CompareApprox<float>(tolerance)))
    << " in CovIsCorrect";
}
TEST_P(MVGTestD, MeanIsCorrectD) {
  EXPECT_TRUE(
    devArrMatch(x_d, Rand_mean, dim, CompareApprox<double>(tolerance)))
    << " in MeanIsCorrect";
}
TEST_P(MVGTestD, CovIsCorrectD) {
  EXPECT_TRUE(
    devArrMatch(P_d, Rand_cov, dim, dim, CompareApprox<double>(tolerance)))
    << " in CovIsCorrect";
}
// call the tests
INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MVGTests, MVGTestD, ::testing::ValuesIn(inputsd));
}; // end of namespace Random
}; // end of namespace MLCommon
|
d495f964c5dbef68af26be1a753f1edd0724246f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex *, const Complex *, int, float);
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int,
const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: all work happens in runTest, which terminates the process
// itself via exit(), so control never returns here.
int main(int argc, char **argv)
{
    runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Demonstrates FFT-based 1D convolution:
//   1. build a random complex signal and filter on the host,
//   2. zero-pad both (PadData) so the circular FFT product equals a linear
//      convolution,
//   3. forward-FFT both in place, multiply pointwise with 1/N scaling,
//      inverse-FFT,
//   4. compare against the direct time-domain Convolve() reference.
// Terminates the process via exit(EXIT_SUCCESS/EXIT_FAILURE); never returns.
void runTest(int argc, char **argv)
{
    printf("[simpleCUFFT] is starting...\n");
    findCudaDevice(argc, (const char **)argv);
    // Allocate host memory for the signal
    Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
    // Initalize the memory for the signal (uniform [0,1] real part)
    for (unsigned int i = 0; i < SIGNAL_SIZE; ++i)
    {
        h_signal[i].x = rand() / (float)RAND_MAX;
        h_signal[i].y = 0;
    }
    // Allocate host memory for the filter
    Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
    // Initalize the memory for the filter
    for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i)
    {
        h_filter_kernel[i].x = rand() / (float)RAND_MAX;
        h_filter_kernel[i].y = 0;
    }
    // Pad signal and filter kernel to a common length
    Complex *h_padded_signal;
    Complex *h_padded_filter_kernel;
    int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
                           h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
    int mem_size = sizeof(Complex) * new_size;
    // Allocate device memory for signal
    Complex *d_signal;
    checkCudaErrors(hipMalloc((void **)&d_signal, mem_size));
    // Copy host memory to device
    checkCudaErrors(hipMemcpy(d_signal, h_padded_signal, mem_size,
                              hipMemcpyHostToDevice));
    // Allocate device memory for filter kernel
    Complex *d_filter_kernel;
    checkCudaErrors(hipMalloc((void **)&d_filter_kernel, mem_size));
    // Copy host memory to device
    checkCudaErrors(hipMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
                              hipMemcpyHostToDevice));
    // CUFFT plan: one complex-to-complex transform of the padded length
    hipfftHandle plan;
    checkCudaErrors(hipfftPlan1d(&plan, new_size, HIPFFT_C2C, 1));
    // Transform signal and kernel (in place)
    printf("Transforming signal hipfftExecC2C\n");
    checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD));
    checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_filter_kernel, (hipfftComplex *)d_filter_kernel, HIPFFT_FORWARD));
    // Multiply the coefficients together and normalize the result
    // (1/new_size compensates for the unscaled inverse transform)
    printf("Launching ComplexPointwiseMulAndScale<<< >>>\n");
    hipLaunchKernelGGL(( ComplexPointwiseMulAndScale), dim3(32), dim3(256), 0, 0, d_signal, d_filter_kernel, new_size, 1.0f / new_size);
    // Check if kernel execution generated and error
    getLastCudaError("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
    // Transform signal back
    printf("Transforming signal back hipfftExecC2C\n");
    checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_BACKWARD));
    // Copy device memory to host
    // (h_convolved_signal aliases h_padded_signal, so only one free below)
    Complex *h_convolved_signal = h_padded_signal;
    checkCudaErrors(hipMemcpy(h_convolved_signal, d_signal, mem_size,
                              hipMemcpyDeviceToHost));
    // Allocate host memory for the convolution result
    Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
    // Convolve on the host (time-domain reference)
    Convolve(h_signal, SIGNAL_SIZE,
             h_filter_kernel, FILTER_KERNEL_SIZE,
             h_convolved_signal_ref);
    // check result: 2 * SIGNAL_SIZE floats = SIGNAL_SIZE interleaved complex
    bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);
    //Destroy CUFFT context
    checkCudaErrors(hipfftDestroy(plan));
    // cleanup memory
    free(h_signal);
    free(h_filter_kernel);
    free(h_padded_signal);
    free(h_padded_filter_kernel);
    free(h_convolved_signal_ref);
    checkCudaErrors(hipFree(d_signal));
    checkCudaErrors(hipFree(d_filter_kernel));
    // hipDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling hipDeviceReset causes all profile data to be
    // flushed before the application exits
    hipDeviceReset();
    exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
// Pad data
// Zero-pads the signal and cyclically rearranges the filter kernel so that
// a pointwise product in frequency space yields a linear convolution.
// Returns the common padded length; hands back two malloc'd buffers through
// padded_signal / padded_filter_kernel (the caller frees them).
int PadData(const Complex *signal, Complex **padded_signal, int signal_size,
            const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size)
{
    const int minRadius = filter_kernel_size / 2;
    const int maxRadius = filter_kernel_size - minRadius;
    const int new_size = signal_size + maxRadius;
    // Signal: original samples followed by zeros.
    Complex *paddedSig = (Complex *)malloc(sizeof(Complex) * new_size);
    memcpy(paddedSig, signal, signal_size * sizeof(Complex));
    memset(paddedSig + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
    *padded_signal = paddedSig;
    // Filter: upper half first, zeros in the middle, lower half wrapped to the end.
    Complex *paddedKer = (Complex *)malloc(sizeof(Complex) * new_size);
    memcpy(paddedKer, filter_kernel + minRadius, maxRadius * sizeof(Complex));
    memset(paddedKer + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
    memcpy(paddedKer + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
    *padded_filter_kernel = paddedKer;
    return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
// Direct (time-domain) convolution on the host; reference for the FFT path.
// filtered_signal[i] accumulates signal[i + j] * filter_kernel[minRadius - j]
// over the kernel window, treating out-of-range signal samples as zero.
void Convolve(const Complex *signal, int signal_size,
              const Complex *filter_kernel, int filter_kernel_size,
              Complex *filtered_signal)
{
    const int minRadius = filter_kernel_size / 2;
    const int maxRadius = filter_kernel_size - minRadius;
    for (int i = 0; i < signal_size; ++i)
    {
        Complex acc;
        acc.x = 0;
        acc.y = 0;
        for (int j = -maxRadius + 1; j <= minRadius; ++j)
        {
            const int k = i + j;
            if (k >= 0 && k < signal_size)
            {
                acc = ComplexAdd(acc, ComplexMul(signal[k], filter_kernel[minRadius - j]));
            }
        }
        filtered_signal[i] = acc;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
// Componentwise sum of two complex numbers.
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
    Complex sum = a;
    sum.x += b.x;
    sum.y += b.y;
    return sum;
}
// Complex scale
// Scales both components of a complex number by s.
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
    Complex scaled = a;
    scaled.x *= s;
    scaled.y *= s;
    return scaled;
}
// Complex multiplication
// Complex product: (a.x + i a.y) * (b.x + i b.y).
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
    Complex prod;
    prod.x = a.x * b.x - a.y * b.y;
    prod.y = a.x * b.y + a.y * b.x;
    return prod;
}
// Complex pointwise multiplication
// Elementwise a[i] = scale * (a[i] * b[i]) over 'size' complex values.
// Grid-stride loop, so any launch configuration covers the whole array.
static __global__ void ComplexPointwiseMulAndScale(Complex *a, const Complex *b, int size, float scale)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride)
    {
        a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
    }
}
| d495f964c5dbef68af26be1a753f1edd0724246f.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. */
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda_runtime.h>
#include <cufft.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
static __global__ void ComplexPointwiseMulAndScale(Complex *, const Complex *, int, float);
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int,
const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: all work happens in runTest, which terminates the process
// itself via exit(), so control never returns here.
int main(int argc, char **argv)
{
    runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Demonstrates FFT-based 1D convolution:
//   1. build a random complex signal and filter on the host,
//   2. zero-pad both (PadData) so the circular FFT product equals a linear
//      convolution,
//   3. forward-FFT both in place, multiply pointwise with 1/N scaling,
//      inverse-FFT,
//   4. compare against the direct time-domain Convolve() reference.
// Terminates the process via exit(EXIT_SUCCESS/EXIT_FAILURE); never returns.
void runTest(int argc, char **argv)
{
    printf("[simpleCUFFT] is starting...\n");
    findCudaDevice(argc, (const char **)argv);
    // Allocate host memory for the signal
    Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
    // Initalize the memory for the signal (uniform [0,1] real part)
    for (unsigned int i = 0; i < SIGNAL_SIZE; ++i)
    {
        h_signal[i].x = rand() / (float)RAND_MAX;
        h_signal[i].y = 0;
    }
    // Allocate host memory for the filter
    Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
    // Initalize the memory for the filter
    for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i)
    {
        h_filter_kernel[i].x = rand() / (float)RAND_MAX;
        h_filter_kernel[i].y = 0;
    }
    // Pad signal and filter kernel to a common length
    Complex *h_padded_signal;
    Complex *h_padded_filter_kernel;
    int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
                           h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
    int mem_size = sizeof(Complex) * new_size;
    // Allocate device memory for signal
    Complex *d_signal;
    checkCudaErrors(cudaMalloc((void **)&d_signal, mem_size));
    // Copy host memory to device
    checkCudaErrors(cudaMemcpy(d_signal, h_padded_signal, mem_size,
                               cudaMemcpyHostToDevice));
    // Allocate device memory for filter kernel
    Complex *d_filter_kernel;
    checkCudaErrors(cudaMalloc((void **)&d_filter_kernel, mem_size));
    // Copy host memory to device
    checkCudaErrors(cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
                               cudaMemcpyHostToDevice));
    // CUFFT plan: one complex-to-complex transform of the padded length
    cufftHandle plan;
    checkCudaErrors(cufftPlan1d(&plan, new_size, CUFFT_C2C, 1));
    // Transform signal and kernel (in place)
    printf("Transforming signal cufftExecC2C\n");
    checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD));
    checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD));
    // Multiply the coefficients together and normalize the result
    // (1/new_size compensates for the unscaled inverse transform)
    printf("Launching ComplexPointwiseMulAndScale<<< >>>\n");
    ComplexPointwiseMulAndScale<<<32, 256>>>(d_signal, d_filter_kernel, new_size, 1.0f / new_size);
    // Check if kernel execution generated and error
    getLastCudaError("Kernel execution failed [ ComplexPointwiseMulAndScale ]");
    // Transform signal back
    printf("Transforming signal back cufftExecC2C\n");
    checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE));
    // Copy device memory to host
    // (h_convolved_signal aliases h_padded_signal, so only one free below)
    Complex *h_convolved_signal = h_padded_signal;
    checkCudaErrors(cudaMemcpy(h_convolved_signal, d_signal, mem_size,
                               cudaMemcpyDeviceToHost));
    // Allocate host memory for the convolution result
    Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
    // Convolve on the host (time-domain reference)
    Convolve(h_signal, SIGNAL_SIZE,
             h_filter_kernel, FILTER_KERNEL_SIZE,
             h_convolved_signal_ref);
    // check result: 2 * SIGNAL_SIZE floats = SIGNAL_SIZE interleaved complex
    bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);
    //Destroy CUFFT context
    checkCudaErrors(cufftDestroy(plan));
    // cleanup memory
    free(h_signal);
    free(h_filter_kernel);
    free(h_padded_signal);
    free(h_padded_filter_kernel);
    free(h_convolved_signal_ref);
    checkCudaErrors(cudaFree(d_signal));
    checkCudaErrors(cudaFree(d_filter_kernel));
    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    cudaDeviceReset();
    exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
// Pad data
// Zero-pads the signal and cyclically rearranges the filter kernel so that
// a pointwise product in frequency space yields a linear convolution.
// Returns the common padded length; hands back two malloc'd buffers through
// padded_signal / padded_filter_kernel (the caller frees them).
int PadData(const Complex *signal, Complex **padded_signal, int signal_size,
            const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size)
{
    const int minRadius = filter_kernel_size / 2;
    const int maxRadius = filter_kernel_size - minRadius;
    const int new_size = signal_size + maxRadius;
    // Signal: original samples followed by zeros.
    Complex *paddedSig = (Complex *)malloc(sizeof(Complex) * new_size);
    memcpy(paddedSig, signal, signal_size * sizeof(Complex));
    memset(paddedSig + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
    *padded_signal = paddedSig;
    // Filter: upper half first, zeros in the middle, lower half wrapped to the end.
    Complex *paddedKer = (Complex *)malloc(sizeof(Complex) * new_size);
    memcpy(paddedKer, filter_kernel + minRadius, maxRadius * sizeof(Complex));
    memset(paddedKer + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
    memcpy(paddedKer + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
    *padded_filter_kernel = paddedKer;
    return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
// Direct (time-domain) convolution on the host; reference for the FFT path.
// filtered_signal[i] accumulates signal[i + j] * filter_kernel[minRadius - j]
// over the kernel window, treating out-of-range signal samples as zero.
void Convolve(const Complex *signal, int signal_size,
              const Complex *filter_kernel, int filter_kernel_size,
              Complex *filtered_signal)
{
    const int minRadius = filter_kernel_size / 2;
    const int maxRadius = filter_kernel_size - minRadius;
    for (int i = 0; i < signal_size; ++i)
    {
        Complex acc;
        acc.x = 0;
        acc.y = 0;
        for (int j = -maxRadius + 1; j <= minRadius; ++j)
        {
            const int k = i + j;
            if (k >= 0 && k < signal_size)
            {
                acc = ComplexAdd(acc, ComplexMul(signal[k], filter_kernel[minRadius - j]));
            }
        }
        filtered_signal[i] = acc;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
// Componentwise sum of two complex numbers.
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
    Complex sum = a;
    sum.x += b.x;
    sum.y += b.y;
    return sum;
}
// Complex scale
// Scales both components of a complex number by s.
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
    Complex scaled = a;
    scaled.x *= s;
    scaled.y *= s;
    return scaled;
}
// Complex multiplication
// Complex product: (a.x + i a.y) * (b.x + i b.y).
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
    Complex prod;
    prod.x = a.x * b.x - a.y * b.y;
    prod.y = a.x * b.y + a.y * b.x;
    return prod;
}
// Complex pointwise multiplication
// Elementwise a[i] = scale * (a[i] * b[i]) over 'size' complex values.
// Grid-stride loop, so any launch configuration covers the whole array.
static __global__ void ComplexPointwiseMulAndScale(Complex *a, const Complex *b, int size, float scale)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride)
    {
        a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale);
    }
}
|
753517c7b65ba0fd4b01b40d003ec098f6585553.hip | // !!! This is a file automatically generated by hipify!!!
#include "myobj.h"
#include "book.h"
#include "timer.h"
#include "myvector.h"
#include <fstream>
#include <iostream>
#include <algorithm>
#include <hip/hip_runtime.h>
// Parses a Wavefront-OBJ-like mesh file at 'path'.
// Recognized records:
//   v  x y z       -> vertex position, appended to vs
//   ny x y z       -> appended to vns (non-standard tag; presumably this
//                     loader's variant of "vn" vertex normals -- verify)
//   vt u v         -> texture coordinate, parsed but discarded
//   f  a/b c/d e/f -> triangle: vertex indices converted to 0-based; the
//                     per-corner second index is read and ignored
//   td x           -> scalar tag, parsed and discarded (meaning unknown here)
// NOTE(review): no check that the file opened successfully -- a missing
// file silently yields an empty object.
MyObj MyObj::load(const std::string &path) {
    MyObj obj;
    std::ifstream f;
    f.open(path, std::ios::in);
    std::string s;
    float x, y, z;
    int f_v1, f_v2, f_v3;
    int f_vn1, f_vn2, f_vn3;
    while (f >> s) {
        if (s == "vt") {
            f >> x >> y;
        } else if (s == "v") {
            f >> x >> y >> z;
            obj.vs.emplace_back(x, y, z);
        } else if (s == "ny") {
            f >> x >> y >> z;
            obj.vns.emplace_back(x, y, z);
        } else if (s == "f") {
            // f.ignore() skips the '/' between vertex and normal indices
            f >> f_v1; f.ignore(); f >> f_vn1;
            f >> f_v2; f.ignore(); f >> f_vn2;
            f >> f_v3; f.ignore(); f >> f_vn3;
            obj.fs.emplace_back(f_v1 - 1, f_v2 - 1, f_v3 - 1);
        } else if (s == "td") {
            f >> x;
        }
    }
    return obj;
}
// Triangle (face) count of the mesh.
int MyObj::nFace() {
    return static_cast<int>(fs.size());
}
// Vertex count of the mesh.
int MyObj::nVertex() {
    return static_cast<int>(vs.size());
}
// Exact triangle-triangle intersection test between faces i and j (host
// side).  Faces sharing a vertex index are reported as non-contacting,
// since adjacent faces always touch at the shared vertex/edge.
bool MyObj::triContactDetection(int i, int j) const {
    if (fs[i].hasSharedWith(fs[j]))
        return false;
    return tri_contact(vs[fs[i][0]], vs[fs[i][1]], vs[fs[i][2]],
                       vs[fs[j][0]], vs[fs[j][1]], vs[fs[j][2]]);
}
// Builds the BVH: one leaf node per triangle, then BVHNode::build assembles
// the hierarchy over them.  'leaves' is allocated with new[] and released
// in ~MyObj.
// NOTE(review): calling this twice leaks the previous 'leaves' array and
// nodes; ownership of the individual leaf BVHNodes after build() is not
// visible here -- confirm the tree rooted at 'bvh' deletes them.
void MyObj::constructBVH() {
    leaves = new BVHNode*[nFace()];
    for (int i = 0; i < nFace(); i++) {
        leaves[i] = new BVHNode;
        leaves[i]->setTriangle(i, vs[fs[i][0]], vs[fs[i][1]], vs[fs[i][2]]);
    }
    bvh = BVHNode::build(leaves, nFace());
}
// CPU self-collision detection: builds the BVH, then queries every leaf
// against the whole tree.  Each contact is discovered twice (once from each
// participating triangle), hence the final division by two.
int MyObj::selfContactDetection() {
    pairs.clear();
    Timer timer;
    timer.start();
    std::cout << "Constructing BVH";
    constructBVH();
    double buildTime = timer.end();
    printf(", used %.3f s\n", buildTime);
    timer.start();
    int hits = 0;
    const int faces = nFace();
    for (int i = 0; i < faces; i++) {
        hits += leaves[i]->contact(bvh, this, pairs);
    }
    double queryTime = timer.end();
    // query time, double-counted hits, and the brute-force pair count
    std::cout << queryTime << " " << hits << " " << (faces * (faces - 1)) / 2 << std::endl;
    return hits / 2;
}
// Uploads the mesh (vertices + triangles) to the GPU.
// Allocates d_vs / d_fs and copies vs / fs into them; returns the number of
// device bytes allocated so the caller can report total GPU usage.
// The pointers are released by freeObjMem().
unsigned long long MyObj::allocObjMem() {
    const size_t vsBytes = sizeof(vec3f) * nVertex();
    const size_t fsBytes = sizeof(Triangle) * nFace();
    unsigned long long gpuObjMemSize = vsBytes + fsBytes;
    std::cout << "allocating for object, " << (double)gpuObjMemSize / 1024 / 1024 << " MB" << std::endl;
    // Check every call like selfContactDetectionCUDA() does: the original
    // ignored the return codes, so a failed hipMalloc left d_vs/d_fs invalid
    // and the following hipMemcpy silently corrupted state.
    HANDLE_ERROR(hipMalloc(&d_vs, vsBytes));
    HANDLE_ERROR(hipMalloc(&d_fs, fsBytes));
    HANDLE_ERROR(hipMemcpy(d_vs, vs.data(), vsBytes, hipMemcpyHostToDevice));
    HANDLE_ERROR(hipMemcpy(d_fs, fs.data(), fsBytes, hipMemcpyHostToDevice));
    return gpuObjMemSize;
}
// Releases the device copies of the mesh and clears the pointers so a
// subsequent allocObjMem() starts from a clean state.
void MyObj::freeObjMem() {
    hipFree(d_vs);
    d_vs = nullptr;
    hipFree(d_fs);
    d_fs = nullptr;
}
// Triangle-triangle intersection test between faces i and j using the
// device-resident copies of the mesh (d_vs / d_fs); callable from both host
// and device.  Faces sharing a vertex index are reported as non-contacting,
// since adjacent faces always touch at the shared vertex/edge.
__host__ __device__
bool MyObj::triContactDetectionCUDA(vec3f *d_vs, Triangle *d_fs, int i, int j) {
    if (d_fs[i].hasSharedWith(d_fs[j]))
        return false;
    return tri_contact(d_vs[d_fs[i][0]], d_vs[d_fs[i][1]], d_vs[d_fs[i][2]],
                       d_vs[d_fs[j][0]], d_vs[d_fs[j][1]], d_vs[d_fs[j][2]]);
}
// GPU self-collision detection.
// Pipeline: build the BVH on the host, flatten it (dense()) and upload the
// flat tree, per-leaf indices and the mesh; launch cdOnCUDA with one thread
// per leaf, each walking the tree with an explicit stack and writing its hit
// count into d_a.  Every pair is counted from both triangles, hence the
// final division by two.
// blockSize: threads per block for the traversal kernel.
// NOTE(review): repeated calls leak the previous h_bvh / leaf_idx host
// buffers (only the last ones are freed in ~MyObj) -- confirm intended use
// is one call per object.
int MyObj::selfContactDetectionCUDA(int blockSize) {
    Timer t;
    t.start();
    std::cout << "Constructing BVH";
    constructBVH();
    h_bvh = bvh->dense();
    double e = t.end();
    printf(", used %.3f s\n", e);
    auto gpuBVHMemSize = sizeof(BVHDenseNode) * bvh->count();
    std::cout << "allocating for BVH, " << (double)gpuBVHMemSize / 1024 / 1024 << " MB" << std::endl;
    HANDLE_ERROR(hipMalloc(&d_bvh, gpuBVHMemSize));
    HANDLE_ERROR(hipMemcpy(d_bvh, h_bvh, gpuBVHMemSize, hipMemcpyHostToDevice));
    std::cout << "copied BVH to GPU" << std::endl;
    auto gpuLeafMemSize = sizeof(int) * nFace();
    leaf_idx = new int[nFace()];
    for (int i = 0; i < nFace(); i++) {
        leaf_idx[i] = leaves[i]->idx;
    }
    std::cout << "allocating memory for leaves, " << (double)gpuLeafMemSize / 1024 / 1024 << " MB" << std::endl;
    HANDLE_ERROR(hipMalloc(&d_leaves, gpuLeafMemSize));
    HANDLE_ERROR(hipMemcpy(d_leaves, leaf_idx, gpuLeafMemSize, hipMemcpyHostToDevice));
    auto gpuObjMemSize = allocObjMem();
    // Per-leaf result vectors: device-side scratch for the contact pairs.
    MyVector *h_vec = new MyVector[nFace()];
    MyVector *d_vec;
    auto gpuResMemSize = sizeof(MyVector) * nFace();
    std::cout << "allocating for results, " << (double)gpuResMemSize / 1024 / 1024 << " MB" << std::endl;
    HANDLE_ERROR(hipMalloc(&d_vec, gpuResMemSize));  // was unchecked
    HANDLE_ERROR(hipMemcpy(d_vec, h_vec, gpuResMemSize, hipMemcpyHostToDevice));
    auto gpuAllMemSize = gpuLeafMemSize + gpuBVHMemSize + gpuObjMemSize + gpuResMemSize;
    std::cout << "Totally allocated " << (double)gpuAllMemSize / 1024 / 1024 << " MB" << std::endl;
    // Per-leaf hit counters, zero-initialized on the host then uploaded.
    int *h_a = new int[nFace()];
    int *d_a;
    memset(h_a, 0, sizeof(int) * nFace());
    HANDLE_ERROR(hipMalloc(&d_a, sizeof(int) * nFace()));  // was unchecked
    HANDLE_ERROR(hipMemcpy(d_a, h_a, sizeof(int) * nFace(), hipMemcpyHostToDevice));
    // Time the traversal kernel with device events.
    hipEvent_t start, stop;
    HANDLE_ERROR(hipEventCreate(&start));
    HANDLE_ERROR(hipEventCreate(&stop));
    HANDLE_ERROR(hipEventRecord(start, nullptr));
    int blockNum = (nFace() + blockSize - 1) / blockSize;  // ceil-div: cover every leaf
    printf("Start detecting ...\n");
    hipLaunchKernelGGL(( cdOnCUDA), dim3(blockNum), dim3(blockSize), 0, 0, d_vs, d_fs, d_leaves, d_bvh, d_vec, d_a, nFace(), bvh->height);
    HANDLE_ERROR(hipGetLastError());
    HANDLE_ERROR(hipEventRecord(stop, nullptr));
    HANDLE_ERROR(hipEventSynchronize(stop));
    float elapsedTime;
    HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop));
    printf("Time to detect: %3.1f ms\n", elapsedTime);
    HANDLE_ERROR(hipEventDestroy(start));
    HANDLE_ERROR(hipEventDestroy(stop));
    HANDLE_ERROR(hipMemcpy(h_vec, d_vec, gpuResMemSize, hipMemcpyDeviceToHost));
    HANDLE_ERROR(hipMemcpy(h_a, d_a, sizeof(int) * nFace(), hipMemcpyDeviceToHost));
    int cnt = 0;
    for (int i = 0; i < nFace(); i++) {
        cnt += h_a[i];
    }
    // Release device buffers.  The original leaked d_a and d_bvh on every
    // call; d_bvh is a member but is re-allocated here each invocation and
    // freed nowhere else, so it is released here as well.
    hipFree(d_leaves);
    hipFree(d_vec);
    hipFree(d_a);
    hipFree(d_bvh);
    d_bvh = nullptr;
    delete[] h_a;
    delete[] h_vec;
    freeObjMem();
    return cnt / 2;
}
// Destructor: releases host-side BVH storage.
// 'leaves' (BVHNode*[], from constructBVH) and 'leaf_idx' (int[], from
// selfContactDetectionCUDA) are allocated with new[], so they must be
// released with delete[]; the original free() calls were undefined
// behaviour.  The individual leaf BVHNodes are assumed to be owned by the
// tree rooted at 'bvh' -- TODO confirm BVHNode::build()/~BVHNode ownership.
MyObj::~MyObj() {
    delete[] leaves;
    delete bvh;
    if (h_bvh) {
        // h_bvh comes from BVHNode::dense(); free() kept on the assumption
        // that dense() allocates with malloc -- verify.
        free(h_bvh);
        delete[] leaf_idx;
    }
}
// True when the two triangles share at least one vertex index.
__host__ __device__ bool Triangle::hasSharedWith(const Triangle &t) const {
    for (const auto &mine : v_id) {
        for (const auto &theirs : t.v_id) {
            if (mine == theirs)
                return true;
        }
    }
    return false;
}
// One thread per BVH leaf: thread 'offset' starts from its leaf node
// (d_leaves[offset] indexes into the flattened tree d_bvh) and walks the
// tree with an explicit stack bounded by 'height', writing its per-leaf
// contact count into a[offset] and contact data into d_vec.
__global__ void cdOnCUDA(vec3f *d_vs, Triangle *d_fs, int *d_leaves, BVHDenseNode *d_bvh, MyVector *d_vec, int *a, int nFace, int height) {
    int offset = blockIdx.x * blockDim.x + threadIdx.x;
    if (offset < nFace) {  // guard the grid tail
        a[offset] = d_bvh[d_leaves[offset]].contact_stack(d_bvh, height, d_vs, d_fs, d_vec);
    }
}
| 753517c7b65ba0fd4b01b40d003ec098f6585553.cu | #include "myobj.h"
#include "book.h"
#include "timer.h"
#include "myvector.h"
#include <fstream>
#include <iostream>
#include <algorithm>
#include <cuda.h>
// Parses a Wavefront-OBJ-like mesh file at 'path'.
// Recognized records:
//   v  x y z       -> vertex position, appended to vs
//   ny x y z       -> appended to vns (non-standard tag; presumably this
//                     loader's variant of "vn" vertex normals -- verify)
//   vt u v         -> texture coordinate, parsed but discarded
//   f  a/b c/d e/f -> triangle: vertex indices converted to 0-based; the
//                     per-corner second index is read and ignored
//   td x           -> scalar tag, parsed and discarded (meaning unknown here)
// NOTE(review): no check that the file opened successfully -- a missing
// file silently yields an empty object.
MyObj MyObj::load(const std::string &path) {
    MyObj obj;
    std::ifstream f;
    f.open(path, std::ios::in);
    std::string s;
    float x, y, z;
    int f_v1, f_v2, f_v3;
    int f_vn1, f_vn2, f_vn3;
    while (f >> s) {
        if (s == "vt") {
            f >> x >> y;
        } else if (s == "v") {
            f >> x >> y >> z;
            obj.vs.emplace_back(x, y, z);
        } else if (s == "ny") {
            f >> x >> y >> z;
            obj.vns.emplace_back(x, y, z);
        } else if (s == "f") {
            // f.ignore() skips the '/' between vertex and normal indices
            f >> f_v1; f.ignore(); f >> f_vn1;
            f >> f_v2; f.ignore(); f >> f_vn2;
            f >> f_v3; f.ignore(); f >> f_vn3;
            obj.fs.emplace_back(f_v1 - 1, f_v2 - 1, f_v3 - 1);
        } else if (s == "td") {
            f >> x;
        }
    }
    return obj;
}
// Triangle (face) count of the mesh.
int MyObj::nFace() {
    return static_cast<int>(fs.size());
}
// Vertex count of the mesh.
int MyObj::nVertex() {
    return static_cast<int>(vs.size());
}
// Exact triangle-triangle intersection test between faces i and j (host
// side).  Faces sharing a vertex index are reported as non-contacting,
// since adjacent faces always touch at the shared vertex/edge.
bool MyObj::triContactDetection(int i, int j) const {
    if (fs[i].hasSharedWith(fs[j]))
        return false;
    return tri_contact(vs[fs[i][0]], vs[fs[i][1]], vs[fs[i][2]],
                       vs[fs[j][0]], vs[fs[j][1]], vs[fs[j][2]]);
}
// Builds the BVH: one leaf node per triangle, then BVHNode::build assembles
// the hierarchy over them.  'leaves' is allocated with new[] and released
// in ~MyObj.
// NOTE(review): calling this twice leaks the previous 'leaves' array and
// nodes; ownership of the individual leaf BVHNodes after build() is not
// visible here -- confirm the tree rooted at 'bvh' deletes them.
void MyObj::constructBVH() {
    leaves = new BVHNode*[nFace()];
    for (int i = 0; i < nFace(); i++) {
        leaves[i] = new BVHNode;
        leaves[i]->setTriangle(i, vs[fs[i][0]], vs[fs[i][1]], vs[fs[i][2]]);
    }
    bvh = BVHNode::build(leaves, nFace());
}
// CPU self-collision detection: builds the BVH, then queries every leaf
// against the whole tree.  Each contact is discovered twice (once from each
// participating triangle), hence the final division by two.
int MyObj::selfContactDetection() {
    pairs.clear();
    Timer timer;
    timer.start();
    std::cout << "Constructing BVH";
    constructBVH();
    double buildTime = timer.end();
    printf(", used %.3f s\n", buildTime);
    timer.start();
    int hits = 0;
    const int faces = nFace();
    for (int i = 0; i < faces; i++) {
        hits += leaves[i]->contact(bvh, this, pairs);
    }
    double queryTime = timer.end();
    // query time, double-counted hits, and the brute-force pair count
    std::cout << queryTime << " " << hits << " " << (faces * (faces - 1)) / 2 << std::endl;
    return hits / 2;
}
// Uploads the mesh (vertices + triangles) to the GPU.
// Allocates d_vs / d_fs and copies vs / fs into them; returns the number of
// device bytes allocated so the caller can report total GPU usage.
// The pointers are released by freeObjMem().
unsigned long long MyObj::allocObjMem() {
    const size_t vsBytes = sizeof(vec3f) * nVertex();
    const size_t fsBytes = sizeof(Triangle) * nFace();
    unsigned long long gpuObjMemSize = vsBytes + fsBytes;
    std::cout << "allocating for object, " << (double)gpuObjMemSize / 1024 / 1024 << " MB" << std::endl;
    // Check every call like selfContactDetectionCUDA() does: the original
    // ignored the return codes, so a failed cudaMalloc left d_vs/d_fs
    // invalid and the following cudaMemcpy silently corrupted state.
    HANDLE_ERROR(cudaMalloc(&d_vs, vsBytes));
    HANDLE_ERROR(cudaMalloc(&d_fs, fsBytes));
    HANDLE_ERROR(cudaMemcpy(d_vs, vs.data(), vsBytes, cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(d_fs, fs.data(), fsBytes, cudaMemcpyHostToDevice));
    return gpuObjMemSize;
}
// Releases the device copies of the mesh and clears the pointers so a
// subsequent allocObjMem() starts from a clean state.
void MyObj::freeObjMem() {
    cudaFree(d_vs);
    d_vs = nullptr;
    cudaFree(d_fs);
    d_fs = nullptr;
}
// Triangle-triangle intersection test between faces i and j using the
// device-resident copies of the mesh (d_vs / d_fs); callable from both host
// and device.  Faces sharing a vertex index are reported as non-contacting,
// since adjacent faces always touch at the shared vertex/edge.
__host__ __device__
bool MyObj::triContactDetectionCUDA(vec3f *d_vs, Triangle *d_fs, int i, int j) {
    if (d_fs[i].hasSharedWith(d_fs[j]))
        return false;
    return tri_contact(d_vs[d_fs[i][0]], d_vs[d_fs[i][1]], d_vs[d_fs[i][2]],
                       d_vs[d_fs[j][0]], d_vs[d_fs[j][1]], d_vs[d_fs[j][2]]);
}
// GPU self-collision detection.
// Pipeline: build the BVH on the host, flatten it (dense()) and upload the
// flat tree, per-leaf indices and the mesh; launch cdOnCUDA with one thread
// per leaf, each walking the tree with an explicit stack and writing its hit
// count into d_a.  Every pair is counted from both triangles, hence the
// final division by two.
// blockSize: threads per block for the traversal kernel.
// NOTE(review): repeated calls leak the previous h_bvh / leaf_idx host
// buffers (only the last ones are freed in ~MyObj) -- confirm intended use
// is one call per object.
int MyObj::selfContactDetectionCUDA(int blockSize) {
    Timer t;
    t.start();
    std::cout << "Constructing BVH";
    constructBVH();
    h_bvh = bvh->dense();
    double e = t.end();
    printf(", used %.3f s\n", e);
    auto gpuBVHMemSize = sizeof(BVHDenseNode) * bvh->count();
    std::cout << "allocating for BVH, " << (double)gpuBVHMemSize / 1024 / 1024 << " MB" << std::endl;
    HANDLE_ERROR(cudaMalloc(&d_bvh, gpuBVHMemSize));
    HANDLE_ERROR(cudaMemcpy(d_bvh, h_bvh, gpuBVHMemSize, cudaMemcpyHostToDevice));
    std::cout << "copied BVH to GPU" << std::endl;
    auto gpuLeafMemSize = sizeof(int) * nFace();
    leaf_idx = new int[nFace()];
    for (int i = 0; i < nFace(); i++) {
        leaf_idx[i] = leaves[i]->idx;
    }
    std::cout << "allocating memory for leaves, " << (double)gpuLeafMemSize / 1024 / 1024 << " MB" << std::endl;
    HANDLE_ERROR(cudaMalloc(&d_leaves, gpuLeafMemSize));
    HANDLE_ERROR(cudaMemcpy(d_leaves, leaf_idx, gpuLeafMemSize, cudaMemcpyHostToDevice));
    auto gpuObjMemSize = allocObjMem();
    // Per-leaf result vectors: device-side scratch for the contact pairs.
    MyVector *h_vec = new MyVector[nFace()];
    MyVector *d_vec;
    auto gpuResMemSize = sizeof(MyVector) * nFace();
    std::cout << "allocating for results, " << (double)gpuResMemSize / 1024 / 1024 << " MB" << std::endl;
    HANDLE_ERROR(cudaMalloc(&d_vec, gpuResMemSize));  // was unchecked
    HANDLE_ERROR(cudaMemcpy(d_vec, h_vec, gpuResMemSize, cudaMemcpyHostToDevice));
    auto gpuAllMemSize = gpuLeafMemSize + gpuBVHMemSize + gpuObjMemSize + gpuResMemSize;
    std::cout << "Totally allocated " << (double)gpuAllMemSize / 1024 / 1024 << " MB" << std::endl;
    // Per-leaf hit counters, zero-initialized on the host then uploaded.
    int *h_a = new int[nFace()];
    int *d_a;
    memset(h_a, 0, sizeof(int) * nFace());
    HANDLE_ERROR(cudaMalloc(&d_a, sizeof(int) * nFace()));  // was unchecked
    HANDLE_ERROR(cudaMemcpy(d_a, h_a, sizeof(int) * nFace(), cudaMemcpyHostToDevice));
    // Time the traversal kernel with device events.
    cudaEvent_t start, stop;
    HANDLE_ERROR(cudaEventCreate(&start));
    HANDLE_ERROR(cudaEventCreate(&stop));
    HANDLE_ERROR(cudaEventRecord(start, nullptr));
    int blockNum = (nFace() + blockSize - 1) / blockSize;  // ceil-div: cover every leaf
    printf("Start detecting ...\n");
    cdOnCUDA<<<blockNum, blockSize>>>(d_vs, d_fs, d_leaves, d_bvh, d_vec, d_a, nFace(), bvh->height);
    HANDLE_ERROR(cudaGetLastError());
    HANDLE_ERROR(cudaEventRecord(stop, nullptr));
    HANDLE_ERROR(cudaEventSynchronize(stop));
    float elapsedTime;
    HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("Time to detect: %3.1f ms\n", elapsedTime);
    HANDLE_ERROR(cudaEventDestroy(start));
    HANDLE_ERROR(cudaEventDestroy(stop));
    HANDLE_ERROR(cudaMemcpy(h_vec, d_vec, gpuResMemSize, cudaMemcpyDeviceToHost));
    HANDLE_ERROR(cudaMemcpy(h_a, d_a, sizeof(int) * nFace(), cudaMemcpyDeviceToHost));
    int cnt = 0;
    for (int i = 0; i < nFace(); i++) {
        cnt += h_a[i];
    }
    // Release device buffers.  The original leaked d_a and d_bvh on every
    // call; d_bvh is a member but is re-allocated here each invocation and
    // freed nowhere else, so it is released here as well.
    cudaFree(d_leaves);
    cudaFree(d_vec);
    cudaFree(d_a);
    cudaFree(d_bvh);
    d_bvh = nullptr;
    delete[] h_a;
    delete[] h_vec;
    freeObjMem();
    return cnt / 2;
}
// Destructor: releases host-side BVH storage.
// 'leaves' (BVHNode*[], from constructBVH) and 'leaf_idx' (int[], from
// selfContactDetectionCUDA) are allocated with new[], so they must be
// released with delete[]; the original free() calls were undefined
// behaviour.  The individual leaf BVHNodes are assumed to be owned by the
// tree rooted at 'bvh' -- TODO confirm BVHNode::build()/~BVHNode ownership.
MyObj::~MyObj() {
    delete[] leaves;
    delete bvh;
    if (h_bvh) {
        // h_bvh comes from BVHNode::dense(); free() kept on the assumption
        // that dense() allocates with malloc -- verify.
        free(h_bvh);
        delete[] leaf_idx;
    }
}
// True when the two triangles share at least one vertex index.
__host__ __device__ bool Triangle::hasSharedWith(const Triangle &t) const {
    for (const auto &mine : v_id) {
        for (const auto &theirs : t.v_id) {
            if (mine == theirs)
                return true;
        }
    }
    return false;
}
// One thread per BVH leaf: thread 'offset' starts from its leaf node
// (d_leaves[offset] indexes into the flattened tree d_bvh) and walks the
// tree with an explicit stack bounded by 'height', writing its per-leaf
// contact count into a[offset] and contact data into d_vec.
__global__ void cdOnCUDA(vec3f *d_vs, Triangle *d_fs, int *d_leaves, BVHDenseNode *d_bvh, MyVector *d_vec, int *a, int nFace, int height) {
    int offset = blockIdx.x * blockDim.x + threadIdx.x;
    if (offset < nFace) {  // guard the grid tail
        a[offset] = d_bvh[d_leaves[offset]].contact_stack(d_bvh, height, d_vs, d_fs, d_vec);
    }
}
|
b286831acf43a7b527131c816477a1b26752e1b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define mm_BLOCK_SIZE_x 8
#define mm_BLOCK_SIZE 32
//#define mm_SUPER_BLOCKS_PER_SM 4
//int mm_SUPER_BLOCKS_PER_SM = 4;
#define iSizeMultiple 4 //must be multipes of 15
#define WA (12 * mm_BLOCK_SIZE) // Matrix A width
#define HA (12 * mm_BLOCK_SIZE) // Matrix A height
//#define WB (mm_SUPER_BLOCKS_PER_SM * mm_BLOCK_SIZE) // Matrix B width
#define WB (12 * mm_BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
#define mm_GRID_X (WC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_GRID_Y (HC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y)
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
void randomInit(float* data, int size)
{
    // Fill `size` entries with uniform pseudo-randoms in [0, 1]; consumes
    // exactly `size` calls to rand(), so results are reproducible after srand().
    for (float* p = data, * const end = data + size; p != end; ++p)
        *p = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    // CPU reference GEMM: C (hA x wB) = A (hA x wA) * B (wA x wB), row-major.
    // Accumulates in double so the reference is tighter than the float GPU result.
    for (unsigned int row = 0; row < hA; ++row) {
        const float* aRow = A + (size_t)row * wA;
        float* cRow = C + (size_t)row * wB;
        for (unsigned int col = 0; col < wB; ++col) {
            double acc = 0;
            for (unsigned int k = 0; k < wA; ++k)
                acc += (double)aRow[k] * (double)B[(size_t)k * wB + col];
            cRow[col] = (float)acc;
        }
    }
}
__global__ void
mm_kernel( float* C, float* A, float* B, int wA, int wB)
{
    // Tiled matrix multiply: C = A * B, row-major.
    // Launch contract: blockDim = (mm_BLOCK_SIZE, mm_BLOCK_SIZE); the caller's
    // grid covers C in mm_BLOCK_SIZE tiles. wA and wB must be multiples of
    // mm_BLOCK_SIZE — there are no tail guards below.
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * mm_BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd   = aBegin + wA - 1;
    // Step size used to iterate through the sub-matrices of A (rightwards)
    int aStep  = mm_BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = mm_BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B (downwards)
    int bStep  = mm_BLOCK_SIZE * wB;
    // Csub accumulates the single output element owned by this thread
    float Csub = 0;
    // March one tile of A and the matching tile of B per iteration
    for (int a = aBegin, b = bBegin;
             a <= aEnd;
             a += aStep, b += bStep) {
        // Shared-memory staging buffer for the current tile of A
        __shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
        // Shared-memory staging buffer for the current tile of B
        __shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
        // Each thread loads exactly one element of each tile
        AS(ty, tx) = A[a + wA * ty + tx];
        BS(ty, tx) = B[b + wB * ty + tx];
        // Barrier: tiles must be fully populated before anyone reads them
        __syncthreads();
        // Rank-mm_BLOCK_SIZE update of this thread's dot product;
        // AS(ty,k) is a warp broadcast, BS(k,tx) is conflict-free across tx
        #pragma unroll
        for (int k = 0; k < mm_BLOCK_SIZE; ++k)
            Csub += AS(ty, k) * BS(k, tx);
        // Barrier: finish reading the tiles before the next iteration overwrites them
        __syncthreads();
    }
    // Write the block sub-matrix to device memory; each thread writes one element
    int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
    //if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
    // Driver: build random A/B on the host, multiply on the GPU with mm_kernel,
    // time the kernel with HIP events, and validate against the CPU reference
    // (computeGold + sdkCompareL2fe). Returns 0 unconditionally; pass/fail is
    // printed to stdout.
    // hipSetDevice(1);
    srand(2013);  // fixed seed -> reproducible matrices
    unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
    uiWA = WA * iSizeMultiple;
    uiHA = HA * iSizeMultiple;
    uiWB = WB * iSizeMultiple;
    uiHB = HB * iSizeMultiple;
    uiWC = WC * iSizeMultiple;
    uiHC = HC * iSizeMultiple;
    // allocate host memory for matrices A and B
    unsigned int size_A = uiWA * uiHA;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*)malloc(mem_size_A);
    unsigned int size_B = uiWB * uiHB;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*)malloc(mem_size_B);
    // initialize host memory
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);
    // allocate device memory
    float* d_A, *d_B, *d_C;
    unsigned int size_C = uiWC * uiHC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    // %u: the byte counts are unsigned int (was %d, which misprints large sizes)
    printf("size A = %u bytes,size B=%u bytes,size C=%u bytes\n", mem_size_A, mem_size_B, mem_size_C);
    // allocate host memory for the result
    // (removed the unused, leaked h_CUBLAS buffer)
    float* h_C = (float*) malloc(mem_size_C);
    checkCudaErrors(hipMalloc((void**) &d_A, mem_size_A));
    checkCudaErrors(hipMalloc((void**) &d_B, mem_size_B));
    // copy host memory to device
    checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) );
    checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) );
    checkCudaErrors(hipMalloc((void**) &d_C, mem_size_C));
    hipEvent_t kernel_start, kernel_stop;
    hipEventCreate(&kernel_start);
    hipEventCreate(&kernel_stop);
    float kernel_time = 0.0f;
    hipEventRecord(kernel_start, 0);
    // setup execution parameters
    dim3 mm_grid(mm_GRID_X, mm_GRID_Y);
    dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE);
    hipLaunchKernelGGL(( mm_kernel), dim3(mm_grid), dim3(mm_block), 0, 0, d_C, d_A, d_B, uiWA, uiWB);
    checkCudaErrors(hipGetLastError());  // surface launch-configuration errors
    hipDeviceSynchronize();
    hipEventRecord(kernel_stop, 0);
    hipEventSynchronize(kernel_stop);
    // get elapsed time
    kernel_time = 0.0f;
    hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
    kernel_time *= 1.e-3; // Convert to seconds
    cout << "kernel exe time: " << kernel_time << endl;
    // events are no longer needed once the elapsed time has been read
    hipEventDestroy(kernel_start);
    hipEventDestroy(kernel_stop);
    // copy result from device to host
    checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) );
    // compute reference solution
    float* reference = (float*)malloc(mem_size_C);
    computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
    // check result (matrixMul)
    bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
    printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL");
    // clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(reference);
    checkCudaErrors(hipFree(d_A));
    checkCudaErrors(hipFree(d_B));
    checkCudaErrors(hipFree(d_C));
    return 0;
}
| b286831acf43a7b527131c816477a1b26752e1b4.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define mm_BLOCK_SIZE_x 8
#define mm_BLOCK_SIZE 32
//#define mm_SUPER_BLOCKS_PER_SM 4
//int mm_SUPER_BLOCKS_PER_SM = 4;
#define iSizeMultiple 4 //must be multipes of 15
#define WA (12 * mm_BLOCK_SIZE) // Matrix A width
#define HA (12 * mm_BLOCK_SIZE) // Matrix A height
//#define WB (mm_SUPER_BLOCKS_PER_SM * mm_BLOCK_SIZE) // Matrix B width
#define WB (12 * mm_BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
#define mm_GRID_X (WC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_GRID_Y (HC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y)
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
void randomInit(float* data, int size)
{
    // Fill `size` entries with uniform pseudo-random values in [0, 1].
    // Iterates forward so the rand() consumption order is deterministic.
    int i = 0;
    while (i < size) {
        data[i] = rand() / (float)RAND_MAX;
        ++i;
    }
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    // Host-side reference multiply: C = A * B (row-major), double accumulation.
    for (unsigned int i = 0; i < hA; ++i)
        for (unsigned int j = 0; j < wB; ++j) {
            double dot = 0;
            const float* a = &A[i * wA];
            const float* b = &B[j];          // walks column j of B
            for (unsigned int k = 0; k < wA; ++k, b += wB)
                dot += (double)a[k] * (double)(*b);
            C[i * wB + j] = (float)dot;
        }
}
__global__ void
mm_kernel( float* C, float* A, float* B, int wA, int wB)
{
    // Tiled matrix multiply: C = A * B, row-major.
    // Launch contract: blockDim = (mm_BLOCK_SIZE, mm_BLOCK_SIZE); the caller's
    // grid covers C in mm_BLOCK_SIZE tiles. wA and wB must be multiples of
    // mm_BLOCK_SIZE — there are no tail guards below.
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * mm_BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd   = aBegin + wA - 1;
    // Step size used to iterate through the sub-matrices of A (rightwards)
    int aStep  = mm_BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = mm_BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B (downwards)
    int bStep  = mm_BLOCK_SIZE * wB;
    // Csub accumulates the single output element owned by this thread
    float Csub = 0;
    // March one tile of A and the matching tile of B per iteration
    for (int a = aBegin, b = bBegin;
             a <= aEnd;
             a += aStep, b += bStep) {
        // Shared-memory staging buffer for the current tile of A
        __shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
        // Shared-memory staging buffer for the current tile of B
        __shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
        // Each thread loads exactly one element of each tile
        AS(ty, tx) = A[a + wA * ty + tx];
        BS(ty, tx) = B[b + wB * ty + tx];
        // Barrier: tiles must be fully populated before anyone reads them
        __syncthreads();
        // Rank-mm_BLOCK_SIZE update of this thread's dot product;
        // AS(ty,k) is a warp broadcast, BS(k,tx) is conflict-free across tx
        #pragma unroll
        for (int k = 0; k < mm_BLOCK_SIZE; ++k)
            Csub += AS(ty, k) * BS(k, tx);
        // Barrier: finish reading the tiles before the next iteration overwrites them
        __syncthreads();
    }
    // Write the block sub-matrix to device memory; each thread writes one element
    int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
    //if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
    // Driver: build random A/B on the host, multiply on the GPU with mm_kernel,
    // time the kernel with CUDA events, and validate against the CPU reference
    // (computeGold + sdkCompareL2fe). Returns 0 unconditionally; pass/fail is
    // printed to stdout.
    // cudaSetDevice(1);
    srand(2013);  // fixed seed -> reproducible matrices
    unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
    uiWA = WA * iSizeMultiple;
    uiHA = HA * iSizeMultiple;
    uiWB = WB * iSizeMultiple;
    uiHB = HB * iSizeMultiple;
    uiWC = WC * iSizeMultiple;
    uiHC = HC * iSizeMultiple;
    // allocate host memory for matrices A and B
    unsigned int size_A = uiWA * uiHA;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*)malloc(mem_size_A);
    unsigned int size_B = uiWB * uiHB;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*)malloc(mem_size_B);
    // initialize host memory
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);
    // allocate device memory
    float* d_A, *d_B, *d_C;
    unsigned int size_C = uiWC * uiHC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    // %u: the byte counts are unsigned int (was %d, which misprints large sizes)
    printf("size A = %u bytes,size B=%u bytes,size C=%u bytes\n", mem_size_A, mem_size_B, mem_size_C);
    // allocate host memory for the result
    // (removed the unused, leaked h_CUBLAS buffer)
    float* h_C = (float*) malloc(mem_size_C);
    checkCudaErrors(cudaMalloc((void**) &d_A, mem_size_A));
    checkCudaErrors(cudaMalloc((void**) &d_B, mem_size_B));
    // copy host memory to device
    checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) );
    checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) );
    checkCudaErrors(cudaMalloc((void**) &d_C, mem_size_C));
    cudaEvent_t kernel_start, kernel_stop;
    cudaEventCreate(&kernel_start);
    cudaEventCreate(&kernel_stop);
    float kernel_time = 0.0f;
    cudaEventRecord(kernel_start, 0);
    // setup execution parameters
    dim3 mm_grid(mm_GRID_X, mm_GRID_Y);
    dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE);
    mm_kernel<<< mm_grid, mm_block>>>(d_C, d_A, d_B, uiWA, uiWB);
    checkCudaErrors(cudaGetLastError());  // surface launch-configuration errors
    cudaDeviceSynchronize();
    cudaEventRecord(kernel_stop, 0);
    cudaEventSynchronize(kernel_stop);
    // get elapsed time
    kernel_time = 0.0f;
    cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
    kernel_time *= 1.e-3; // Convert to seconds
    cout << "kernel exe time: " << kernel_time << endl;
    // events are no longer needed once the elapsed time has been read
    cudaEventDestroy(kernel_start);
    cudaEventDestroy(kernel_stop);
    // copy result from device to host
    checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) );
    // compute reference solution
    float* reference = (float*)malloc(mem_size_C);
    computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
    // check result (matrixMul)
    bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
    printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL");
    // clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(reference);
    checkCudaErrors(cudaFree(d_A));
    checkCudaErrors(cudaFree(d_B));
    checkCudaErrors(cudaFree(d_C));
    return 0;
}
|
e484032b3e7a80ab319b8143555a82793e143e93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    // Residual "shortcut" accumulation between two feature maps of different
    // sizes: each thread owns one (i, j, k, b) cell of the shared
    // (minw, minh, minc, batch) volume, scales its spatial coordinates by
    // `sample` for the output and `stride` for the input, and does out += add.
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= size) return;  // grid may overshoot `size`
    // Unflatten id -> (i, j, k, b)
    int i = id % minw;
    id /= minw;
    int j = id % minh;
    id /= minh;
    int k = id % minc;
    id /= minc;
    int b = id % batch;
    // w2/h2/c2 describe `out`; w1/h1/c1 describe `add`
    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
    out[out_index] += add[add_index];
} | e484032b3e7a80ab319b8143555a82793e143e93.cu | #include "includes.h"
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    // Accumulate a resized shortcut connection: out += add, walked over the
    // shared (minw, minh, minc, batch) volume; `sample` scales output
    // coordinates, `stride` scales input coordinates.
    int flat = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (flat >= size) return;
    const int i = flat % minw; flat /= minw;
    const int j = flat % minh; flat /= minh;
    const int k = flat % minc; flat /= minc;
    const int b = flat % batch;
    const int dst = i * sample + w2 * (j * sample + h2 * (k + c2 * b));
    const int src = i * stride + w1 * (j * stride + h1 * (k + c1 * b));
    out[dst] += add[src];
} |
c747bf67dd2c24e62c1d28222726e14223af0af5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <set>
#include <scan.cu>
#include <stratify_none.cu>
#include <stratify_high_degree.cu>
#include <stratify_low_degree.cu>
using namespace std;
__global__ void stratify(double *numbering, float *roots, int *indptr, int *indices, double delta, int n);
__host__ __device__ void print_array(float *a, int n)
{
    // Debug helper: print n floats space-separated, followed by a newline.
    for(int i=0; i<n; i++)
        printf("%f ", a[i]);
    // BUG FIX: "\\n" printed a literal backslash-n; a real newline was intended
    // (every other format string in this file uses a single "\n").
    printf("\n");
}
__host__ __device__ void parallel_prefix(float *d_idata, float *d_odata, int num_elements)
{
    // Prefix sum of d_idata into d_odata via prescanArrayRecursive.
    // One extra slot is scanned so callers can read the total at d_odata[n]
    // (every call site allocates n+1 floats for this reason).
    num_elements += 1;
    float** g_scanBlockSums;
    unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
    unsigned int numElts = num_elements;
    int level = 0;
    // Pass 1: count how many recursion levels need an inter-block sums buffer.
    do
    {
        unsigned int numBlocks =
            max(1, (int)ceil((float)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            level++;
        }
        numElts = numBlocks;
    } while (numElts > 1);
    g_scanBlockSums = (float**) malloc(level * sizeof(float*));
    numElts = num_elements;
    level = 0;
    // Pass 2: allocate one device buffer of block sums per level.
    do
    {
        unsigned int numBlocks =
            max(1, (int)ceil((float)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            hipMalloc((void**) &g_scanBlockSums[level++],
                      numBlocks * sizeof(float));
        }
        numElts = numBlocks;
    } while (numElts > 1);
    prescanArrayRecursive(d_odata, d_idata, num_elements, 0, g_scanBlockSums);
    // BUG FIX: the intermediate buffers (and the pointer table) were leaked on
    // every call; release them now that the recursive scan is done with them.
    for (int l = 0; l < level; l++)
    {
        hipFree(g_scanBlockSums[l]);
    }
    free(g_scanBlockSums);
}
__global__ void copy_array(double *a, double *b)
{
    // Element-wise copy a -> b, one element per thread; the caller sizes the
    // grid to the array length (no bounds guard here).
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    b[tid] = a[tid];
}
__global__ void init_array(float *arr, float val)
{
    // Constant fill: one element per thread; caller sizes the grid.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    arr[tid] = val;
}
__global__ void init_array_double(double *arr, double val)
{
    // Constant fill, double variant: one element per thread.
    arr[(blockIdx.x * blockDim.x) + threadIdx.x] = val;
}
__global__ void init_array_consecutive(float *arr)
{
    // Iota fill: arr[g] = g for every global thread id g.
    const int g = (blockIdx.x * blockDim.x) + threadIdx.x;
    arr[g] = g;
}
__global__ void split_classes(double *numbering, int *indptr, int *indices, float *mask, float *roots, float *changes)
{
    // One hook step of iterative connected components on the subgraph induced
    // by `mask`, restricted to edges whose endpoints share the same
    // `numbering` value: each node adopts the smallest root id among such
    // neighbors. `changes[i]` is bumped when node i changed, so the caller can
    // detect convergence with a prefix sum (see get_class_components).
    const int i = threadIdx.x;
    float min = roots[i];
    if(mask[i] == 0){
        roots[i] = -1;  // masked-out nodes are parked at sentinel root -1
        return;
    }
    // CSR scan of i's neighborhood
    for(int j = indptr[i]; j < indptr[i+1]; j++){
        if(mask[indices[j]] == 1 && numbering[i] == numbering[indices[j]] && roots[indices[j]] < min){
            min = roots[indices[j]];
        }
    }
    if(min != roots[i]){
        // NOTE(review): neighbors' roots are read while other threads may be
        // writing them — tolerable only because the caller iterates this
        // kernel to a fixed point. Confirm that is the intended design.
        roots[i] = min;
        changes[i] += 1;
    }
}
__host__ __device__ void get_class_components(double *numbering, int *indptr, int *indices, float *mask, int n, float *roots)
{
    // Label connected components of the subgraph induced by `mask` (edges only
    // between equal `numbering` values): repeat the split_classes hook step
    // until no node changes its root. `roots` must be pre-initialized by the
    // caller (presumably roots[i] = i via init_array_consecutive — TODO confirm).
    float *changes, *sum;
    hipMalloc((void**)&changes, sizeof(float) * (n+1));
    hipMalloc((void**)&sum, sizeof(float) * (n+1));
    //changes = (float *)malloc(sizeof(float) * (n+1));
    //sum = (float *)malloc(sizeof(float) * (n+1));
    do{
        hipLaunchKernelGGL(( init_array), dim3(1), dim3(n) , 0, 0, changes, 0);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( split_classes), dim3(1), dim3(n) , 0, 0, numbering, indptr, indices, mask, roots, changes);
        hipDeviceSynchronize();
        // sum[n] becomes the total number of nodes that changed this round
        parallel_prefix(changes, sum, n);
        hipDeviceSynchronize();
    }while(sum[n] > 0);  // NOTE(review): `sum` is hipMalloc'd device memory; this
                         // direct dereference only works when running on the
                         // device — a host caller would need a memcpy first.
    hipFree(changes);
    hipFree(sum);
}
__global__ void get_class_components_global(double *numbering, int *indptr, int *indices, float *mask, int n, float *roots)
{
    // Kernel wrapper so the host can invoke the device-side connected-components
    // routine (which launches child kernels) from a single-thread launch.
    get_class_components(numbering, indptr, indices, mask, n, roots);
}
__global__ void spanning_tree_depth(int *indptr, int *indices, float *level, float *in_component, int *neighbors, int curr_level)
{
    // Dynamic-parallelism tree walk: thread i claims neighbors[i]; if that node
    // is inside the component and not yet levelled, stamp it with curr_level and
    // launch a child grid over its own CSR adjacency list at curr_level+1.
    const int i = threadIdx.x;
    int curr_node = neighbors[i];
    if(level[curr_node] > 0 || in_component[curr_node] == 0)
        return;
    level[curr_node] = curr_level;
    int j = indptr[curr_node];
    int num_neighbors = indptr[curr_node+1] - indptr[curr_node];
    if(num_neighbors > 0){
        // BUG FIX: the child grid must start at &indices[j]; the previous
        // `indices + j*sizeof(int)` scaled the offset twice (pointer arithmetic
        // is already in elements, not bytes). Also removed the __syncthreads()
        // that sat in divergent code after the early returns above — that is
        // undefined behavior, and there is no shared memory to synchronize.
        hipLaunchKernelGGL(( spanning_tree_depth), dim3(1), dim3(num_neighbors) , 0, 0, indptr, indices, level, in_component, indices + j, curr_level+1);
        hipDeviceSynchronize();
    }
}
// Outputs the depth level of each node, forming a spanning tree rooted at `root`
// inside `in_component`; the (level, node index) pair gives a unique depth
// ordering for every node in the component.
__host__ __device__ void spanning_tree_numbering(int *indptr, int *indices, float *in_component, float *level, int root, int n)
{
    // Reset all levels, stamp the root at depth 1, then recursively level the
    // rest of the component starting from the root's adjacency list.
    hipLaunchKernelGGL(( init_array), dim3(1), dim3(n) , 0, 0, level, 0);
    hipDeviceSynchronize();
    level[root] = 1;  // NOTE(review): direct device-memory store — valid on device, not from host
    int j = indptr[root];
    int num_neighbors = indptr[root + 1] - indptr[root];
    // BUG FIX: pass &indices[j]; the old `indices + j*sizeof(int)` double-scaled
    // the offset (pointer arithmetic is already in elements).
    hipLaunchKernelGGL(( spanning_tree_depth), dim3(1), dim3(num_neighbors) , 0, 0, indptr, indices, level, in_component, indices + j, 2);
    hipDeviceSynchronize();
}
__global__ void compute_component_sizes(float *roots, float *sizes)
{
    // Histogram of component sizes: each thread votes for its root's bucket.
    // `sizes` must be zero-initialized by the caller.
    const int i = threadIdx.x;
    int root = roots[i];
    // BUG FIX: masked-out nodes carry root == -1 (set by split_classes);
    // indexing sizes[-1] was out of bounds.
    if(root < 0) return;
    // BUG FIX: many threads share a root, so the increment must be atomic;
    // the plain `sizes[root] += 1` lost counts under contention.
    atomicAdd(&sizes[root], 1.0f);
}
__global__ void richer_neighbors(double *numbering, float *roots, int *indptr, int *indices, int root, float c, float *is_richer_neighbor, float *high_degree, float *neighbors_in_c)
{
    // For the component identified by `root` (of size c): mark every outside
    // node that has at least one lower-numbered neighbor inside the component,
    // count those edges, and flag the node "high degree" when it touches at
    // least 2/5 of the component.
    const int i = threadIdx.x;
    is_richer_neighbor[i] = 0;
    high_degree[i] = 0;
    neighbors_in_c[i] = 0;
    if(roots[i] == root) return;  // nodes inside the component are not candidates
    for(int j = indptr[i]; j < indptr[i+1]; j++){
        if(numbering[i] > numbering[indices[j]] && roots[indices[j]] == root){
            is_richer_neighbor[i] = 1;
            neighbors_in_c[i] += 1;
        }
    }
    // BUG FIX: `2 / 5 * c` used integer division (== 0), so every richer
    // neighbor was marked high degree; the threshold is 2c/5.
    if(neighbors_in_c[i] >= 2.0f / 5.0f * c){
        high_degree[i] = 1;
    }
}
__global__ void in_class(double *numbering, float *roots, int *indptr, int *indices, int c, float *is_class_component)
{
    // Membership mask: 1 where the node's component root equals c, else 0.
    // (numbering/indptr/indices are unused but kept for signature parity.)
    const int node = threadIdx.x;
    is_class_component[node] = (roots[node] == c) ? 1 : 0;
}
__global__ void in_class_special(double *numbering, float *roots, int *indptr, int *indices, int c, float *is_class_component)
{
    // Membership map variant: write the root id c for members, -1 otherwise
    // (so the result doubles as a root array for nested stratify calls).
    const int node = threadIdx.x;
    is_class_component[node] = (roots[node] == c) ? c : -1;
}
__global__ void is_clique(float *in_component, int *indptr, int *indices, int n, float c, float *full_connected)
{
    // A component member is "fully connected" when it neighbors at least c-1
    // other members — i.e. the component would be a clique if all members pass.
    const int v = threadIdx.x;
    full_connected[v] = 0;
    if(in_component[v] == 0) return;
    int links = 0;
    for(int e = indptr[v]; e < indptr[v+1]; e++){
        if(in_component[indices[e]] == 1)
            links += 1;
    }
    if(links >= c-1)
        full_connected[v] = 1;
}
__global__ void sum_array_to_list(float *sums, float *list)
{
    // Stream compaction: given a prefix sum over a 0/1 mask, a step at i means
    // node i was selected; store it at its rank in the output list.
    const int i = threadIdx.x;
    if(sums[i+1] != sums[i])
        list[(int)sums[i+1] - 1] = i;
}
__global__ void add_i(double *numbering, float *D_sum, int *indptr, int *indices, int n)
{
    // Selected nodes (where the prefix sum D_sum stepped at i) get their rank
    // within the selection added to their numbering.
    const int i = threadIdx.x;
    if(D_sum[i+1] != D_sum[i])
        numbering[i] += D_sum[i+1];
}
__global__ void difference(float *a, float *b, float *r)
{
    // Mask set-difference: r = a AND NOT b (expects 0/1 masks).
    const int idx = threadIdx.x;
    const float keep = 1 - b[idx];
    r[idx] = a[idx] * keep;
}
__global__ void find_first(float *a, int *first)
{
    // Record the index where the prefix sum `a` first steps from 0 to 1,
    // i.e. the first selected node of the underlying mask.
    const int i = threadIdx.x;
    if(a[i] == 0 && a[i+1] == 1)
        *first = i;
}
__global__ void inc_delta(double *numbering, float *other_array, double delta)
{
    // Bump the numbering of every node selected in the 0/1 mask by delta.
    const int i = threadIdx.x;
    if(other_array[i] != 1) return;
    numbering[i] += delta;
}
__global__ void find_common_neighbors(float *is_class_component, int *indptr, int *indices, int f, int s, float *r)
{
    // r[i] = 1 iff node i is in the class component and adjacent to both f and s.
    const int i = threadIdx.x;
    r[i] = 0;
    if(is_class_component[i] == 0) return;
    int hits = 0;
    for(int e = indptr[i]; e < indptr[i+1]; e++){
        if(indices[e] == f || indices[e] == s)
            hits += 1;
    }
    if(hits == 2) r[i] = 1;
}
__device__ void stratify_none(double *numbering, float *is_class_component, int *indptr, int *indices, double delta, int n, float c)
{
    // Stratification step for a class component C (mask `is_class_component`,
    // size c): compute a dominating-style subset D, then either (a) grow a
    // large connected chunk of C\D via a spanning tree and bump its numbering
    // by delta, or (b) refine the numbering through D's clique / non-clique
    // structure. (High-level intent inferred from the stratify_* family —
    // confirm against the algorithm's reference.)
    float *D, *C_D, *D_clique, *D_diff, *D_diff_first_neigh, *D_diff_first_neigh_diff, *common_neighbors, *C_D_components;
    float *D_sum, *D_clique_sum, *D_diff_sum, *D_diff_first_neigh_sum, *common_neighbors_sum, *C_D_components_sizes;
    float rolling_sum;
    int i, j, flag, c_root;
    int *first, *second;
    hipMalloc((void**)&first, sizeof(int));
    hipMalloc((void**)&second, sizeof(int));
    // Prefix-sum buffers carry one extra slot: slot n holds the total.
    unsigned int pps_arr_size = (n+1)*sizeof(float);
    hipMalloc((void**)&D, pps_arr_size);
    hipMalloc((void**)&C_D, pps_arr_size);
    hipMalloc((void**)&C_D_components, pps_arr_size);
    hipMalloc((void**)&D_clique, pps_arr_size);
    hipMalloc((void**)&D_sum, pps_arr_size);
    hipMalloc((void**)&D_clique_sum, pps_arr_size);
    hipMalloc((void**)&D_diff, pps_arr_size);
    hipMalloc((void**)&D_diff_sum, pps_arr_size);
    hipMalloc((void**)&D_diff_first_neigh, pps_arr_size);
    hipMalloc((void**)&D_diff_first_neigh_sum, pps_arr_size);
    hipMalloc((void**)&D_diff_first_neigh_diff, pps_arr_size);
    hipMalloc((void**)&common_neighbors, pps_arr_size);
    hipMalloc((void**)&common_neighbors_sum, pps_arr_size);
    hipMalloc((void**)&C_D_components_sizes, pps_arr_size);
    hipLaunchKernelGGL(( init_array), dim3(1), dim3(n) , 0, 0, C_D_components_sizes, 0);
    // D := the special subset of C picked by stratify_none_getD (external).
    hipLaunchKernelGGL(( stratify_none_getD), dim3(1), dim3(n) , 0, 0, is_class_component, indptr, indices, n, c, D);
    hipDeviceSynchronize();
    // C_D := C \ D
    hipLaunchKernelGGL(( difference), dim3(1), dim3(n) , 0, 0, is_class_component, D, C_D);
    hipDeviceSynchronize();
    get_class_components(numbering, indptr, indices, C_D, n, C_D_components);
    hipLaunchKernelGGL(( compute_component_sizes), dim3(1), dim3(n) , 0, 0, C_D_components, C_D_components_sizes);
    hipDeviceSynchronize();
    // Scan component sizes: flag=100 when a single component exceeds 4c/5,
    // flag=1 when the accumulated small components exceed c/5.
    for(i = 0, rolling_sum = 0, flag = 0; i < n && flag == 0; i++)
        if(C_D_components_sizes[i] >0){
            if(C_D_components_sizes[i] > c* 4/5){
                flag = 100;
                c_root = i;
            }else{
                rolling_sum += C_D_components_sizes[i];
                if(rolling_sum > c *1/5)
                    flag = 1;
                // raise flag, no component can be > 4/5
            }
        }
    if(flag>1){ //component exists
        // Case (a): a dominating component rooted at c_root; grow a node set by
        // OR-ing adjacency rows in spanning-tree depth order until it covers
        // more than 4c/5, then bump that set's numbering by delta.
        // NOTE(review): nothing allocated in this branch (adjacencies, level,
        // in_component, arr_odd, arr_even, sum) is ever freed — leak.
        float *level, *adjacencies, *in_component;
        hipMalloc((void**)&adjacencies, n*n*sizeof(float));
        hipMalloc((void**)&level, pps_arr_size);
        hipMalloc((void**)&in_component, pps_arr_size);
        hipLaunchKernelGGL(( in_class), dim3(1), dim3(n) , 0, 0, numbering, C_D_components, indptr, indices, c_root, in_component);
        hipLaunchKernelGGL(( init_array), dim3(n), dim3(n) , 0, 0, adjacencies, 0);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( compute_adjacent_nodes), dim3(1), dim3(n) , 0, 0, indptr, indices, is_class_component, in_component, adjacencies, n);
        //add_self<<< 1, n >>>(is_class_component, in_component, adjacencies, n);
        spanning_tree_numbering(indptr, indices, in_component, level, c_root, n);
        float *arr_even, *arr_odd, *curr_array, *other_array, *tmp_arr_pointer, *sum;
        hipMalloc((void**)&arr_odd, n*sizeof(float));
        hipMalloc((void**)&arr_even, n*sizeof(float));
        // NOTE(review): `sum` holds n floats but parallel_prefix writes n+1
        // (slot n carries the total read below) — one-element overflow.
        hipMalloc((void**)&sum, n*sizeof(float));
        int current_depth;
        flag = 0;
        hipDeviceSynchronize();
        // NOTE(review): `adjacencies + c_root*sizeof(float)` byte-scales a
        // float* offset; selecting row c_root of the n*n matrix should be
        // `adjacencies + c_root*n` — confirm and fix (same pattern below).
        other_array = adjacencies + c_root*sizeof(float);
        curr_array = arr_even;
        tmp_arr_pointer = arr_odd;
        other_array[c_root] = 1;  // seed: the root itself is in the set
        //Flip between even and odd arrays instead of saving old values. When we go above the threshold, we use the other
        //array for indices
        for(i = 0, j = 0, current_depth = 2; flag == 0; i++){
            if(level[i]==current_depth){
                if(j > 0){
                    other_array = curr_array;
                    curr_array = tmp_arr_pointer;
                    tmp_arr_pointer = other_array; // for next cycle - needed due to first declaration used to skip copying
                }
                hipLaunchKernelGGL(( logic_or), dim3(1), dim3(n) , 0, 0, other_array, adjacencies+i*sizeof(float), curr_array);
                hipDeviceSynchronize();
                parallel_prefix(curr_array, sum, n);
                hipDeviceSynchronize();
                if(sum[n] > c * 4/5)
                    flag = 1;  // set grew past 4c/5: keep the previous (smaller) set
                j++;
            }
            if(flag == 0 && i == n-1){
                i = 0; // 0 is either c_root or not in component, increment irrelevant
                current_depth +=1;
            }
        }
        hipLaunchKernelGGL(( inc_delta), dim3(1), dim3(n) , 0, 0, numbering, other_array, delta);
    }else{
        // Case (b): no dominating component — work with D itself.
        //number of members of D
        parallel_prefix(D, D_sum, n);
        //get number of neighbors in D of nodes in D
        hipLaunchKernelGGL(( is_clique), dim3(1), dim3(n) , 0, 0, D, indptr, indices, n, c, D_clique);
        hipDeviceSynchronize();
        //check if they are all connected
        parallel_prefix(D_clique, D_clique_sum, n);
        hipDeviceSynchronize();
        if(D_clique_sum[n] == D_sum[n]){
            // D is a clique: add each member's rank to its numbering.
            hipLaunchKernelGGL(( add_i), dim3(1), dim3(n) , 0, 0, numbering, D_sum, indptr, indices, n);
        }else{
            // D is not a clique: find two non-adjacent members (first, second)
            // and renumber their common neighbors inside D.
            hipLaunchKernelGGL(( difference), dim3(1), dim3(n) , 0, 0, D, D_clique, D_diff);
            hipDeviceSynchronize();
            parallel_prefix(D_diff, D_diff_sum, n);
            hipDeviceSynchronize();
            hipLaunchKernelGGL(( find_first), dim3(1), dim3(n) , 0, 0, D_diff_sum, first);
            hipDeviceSynchronize();
            // NOTE(review): D_diff_first_neigh is never zero-initialized before
            // these partial writes — hipMalloc does not zero memory.
            D_diff_first_neigh[*first] = 1;
            for(int j = indptr[*first]; j < indptr[*first + 1]; j++){
                if(D_diff[indices[j]] == 1){
                    D_diff_first_neigh[indices[j]] = 1;
                }
            }
            hipLaunchKernelGGL(( difference), dim3(1), dim3(n) , 0, 0, D_diff, D_diff_first_neigh, D_diff_first_neigh_diff);
            hipDeviceSynchronize();
            parallel_prefix(D_diff_first_neigh_diff, D_diff_first_neigh_sum, n);
            hipDeviceSynchronize();
            hipLaunchKernelGGL(( find_first), dim3(1), dim3(n) , 0, 0, D_diff_first_neigh_sum, second);
            hipDeviceSynchronize();
            hipLaunchKernelGGL(( find_common_neighbors), dim3(1), dim3(n) , 0, 0, D, indptr, indices, *first, *second, common_neighbors);
            hipDeviceSynchronize();
            parallel_prefix(common_neighbors, common_neighbors_sum, n);
            hipDeviceSynchronize();
            hipLaunchKernelGGL(( add_i), dim3(1), dim3(n) , 0, 0, numbering, common_neighbors_sum, indptr, indices, n);
        }
    }
    // Release the shared scratch buffers (first/second are leaked — NOTE(review)).
    hipFree(D);
    hipFree(C_D);
    hipFree(C_D_components);
    hipFree(D_clique);
    hipFree(D_sum);
    hipFree(D_clique_sum);
    hipFree(D_diff);
    hipFree(D_diff_sum);
    hipFree(D_diff_first_neigh);
    hipFree(D_diff_first_neigh_sum);
    hipFree(D_diff_first_neigh_diff);
    hipFree(common_neighbors);
    hipFree(common_neighbors_sum);
}
__device__ void stratify_high_degree(double *numbering, float *is_class_component, int *indptr, int *indices, double delta, int n, float *is_richer_neighbor, float irn_num, float icc_num)
{
    // Stratification step driven by the `irn_num` richer neighbors of the class
    // component (size `icc_num`): intersect their adjacency rows until the
    // common-neighbor set drops below icc_num/5, bump that set's numbering by
    // delta, and if every richer neighbor was consumed, recurse into the
    // largest remaining component via stratify_none with delta/2.
    float *adjacencies;
    hipMalloc((void**)&adjacencies, n*n*sizeof(float));
    hipLaunchKernelGGL(( init_array), dim3(n), dim3(n) , 0, 0, adjacencies, 0);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( compute_adjacent_nodes), dim3(1), dim3(n) , 0, 0, indptr, indices, is_class_component, is_richer_neighbor, adjacencies, n);
    hipDeviceSynchronize();
    float *arr_even, *arr_odd, *curr_array, *other_array, *sum;
    hipMalloc((void**)&arr_odd, n*sizeof(float));
    hipLaunchKernelGGL(( init_array), dim3(1), dim3(n) , 0, 0, arr_odd, 1); //this array will be the first to be used for the logical and, we will write into arr_even
    hipMalloc((void**)&arr_even, n*sizeof(float));
    // NOTE(review): `sum` holds n floats but parallel_prefix writes n+1 (slot n
    // is the total read below) — one-element overflow.
    hipMalloc((void**)&sum, n*sizeof(float));
    int i, j, flag;
    flag = 0;
    hipDeviceSynchronize();
    //Flip between even and odd arrays instead of saving old values. When we go below the threshold, we use the other
    //array for indices
    for(i = 0, j = 0; i < n && flag == 0; i++){
        if(is_richer_neighbor[i]){
            if(j%2 == 0){
                curr_array = arr_even;
                other_array = arr_odd;
            }else{
                curr_array = arr_odd;
                other_array = arr_even;
            }
            // NOTE(review): `adjacencies+i*sizeof(float)` byte-scales a float*
            // offset; row i of the n*n matrix should be `adjacencies + i*n` —
            // confirm and fix.
            hipLaunchKernelGGL(( logic_and), dim3(1), dim3(n) , 0, 0, other_array, adjacencies+i*sizeof(float), curr_array);
            hipDeviceSynchronize();
            if(j > 0){
                parallel_prefix(curr_array, sum, n);
                hipDeviceSynchronize();
                if(sum[n] < icc_num/5)
                    flag = 1;  // intersection shrank below icc_num/5: stop, use previous set
            }
            j++;
        }
    }
    // NOTE(review): if there are no richer neighbors (j == 0) `other_array` is
    // used uninitialized here — confirm callers guarantee irn_num > 0.
    hipLaunchKernelGGL(( inc_delta), dim3(1), dim3(n) , 0, 0, numbering, other_array, delta);
    if(j == irn_num){
        // All richer neighbors consumed: split the surviving set into
        // components and recurse on the largest one with half the delta.
        float *C1_components, *C1_components_sizes, *C1, *component_size;
        hipMalloc((void**)&component_size, n*sizeof(float));
        hipMalloc((void**)&C1, n*sizeof(float));
        hipMalloc((void**)&C1_components, n*sizeof(float));
        hipMalloc((void**)&C1_components_sizes, n*sizeof(float));
        hipLaunchKernelGGL(( init_array), dim3(1), dim3(n) , 0, 0, component_size, 0);
        get_class_components(numbering, indptr, indices, other_array, n, C1_components);
        hipLaunchKernelGGL(( compute_component_sizes), dim3(1), dim3(n) , 0, 0, C1_components, C1_components_sizes);
        hipDeviceSynchronize();
        int max_size_root = 0;
        for(i = 0; i < n; i++){
            if(C1_components_sizes[i] >0){
                if(C1_components_sizes[i] > C1_components_sizes[max_size_root]){
                    max_size_root = i;
                }
            }
        }
        hipLaunchKernelGGL(( in_class), dim3(1), dim3(n) , 0, 0, numbering, C1_components, indptr, indices, max_size_root, C1);
        hipDeviceSynchronize();
        stratify_none(numbering, C1, indptr, indices, delta / 2, n, C1_components_sizes[max_size_root]);
        hipFree(component_size);
        hipFree(C1);
        hipFree(C1_components);
        hipFree(C1_components_sizes);
    }
    hipFree(arr_even);
    hipFree(sum);
    hipFree(arr_odd);
    hipFree(adjacencies);
}
__device__ void stratify_low_degree(double *numbering, float *is_class_component, int *indptr, int *indices, double delta, int n, float *is_richer_neighbor, float c)
{
float *D, *CuB, *CuB_D, *CuB_D_components, *CuB_D_components_sum;
int *b_root;
int i, j, flag;
unsigned int pps_arr_size = (n+1)*sizeof(float);
hipMalloc((void**)&b_root, sizeof(int));
hipMalloc((void**)&D, pps_arr_size);
hipMalloc((void**)&CuB, pps_arr_size);
hipMalloc((void**)&CuB_D, pps_arr_size);
hipMalloc((void**)&CuB_D_components, pps_arr_size);
hipMalloc((void**)&CuB_D_components_sum, pps_arr_size);
float *level, *adjacencies, *in_component;
hipMalloc((void**)&adjacencies, n*n*sizeof(float));
hipMalloc((void**)&level, pps_arr_size);
hipMalloc((void**)&in_component, pps_arr_size);
hipLaunchKernelGGL(( logic_or), dim3(1), dim3(n) , 0, 0, is_richer_neighbor, is_class_component, CuB);
hipDeviceSynchronize();
hipLaunchKernelGGL(( stratify_lowdegree_getD), dim3(1), dim3(n) , 0, 0, CuB, is_class_component, indptr, indices, n, c, D);
hipDeviceSynchronize();
hipLaunchKernelGGL(( difference), dim3(1), dim3(n) , 0, 0, CuB, D, CuB_D);
hipDeviceSynchronize();
get_class_components(numbering, indptr, indices, CuB_D, n, CuB_D_components);
hipDeviceSynchronize();
parallel_prefix(CuB_D_components, CuB_D_components_sum, n);
hipDeviceSynchronize();
hipLaunchKernelGGL(( find_first), dim3(1), dim3(n) , 0, 0, CuB_D_components_sum, b_root);
hipDeviceSynchronize();
hipLaunchKernelGGL(( in_class), dim3(1), dim3(n) , 0, 0, numbering, CuB_D_components, indptr, indices, *b_root, in_component);
hipLaunchKernelGGL(( init_array), dim3(n), dim3(n) , 0, 0, adjacencies, 0);
hipDeviceSynchronize();
hipLaunchKernelGGL(( compute_adjacent_nodes), dim3(1), dim3(n) , 0, 0, indptr, indices, is_class_component, in_component, adjacencies, n);
//add_self<<< 1, n >>>(is_class_component, in_component, adjacencies, n);
spanning_tree_numbering(indptr, indices, in_component, level, *b_root, n);
float *arr_even, *arr_odd, *curr_array, *other_array, *tmp_arr_pointer, *sum;
hipMalloc((void**)&arr_odd, n*sizeof(float));
hipMalloc((void**)&arr_even, n*sizeof(float));
hipMalloc((void**)&sum, n*sizeof(float));
int current_depth;
flag = 0;
hipDeviceSynchronize();
other_array = adjacencies + (*b_root) * sizeof(float);
curr_array = arr_even;
tmp_arr_pointer = arr_odd;
other_array[*b_root] = 1;
//Flip between even and odd arrays instead of saving old values. When we go above the threshold, we use the other
//array for indices
for(i = 0, j = 0, current_depth = 2; flag == 0; i++){
if(level[i]==current_depth){
if(j > 0){
other_array = curr_array;
curr_array = tmp_arr_pointer;
tmp_arr_pointer = other_array; // for next cycle - needed due to first declaration used to skip copying
}
hipLaunchKernelGGL(( logic_or), dim3(1), dim3(n) , 0, 0, other_array, adjacencies+i*sizeof(float), curr_array);
hipDeviceSynchronize();
parallel_prefix(curr_array, sum, n);
hipDeviceSynchronize();
if(sum[n] > c * 4/5)
flag = 1;
j++;
}
if(flag == 0 && i == n-1){
i = -1;
current_depth +=1;
}
}
hipLaunchKernelGGL(( logic_and), dim3(1), dim3(n) , 0, 0, other_array, is_class_component, other_array);
hipLaunchKernelGGL(( inc_delta), dim3(1), dim3(n) , 0, 0, numbering, other_array, delta);
float *C1_components, *C1_components_sizes, *C1, *component_size, *C_A;
hipMalloc((void**)&component_size, n*sizeof(float));
hipMalloc((void**)&C1, n*sizeof(float));
hipMalloc((void**)&C1_components, n*sizeof(float));
hipMalloc((void**)&C_A, n*sizeof(float));
hipMalloc((void**)&C1_components_sizes, n*sizeof(float));
hipLaunchKernelGGL(( init_array), dim3(1), dim3(n) , 0, 0, component_size, 0);
hipLaunchKernelGGL(( difference), dim3(1), dim3(n) , 0, 0, is_class_component, other_array, C_A);
hipDeviceSynchronize();
get_class_components(numbering, indptr, indices, C_A, n, C1_components);
hipDeviceSynchronize();
hipLaunchKernelGGL(( compute_component_sizes), dim3(1), dim3(n) , 0, 0, C1_components, C1_components_sizes);
hipDeviceSynchronize();
int max_size_root = 0;
for(i = 0; i < n; i++){
if(C1_components_sizes[i] >0){
if(C1_components_sizes[i] > C1_components_sizes[max_size_root]){
max_size_root = i;
}
}
}
if(C1_components_sizes[max_size_root] > c * 4/5){
hipLaunchKernelGGL(( in_class_special), dim3(1), dim3(n) , 0, 0, numbering, C1_components, indptr, indices, max_size_root, C1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( stratify), dim3(1), dim3(n) , 0, 0, numbering, C1, indptr, indices, delta / 2, n);
hipDeviceSynchronize();
}
hipFree(adjacencies);
hipFree(level);
hipFree(in_component);
hipFree(component_size);
hipFree(C1);
hipFree(C1_components);
hipFree(C_A);
hipFree(C1_components_sizes);
hipFree(arr_odd);
hipFree(arr_even);
hipFree(sum);
hipFree(b_root);
hipFree(D);
hipFree(CuB);
hipFree(CuB_D);
hipFree(CuB_D_components);
hipFree(CuB_D_components_sum);
}
// Re-stratify one class component. Launched with one thread per node; only
// threads whose node is its own component root (roots[i] == i) proceed.
// Uses device-side dynamic parallelism: allocates scratch with hipMalloc
// and launches child kernels, then dispatches to one of three strategies
// depending on the component's "richer neighbors".
__global__ void stratify(double *numbering, float *roots, int *indptr, int *indices, double delta, int n)
{
    const int i = threadIdx.x;
    if(roots[i] != i) return;
    float *is_richer_neighbor, *high_degree, *is_class_component, *neighbors_in_c;
    float *irn_sum, *hd_sum, *icc_sum, *nic_sum;
    // Prefix-sum buffers carry one extra slot: index [n] receives the total.
    unsigned int pps_arr_size = (n+1)*sizeof(float);
    hipMalloc((void**)&is_richer_neighbor, pps_arr_size);
    hipMalloc((void**)&high_degree, pps_arr_size);
    hipMalloc((void**)&is_class_component, pps_arr_size);
    hipMalloc((void**)&neighbors_in_c, pps_arr_size);
    hipMalloc((void**)&irn_sum, pps_arr_size);
    hipMalloc((void**)&hd_sum, pps_arr_size);
    hipMalloc((void**)&icc_sum, pps_arr_size);
    hipMalloc((void**)&nic_sum, pps_arr_size);
    // Mark the nodes belonging to this root's component.
    hipLaunchKernelGGL(( in_class), dim3(1), dim3(n) , 0, 0, numbering, roots, indptr, indices, roots[i], is_class_component);
    hipDeviceSynchronize();
    // icc_sum[n] = component size c.
    parallel_prefix(is_class_component, icc_sum, n);
    hipDeviceSynchronize();
    //if(icc_sum[n] <= 1) return;
    // Find nodes outside the component that neighbor it with a larger
    // numbering; also classify them as high-degree by edge count into C.
    hipLaunchKernelGGL(( richer_neighbors), dim3(1), dim3(n) , 0, 0, numbering, roots, indptr, indices, roots[i], icc_sum[n], is_richer_neighbor, high_degree, neighbors_in_c);
    hipDeviceSynchronize();
    parallel_prefix(is_richer_neighbor, irn_sum, n);
    hipDeviceSynchronize();
    // Dispatch on the structure of the richer neighborhood.
    if(irn_sum[n] == 0)
    stratify_none(numbering, is_class_component, indptr, indices, delta, n, icc_sum[n]);
    else{
    parallel_prefix(high_degree, hd_sum, n);
    hipDeviceSynchronize();
    if(hd_sum[n] >= irn_sum[n])
    stratify_high_degree(numbering, is_class_component, indptr, indices, delta, n, is_richer_neighbor, irn_sum[n], icc_sum[n]);
    else
    stratify_low_degree(numbering, is_class_component, indptr, indices, delta, n, is_richer_neighbor, icc_sum[n]);
    }
    hipDeviceSynchronize();
    hipFree(is_richer_neighbor);
    hipFree(high_degree);
    hipFree(is_class_component);
    hipFree(neighbors_in_c);
    hipFree(irn_sum);
    hipFree(hd_sum);
    hipFree(icc_sum);
    // NOTE(review): nic_sum is allocated and freed but never used.
    hipFree(nic_sum);
}
/*
 * Read the two header integers of a graph file: node count (stored value
 * minus one, matching the format consumed by load_graph) and edge count.
 * Fixed: on failure (missing file or unreadable header) the outputs were
 * left untouched, so the caller's `if(!N || !k)` check read indeterminate
 * values. Both outputs are now zeroed first, making failure detectable.
 */
void load_graph_sizes(char *filename, int *n, int *j)
{
    *n = 0;
    *j = 0;
    FILE *fp = fopen(filename, "r");
    if(fp != NULL){
        int n1, j1;
        if(fscanf(fp, "%d %d ", &n1, &j1) == 2){
            *n = n1 - 1;
            *j = j1;
        }
        fclose(fp);
    }
}
/*
 * Load a CSR graph: header "n k d", then n indptr entries, then k index
 * entries. indptr/indices must be sized by the caller (see
 * load_graph_sizes). Fixed: fscanf results are now checked, so a missing
 * or truncated file stops reading instead of looping over garbage counts
 * and leaving array slots with indeterminate values.
 */
void load_graph(char *filename, int *indptr, int *indices)
{
    FILE *fp = fopen(filename, "r");
    if(fp == NULL) return;
    int n, k, d;
    if(fscanf(fp, "%d %d %d ", &n, &k, &d) == 3){
        for(int i = 0; i < n; i++){
            if(fscanf(fp, "%d ", &indptr[i]) != 1) break;
        }
        for(int i = 0; i < k; i++){
            if(fscanf(fp, "%d ", &indices[i]) != 1) break;
        }
    }
    fclose(fp);
}
// Debug helper: every thread of the launch prints its own element of `a`.
// Output order across threads is unspecified; intended for one-block runs.
__global__ void print_array_global(double *a)
{
    printf("%lf ", a[threadIdx.x]);
}
// Host driver: loads a CSR graph, then repeatedly (1) relabels class
// components on the GPU and (2) launches the stratify kernel to refine the
// per-node numbering with step `delta` (divided by 8 each round) until every
// node holds a distinct value or delta underflows.
int main()
{
    // NOTE(review): N and k stay uninitialized if load_graph_sizes fails to
    // open the file, so the `if(!N || !k)` check below reads garbage.
    int N, k;
    double *numbering, *numbering_gpu;
    float *mask, *roots;
    int *indptr, *indices;
    int *indptr_gpu, *indices_gpu;
    char *filename = "../graphs/graph_10.txt";
    hipError_t cudaerr;
    load_graph_sizes(filename, &N, &k);
    if(!N || !k){
    printf("Errore durante la lettura del file\n");
    return 1;
    }
    indptr = (int *)malloc((N+1) * sizeof(int));
    indices = (int *)malloc((k+1) * sizeof(int));
    load_graph(filename, indptr, indices);
    numbering = (double *)malloc((N+1)*sizeof(double));
    hipMalloc((void**)&numbering_gpu, (N+1)*sizeof(double));
    hipMalloc((void**)&mask, (N+1)*sizeof(float));
    hipMalloc((void**)&roots, (N+1)*sizeof(float));
    hipMalloc((void**)&indptr_gpu, (N+1)*sizeof(int));
    hipMalloc((void**)&indices_gpu, (k+1)*sizeof(int));
    hipMemcpy(indptr_gpu, indptr, (N+1)*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(indices_gpu, indices, (k+1)*sizeof(int), hipMemcpyHostToDevice);
    // Numbering starts at all-zero; mask=1 admits every node.
    hipLaunchKernelGGL(( init_array_double), dim3(1), dim3(N) , 0, 0, numbering_gpu, 0);
    hipLaunchKernelGGL(( init_array), dim3(1), dim3(N) , 0, 0, mask, 1);
    // hipLaunchKernelGGL(( init_array_consecutive), dim3(1), dim3(N) , 0, 0, roots);
    // Initial stratification step; shrinks by a factor of 8 per round.
    double delta = pow(8, ceil(log(N) / log(1.25)));
    int flag = 1;
    // Shared-memory sizing for the scan kernels.
    // NOTE(review): N/16 + N/16*16 ≈ N + N/16 — confirm this matches the
    // padding formula the scan implementation expects.
    int extra_space = N / 16 + N / 16*16 + 1;
    unsigned int sharedmemsize = sizeof(float) * 2 * (N + extra_space + 10);
    set<double> numbering_copy;
    cudaerr = hipDeviceSynchronize();
    if (cudaerr != hipSuccess)
    printf("INITS kernel launch failed with error \"%s\".\n",
    hipGetErrorString(cudaerr));
    while(flag && delta >= 1){
    hipLaunchKernelGGL(( init_array_consecutive), dim3(1), dim3(N) , 0, 0, roots);
    hipLaunchKernelGGL(( get_class_components_global), dim3(1), dim3(1), sharedmemsize , 0, numbering_gpu, indptr_gpu, indices_gpu, mask, N, roots);
    cudaerr = hipDeviceSynchronize();
    if (cudaerr != hipSuccess){
    printf("CLASS COMPONENT kernel launch failed with error \"%s: %s\".\n",
    hipGetErrorName(cudaerr), hipGetErrorString(cudaerr));
    }
    hipLaunchKernelGGL(( stratify), dim3(1), dim3(N), sharedmemsize , 0, numbering_gpu, roots, indptr_gpu, indices_gpu, delta, N);
    cudaerr = hipDeviceSynchronize();
    if (cudaerr != hipSuccess)
    printf("STRATIFY kernel launch failed with error \"%s\".\n",
    hipGetErrorString(cudaerr));
    flag = 0;
    delta /= 8;
    int oldsize = 0;
    // print_array_global<<< 1, N >>>(numbering_gpu);
    cudaerr = hipDeviceSynchronize();
    if (cudaerr != hipSuccess)
    printf("PRINT kernel launch failed with error \"%s\".\n",
    hipGetErrorString(cudaerr));
    hipMemcpy(numbering, numbering_gpu, N*sizeof(double), hipMemcpyDeviceToHost);
    numbering_copy.clear();
    // flag is raised again if any duplicate numbering value remains.
    for(int i=0; i<N && !flag; i++){
    numbering_copy.insert(numbering[i]);
    if(numbering_copy.size() == oldsize)
    flag = 1;
    oldsize = numbering_copy.size();
    }
    }
    for(int i=0; i<N; i++)
    printf("%lf ", numbering[i]);
    printf("\n");
    // NOTE(review): set::size() is size_t; "%d" is the wrong specifier on
    // LP64 platforms — should be "%zu".
    printf("%d\n", numbering_copy.size());
    hipFree(numbering_gpu);
    hipFree(mask);
    hipFree(indptr_gpu);
    hipFree(indices_gpu);
    hipFree(roots);
    free(indptr);
    free(indices);
    free(numbering);
    system("pause");
    return 0;
} | c747bf67dd2c24e62c1d28222726e14223af0af5.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <set>
#include <scan.cu>
#include <stratify_none.cu>
#include <stratify_high_degree.cu>
#include <stratify_low_degree.cu>
using namespace std;
__global__ void stratify(double *numbering, float *roots, int *indptr, int *indices, double delta, int n);
// Debug helper (host- or device-callable): print the n floats of `a`
// space-separated, then end the line.
// Fixed: the terminator was "\\n", which printed a literal backslash-n
// instead of emitting a newline.
__host__ __device__ void print_array(float *a, int n)
{
    for(int i=0; i<n; i++)
        printf("%f ", a[i]);
    printf("\n");
}
/*
 * Prefix-sum driver (host- or device-callable): scans d_idata into d_odata
 * over num_elements+1 slots so callers can read the grand total at index
 * [num_elements]. Work is delegated to prescanArrayRecursive, which needs
 * one block-sum buffer per recursion level.
 * Fixed: the per-level buffers and the pointer table were never released,
 * leaking device memory on every call (this routine runs inside loops).
 */
__host__ __device__ void parallel_prefix(float *d_idata, float *d_odata, int num_elements)
{
    num_elements += 1;
    float** g_scanBlockSums;
    unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
    unsigned int numElts = num_elements;
    int level = 0;
    // First pass: count how many recursion levels need a block-sum buffer.
    do
    {
        unsigned int numBlocks =
            max(1, (int)ceil((float)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            level++;
        }
        numElts = numBlocks;
    } while (numElts > 1);
    g_scanBlockSums = (float**) malloc(level * sizeof(float*));
    numElts = num_elements;
    level = 0;
    // Second pass: allocate one buffer per level.
    do
    {
        unsigned int numBlocks =
            max(1, (int)ceil((float)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            cudaMalloc((void**) &g_scanBlockSums[level++],
                       numBlocks * sizeof(float));
        }
        numElts = numBlocks;
    } while (numElts > 1);
    int num_levels = level;
    prescanArrayRecursive(d_odata, d_idata, num_elements, 0, g_scanBlockSums);
    // Wait for the scan kernels to finish before freeing their temporaries.
    cudaDeviceSynchronize();
    for (int l = 0; l < num_levels; l++)
        cudaFree(g_scanBlockSums[l]);
    free(g_scanBlockSums);
}
// Element-wise copy b[g] = a[g], one thread per element. No bounds check:
// callers size the launch to exactly cover the arrays.
__global__ void copy_array(double *a, double *b)
{
    const int g = blockDim.x * blockIdx.x + threadIdx.x;
    double value = a[g];
    b[g] = value;
}
// Fill: arr[global thread id] = val. Callers size the launch to the array.
__global__ void init_array(float *arr, float val)
{
    const int g = blockDim.x * blockIdx.x + threadIdx.x;
    arr[g] = val;
}
// Fill a double array: arr[global thread id] = val.
__global__ void init_array_double(double *arr, double val)
{
    const int g = blockDim.x * blockIdx.x + threadIdx.x;
    arr[g] = val;
}
// Iota fill: arr[g] = g (stored as float), one thread per slot.
__global__ void init_array_consecutive(float *arr)
{
    const int g = blockDim.x * blockIdx.x + threadIdx.x;
    arr[g] = (float)g;
}
// One label-propagation step of connected-component labelling restricted to
// equal-numbered neighbors: each masked-in node adopts the smallest root
// seen among itself and its neighbors with the same `numbering`, and bumps
// `changes[i]` when it moved. Masked-out nodes are parked at root -1.
// NOTE(review): roots[] is read and written concurrently by different
// threads with no synchronization; convergence relies on the caller
// (get_class_components) iterating until no thread reports a change —
// confirm this fixed-point scheme is the intent.
__global__ void split_classes(double *numbering, int *indptr, int *indices, float *mask, float *roots, float *changes)
{
    const int i = threadIdx.x;
    float min = roots[i];
    if(mask[i] == 0){
        roots[i] = -1;
        return;
    }
    for(int j = indptr[i]; j < indptr[i+1]; j++){
        if(mask[indices[j]] == 1 && numbering[i] == numbering[indices[j]] && roots[indices[j]] < min){
            min = roots[indices[j]];
        }
    }
    if(min != roots[i]){
        roots[i] = min;
        changes[i] += 1;
    }
}
// Iterate split_classes to a fixed point: on return roots[i] is the
// smallest node index of i's component among same-numbered nodes (or -1
// for masked-out nodes). Host- or device-callable.
// NOTE(review): on the host path, reading sum[n] dereferences memory
// obtained from cudaMalloc — valid only under unified/managed addressing;
// verify how this is called from the host.
__host__ __device__ void get_class_components(double *numbering, int *indptr, int *indices, float *mask, int n, float *roots)
{
    float *changes, *sum;
    cudaMalloc((void**)&changes, sizeof(float) * (n+1));
    cudaMalloc((void**)&sum, sizeof(float) * (n+1));
    //changes = (float *)malloc(sizeof(float) * (n+1));
    //sum = (float *)malloc(sizeof(float) * (n+1));
    do{
        init_array<<< 1, n >>>(changes, 0);
        cudaDeviceSynchronize();
        split_classes<<< 1, n >>>(numbering, indptr, indices, mask, roots, changes);
        cudaDeviceSynchronize();
        // sum[n] counts how many nodes changed root this round.
        parallel_prefix(changes, sum, n);
        cudaDeviceSynchronize();
    }while(sum[n] > 0);
    cudaFree(changes);
    cudaFree(sum);
}
// Kernel entry point wrapping get_class_components so the host can run the
// whole labelling loop on-device with a single <<<1,1>>> launch.
__global__ void get_class_components_global(double *numbering, int *indptr, int *indices, float *mask, int n, float *roots)
{
    get_class_components(numbering, indptr, indices, mask, n, roots);
}
/*
 * Depth labelling via dynamic parallelism: each thread takes one entry of
 * `neighbors`; if that node is unvisited (level == 0) and inside the
 * component, it is stamped with curr_level and the thread recurses into
 * the node's adjacency list.
 * Fixed: the child launch passed `indices + j*sizeof(int)`; pointer
 * arithmetic on int* already scales by the element size, so the extra
 * sizeof(int) factor jumped four entries too far per index into the
 * adjacency array.
 * NOTE(review): the __syncthreads() below is skipped by threads that
 * returned early, which is undefined behavior under divergence — confirm
 * whether the barrier is needed at all.
 */
__global__ void spanning_tree_depth(int *indptr, int *indices, float *level, float *in_component, int *neighbors, int curr_level)
{
    const int i = threadIdx.x;
    int curr_node = neighbors[i];
    if(level[curr_node] > 0 || in_component[curr_node] == 0)
        return;
    level[curr_node] = curr_level;
    int j = indptr[curr_node];
    int num_neighbors = indptr[curr_node+1] - indptr[curr_node];
    if(num_neighbors > 0){
        __syncthreads();
        spanning_tree_depth<<< 1, num_neighbors >>>(indptr, indices, level, in_component, indices + j, curr_level+1);
        cudaDeviceSynchronize();
    }
}
//outputs level of depth forming a spanning tree for a given root in component. the level-node index pair gives a unique
//depth ordering for each node in the component
// Fixed: the child launch passed `indices + j*sizeof(int)`; int* arithmetic
// already scales by the element size, so the root's neighbor list started
// at the wrong offset.
// NOTE(review): `level[root] = 1` dereferences what callers allocate with
// cudaMalloc — valid on the device path / under managed addressing only.
__host__ __device__ void spanning_tree_numbering(int *indptr, int *indices, float *in_component, float *level, int root, int n)
{
    init_array<<< 1, n >>>(level, 0);
    cudaDeviceSynchronize();
    level[root] = 1;
    int j = indptr[root];
    int num_neighbors = indptr[root + 1] - indptr[root];
    spanning_tree_depth<<< 1, num_neighbors >>>(indptr, indices, level, in_component, indices + j, 2);
    cudaDeviceSynchronize();
}
/*
 * Histogram component sizes: each thread increments the counter of its
 * node's root. `sizes` must be zero-initialized by the caller.
 * Fixed: many threads of the launch share a root, so the plain
 * `sizes[root] += 1` was a data race that lost counts; atomicAdd makes the
 * increment correct.
 * NOTE(review): split_classes parks masked-out nodes at root -1, which
 * would index sizes[-1] here — verify callers exclude such nodes.
 */
__global__ void compute_component_sizes(float *roots, float *sizes)
{
    const int i = threadIdx.x;
    int root = (int)roots[i];
    atomicAdd(&sizes[root], 1.0f);
}
/*
 * For each node outside component `root`, record whether it is a "richer
 * neighbor" (adjacent to the component with a strictly larger numbering),
 * count its edges into the component (neighbors_in_c), and flag it
 * high-degree when that count reaches 2/5 of the component size c.
 * Fixed: the threshold was written `2 / 5 * c`; 2/5 in integer arithmetic
 * is 0, so the comparison was `>= 0` and every non-member node (even with
 * zero edges into C) was classified high-degree.
 */
__global__ void richer_neighbors(double *numbering, float *roots, int *indptr, int *indices, int root, float c, float *is_richer_neighbor, float *high_degree, float *neighbors_in_c)
{
    const int i = threadIdx.x;
    is_richer_neighbor[i] = 0;
    high_degree[i] = 0;
    neighbors_in_c[i] = 0;
    if(roots[i] == root) return;
    for(int j = indptr[i]; j < indptr[i+1]; j++){
        if(numbering[i] > numbering[indices[j]] && roots[indices[j]] == root){
            is_richer_neighbor[i] = 1;
            neighbors_in_c[i] += 1;
        }
    }
    if(neighbors_in_c[i] >= 2.0f / 5.0f * c){
        high_degree[i] = 1;
    }
}
// Membership mask: is_class_component[i] = 1 iff roots[i] equals class id c.
// numbering/indptr/indices are unused but kept for signature parity with
// the sibling kernels.
__global__ void in_class(double *numbering, float *roots, int *indptr, int *indices, int c, float *is_class_component)
{
    const int node = threadIdx.x;
    is_class_component[node] = (roots[node] == c) ? 1.0f : 0.0f;
}
// Variant of in_class producing root ids instead of a 0/1 mask:
// is_class_component[i] = c for members, -1 for everything else.
__global__ void in_class_special(double *numbering, float *roots, int *indptr, int *indices, int c, float *is_class_component)
{
    const int node = threadIdx.x;
    is_class_component[node] = (roots[node] == c) ? (float)c : -1.0f;
}
// For each node of the masked set, mark it "fully connected" (1) when it
// has at least c-1 neighbors inside the set — i.e. the degree a clique
// member of size c would have. Non-members always get 0.
__global__ void is_clique(float *in_component, int *indptr, int *indices, int n, float c, float *full_connected)
{
    const int node = threadIdx.x;
    full_connected[node] = 0;
    if(in_component[node] == 0) return;
    int inside_degree = 0;
    for(int e = indptr[node]; e < indptr[node+1]; e++){
        if(in_component[indices[e]] == 1)
            inside_degree++;
    }
    full_connected[node] = (inside_degree >= c-1) ? 1.0f : 0.0f;
}
// Stream compaction: `sums` is the inclusive prefix sum of a 0/1 mask.
// Thread i was selected iff the sum stepped at i, and its compacted slot
// is sums[i+1]-1; write the index there.
__global__ void sum_array_to_list(float *sums, float *list)
{
    const int node = threadIdx.x;
    if(sums[node+1] != sums[node]){
        int slot = (int)sums[node+1] - 1;
        list[slot] = node;
    }
}
// For every node selected in the mask underlying prefix sum D_sum (the sum
// steps at that node), add its 1-based rank D_sum[i+1] to its numbering.
// indptr/indices/n are unused but kept for signature parity.
__global__ void add_i(double *numbering, float *D_sum, int *indptr, int *indices, int n)
{
    const int node = threadIdx.x;
    if(D_sum[node+1] != D_sum[node])
        numbering[node] += D_sum[node+1];
}
// Set difference on 0/1 masks, computed arithmetically: r = a AND NOT b.
__global__ void difference(float *a, float *b, float *r)
{
    const int node = threadIdx.x;
    const float not_b = 1 - b[node];
    r[node] = a[node] * not_b;
}
// `a` is the inclusive prefix sum of a 0/1 mask; the index where it rises
// from 0 to 1 is the first selected element. Exactly one thread (at most)
// matches, so the unsynchronized write is unique.
__global__ void find_first(float *a, int *first)
{
    const int node = threadIdx.x;
    if(a[node] == 0 && a[node+1] == 1)
        *first = node;
}
// Raise the numbering of every node flagged (== 1) in other_array by delta.
__global__ void inc_delta(double *numbering, float *other_array, double delta)
{
    const int node = threadIdx.x;
    if(other_array[node] != 1) return;
    numbering[node] += delta;
}
// Mark the set members adjacent to both f and s: r[i] = 1 when node i is in
// the is_class_component mask and its adjacency list contains f and s
// (counted as two hits), else 0.
__global__ void find_common_neighbors(float *is_class_component, int *indptr, int *indices, int f, int s, float *r)
{
    const int node = threadIdx.x;
    r[node] = 0;
    if(is_class_component[node] == 0) return;
    int hits = 0;
    for(int e = indptr[node]; e < indptr[node+1]; e++){
        int nb = indices[e];
        if(nb == f || nb == s)
            hits += 1;
    }
    if(hits == 2) r[node] = 1;
}
/*
 * Stratification case "no richer neighbors": refine the numbering of the
 * component marked in is_class_component (size c) from its internal
 * structure alone. Device code using dynamic parallelism throughout.
 * Visible flow: build D = stratify_none_getD's subset of C, split C\D into
 * components; if one component exceeds 4/5 c, grow a node set outward from
 * its root by spanning-tree depth layers and bump that set by delta;
 * otherwise renumber via D's clique / common-neighbor structure.
 */
__device__ void stratify_none(double *numbering, float *is_class_component, int *indptr, int *indices, double delta, int n, float c)
{
    float *D, *C_D, *D_clique, *D_diff, *D_diff_first_neigh, *D_diff_first_neigh_diff, *common_neighbors, *C_D_components;
    float *D_sum, *D_clique_sum, *D_diff_sum, *D_diff_first_neigh_sum, *common_neighbors_sum, *C_D_components_sizes;
    float rolling_sum;
    int i, j, flag, c_root;
    int *first, *second;
    cudaMalloc((void**)&first, sizeof(int));
    cudaMalloc((void**)&second, sizeof(int));
    // Prefix-sum buffers carry one extra slot: index [n] holds the total.
    unsigned int pps_arr_size = (n+1)*sizeof(float);
    cudaMalloc((void**)&D, pps_arr_size);
    cudaMalloc((void**)&C_D, pps_arr_size);
    cudaMalloc((void**)&C_D_components, pps_arr_size);
    cudaMalloc((void**)&D_clique, pps_arr_size);
    cudaMalloc((void**)&D_sum, pps_arr_size);
    cudaMalloc((void**)&D_clique_sum, pps_arr_size);
    cudaMalloc((void**)&D_diff, pps_arr_size);
    cudaMalloc((void**)&D_diff_sum, pps_arr_size);
    cudaMalloc((void**)&D_diff_first_neigh, pps_arr_size);
    cudaMalloc((void**)&D_diff_first_neigh_sum, pps_arr_size);
    cudaMalloc((void**)&D_diff_first_neigh_diff, pps_arr_size);
    cudaMalloc((void**)&common_neighbors, pps_arr_size);
    cudaMalloc((void**)&common_neighbors_sum, pps_arr_size);
    cudaMalloc((void**)&C_D_components_sizes, pps_arr_size);
    init_array<<< 1, n >>>(C_D_components_sizes, 0);
    // D: subset of the component chosen by stratify_none_getD.
    stratify_none_getD<<< 1, n >>>(is_class_component, indptr, indices, n, c, D);
    cudaDeviceSynchronize();
    // C_D = C \ D.
    difference<<< 1, n >>>(is_class_component, D, C_D);
    cudaDeviceSynchronize();
    get_class_components(numbering, indptr, indices, C_D, n, C_D_components);
    compute_component_sizes<<< 1, n >>>(C_D_components, C_D_components_sizes);
    cudaDeviceSynchronize();
    // Scan component sizes: flag=100 when a single component exceeds 4/5 c
    // (c_root remembers its root); flag=1 once the accumulated size of the
    // small components passes c/5.
    for(i = 0, rolling_sum = 0, flag = 0; i < n && flag == 0; i++)
    if(C_D_components_sizes[i] >0){
    if(C_D_components_sizes[i] > c* 4/5){
    flag = 100;
    c_root = i;
    }else{
    rolling_sum += C_D_components_sizes[i];
    if(rolling_sum > c *1/5)
    flag = 1;
    // raise flag, no component can be > 4/5
    }
    }
    if(flag>1){ //component exists
    float *level, *adjacencies, *in_component;
    cudaMalloc((void**)&adjacencies, n*n*sizeof(float));
    cudaMalloc((void**)&level, pps_arr_size);
    cudaMalloc((void**)&in_component, pps_arr_size);
    in_class<<< 1, n >>>(numbering, C_D_components, indptr, indices, c_root, in_component);
    init_array<<< n, n >>>(adjacencies, 0);
    cudaDeviceSynchronize();
    compute_adjacent_nodes<<< 1, n >>>(indptr, indices, is_class_component, in_component, adjacencies, n);
    //add_self<<< 1, n >>>(is_class_component, in_component, adjacencies, n);
    // level[v] = depth of v in a spanning tree rooted at c_root.
    spanning_tree_numbering(indptr, indices, in_component, level, c_root, n);
    float *arr_even, *arr_odd, *curr_array, *other_array, *tmp_arr_pointer, *sum;
    cudaMalloc((void**)&arr_odd, n*sizeof(float));
    cudaMalloc((void**)&arr_even, n*sizeof(float));
    cudaMalloc((void**)&sum, n*sizeof(float));
    int current_depth;
    flag = 0;
    cudaDeviceSynchronize();
    // NOTE(review): adjacencies is an n*n matrix, so a row pointer should
    // presumably be adjacencies + c_root*n; the `*sizeof(float)` scaling
    // here (and in the loop below) looks wrong for float* arithmetic —
    // verify against compute_adjacent_nodes' layout.
    other_array = adjacencies + c_root*sizeof(float);
    curr_array = arr_even;
    tmp_arr_pointer = arr_odd;
    other_array[c_root] = 1;
    //Flip between even and odd arrays instead of saving old values. When we go above the threshold, we use the other
    //array for indices
    for(i = 0, j = 0, current_depth = 2; flag == 0; i++){
    if(level[i]==current_depth){
    if(j > 0){
    other_array = curr_array;
    curr_array = tmp_arr_pointer;
    tmp_arr_pointer = other_array; // for next cycle - needed due to first declaration used to skip copying
    }
    logic_or<<< 1, n >>>(other_array, adjacencies+i*sizeof(float), curr_array);
    cudaDeviceSynchronize();
    parallel_prefix(curr_array, sum, n);
    cudaDeviceSynchronize();
    // Stop once the grown set covers more than 4/5 of the component.
    if(sum[n] > c * 4/5)
    flag = 1;
    j++;
    }
    if(flag == 0 && i == n-1){
    i = 0; // 0 is either c_root or not in component, increment irrelevant
    current_depth +=1;
    }
    }
    // Raise the selected set's numbering by delta.
    inc_delta<<< 1, n >>>(numbering, other_array, delta);
    // NOTE(review): adjacencies/level/in_component/arr_odd/arr_even/sum are
    // not freed on this branch — device memory leaks on every call.
    }else{
    //number of members of D
    parallel_prefix(D, D_sum, n);
    //get number of neighbors in D of nodes in D
    is_clique<<< 1, n >>>(D, indptr, indices, n, c, D_clique);
    cudaDeviceSynchronize();
    //check if they are all connected
    parallel_prefix(D_clique, D_clique_sum, n);
    cudaDeviceSynchronize();
    if(D_clique_sum[n] == D_sum[n]){
    // D is a clique: offset each member's numbering by its rank in D.
    add_i<<< 1, n >>>(numbering, D_sum, indptr, indices, n);
    }else{
    // D is not a clique: pick two non-adjacent members (first, second)
    // and renumber by the ranks of their common neighbors in D.
    difference<<< 1, n >>>(D, D_clique, D_diff);
    cudaDeviceSynchronize();
    parallel_prefix(D_diff, D_diff_sum, n);
    cudaDeviceSynchronize();
    find_first<<< 1, n >>>(D_diff_sum, first);
    cudaDeviceSynchronize();
    // NOTE(review): D_diff_first_neigh is used without being zeroed first
    // (cudaMalloc does not initialize) — verify this is intentional.
    D_diff_first_neigh[*first] = 1;
    for(int j = indptr[*first]; j < indptr[*first + 1]; j++){
    if(D_diff[indices[j]] == 1){
    D_diff_first_neigh[indices[j]] = 1;
    }
    }
    difference<<< 1, n >>>(D_diff, D_diff_first_neigh, D_diff_first_neigh_diff);
    cudaDeviceSynchronize();
    parallel_prefix(D_diff_first_neigh_diff, D_diff_first_neigh_sum, n);
    cudaDeviceSynchronize();
    find_first<<< 1, n >>>(D_diff_first_neigh_sum, second);
    cudaDeviceSynchronize();
    find_common_neighbors<<< 1, n >>>(D, indptr, indices, *first, *second, common_neighbors);
    cudaDeviceSynchronize();
    parallel_prefix(common_neighbors, common_neighbors_sum, n);
    cudaDeviceSynchronize();
    add_i<<< 1, n >>>(numbering, common_neighbors_sum, indptr, indices, n);
    }
    }
    cudaFree(D);
    cudaFree(C_D);
    cudaFree(C_D_components);
    cudaFree(D_clique);
    cudaFree(D_sum);
    cudaFree(D_clique_sum);
    cudaFree(D_diff);
    cudaFree(D_diff_sum);
    cudaFree(D_diff_first_neigh);
    cudaFree(D_diff_first_neigh_sum);
    cudaFree(D_diff_first_neigh_diff);
    cudaFree(common_neighbors);
    // NOTE(review): first, second and C_D_components_sizes are never freed.
    cudaFree(common_neighbors_sum);
}
/*
 * Stratification case "mostly high-degree richer neighbors": intersect the
 * component-adjacency rows of successive richer neighbors (logic_and over
 * flip-flopping even/odd buffers) until the intersection drops below
 * icc_num/5; bump the last set still above the threshold by delta. If all
 * irn_num richer neighbors were consumed without dropping below, recurse
 * into the largest remaining component with stratify_none at delta/2.
 */
__device__ void stratify_high_degree(double *numbering, float *is_class_component, int *indptr, int *indices, double delta, int n, float *is_richer_neighbor, float irn_num, float icc_num)
{
    float *adjacencies;
    cudaMalloc((void**)&adjacencies, n*n*sizeof(float));
    init_array<<< n, n >>>(adjacencies, 0);
    cudaDeviceSynchronize();
    compute_adjacent_nodes<<< 1, n >>>(indptr, indices, is_class_component, is_richer_neighbor, adjacencies, n);
    cudaDeviceSynchronize();
    float *arr_even, *arr_odd, *curr_array, *other_array, *sum;
    cudaMalloc((void**)&arr_odd, n*sizeof(float));
    init_array<<< 1, n >>>(arr_odd, 1); //this array will be the first to be used for the logical and, we will write into arr_even
    cudaMalloc((void**)&arr_even, n*sizeof(float));
    cudaMalloc((void**)&sum, n*sizeof(float));
    int i, j, flag;
    flag = 0;
    cudaDeviceSynchronize();
    //Flip between even and odd arrays instead of saving old values. When we go below the threshold, we use the other
    //array for indices
    for(i = 0, j = 0; i < n && flag == 0; i++){
    if(is_richer_neighbor[i]){
    if(j%2 == 0){
    curr_array = arr_even;
    other_array = arr_odd;
    }else{
    curr_array = arr_odd;
    other_array = arr_even;
    }
    // NOTE(review): adjacencies is an n*n matrix; a row pointer should
    // presumably be adjacencies + i*n — the `*sizeof(float)` scaling on a
    // float* looks wrong. Verify against compute_adjacent_nodes' layout.
    logic_and<<< 1, n >>>(other_array, adjacencies+i*sizeof(float), curr_array);
    cudaDeviceSynchronize();
    if(j > 0){
    parallel_prefix(curr_array, sum, n);
    cudaDeviceSynchronize();
    if(sum[n] < icc_num/5)
    flag = 1;
    }
    j++;
    }
    }
    // NOTE(review): if no richer neighbor is seen (j stays 0) other_array is
    // uninitialized here — callers guarantee irn_num > 0; confirm.
    inc_delta<<< 1, n >>>(numbering, other_array, delta);
    if(j == irn_num){
    float *C1_components, *C1_components_sizes, *C1, *component_size;
    cudaMalloc((void**)&component_size, n*sizeof(float));
    cudaMalloc((void**)&C1, n*sizeof(float));
    cudaMalloc((void**)&C1_components, n*sizeof(float));
    cudaMalloc((void**)&C1_components_sizes, n*sizeof(float));
    // NOTE(review): component_size is zeroed but never used, while
    // C1_components_sizes is accumulated into without initialization —
    // looks like the init targets the wrong buffer.
    init_array<<< 1, n >>>(component_size, 0);
    get_class_components(numbering, indptr, indices, other_array, n, C1_components);
    compute_component_sizes<<< 1, n >>>(C1_components, C1_components_sizes);
    cudaDeviceSynchronize();
    // Pick the root of the largest component.
    int max_size_root = 0;
    for(i = 0; i < n; i++){
    if(C1_components_sizes[i] >0){
    if(C1_components_sizes[i] > C1_components_sizes[max_size_root]){
    max_size_root = i;
    }
    }
    }
    in_class<<< 1, n >>>(numbering, C1_components, indptr, indices, max_size_root, C1);
    cudaDeviceSynchronize();
    stratify_none(numbering, C1, indptr, indices, delta / 2, n, C1_components_sizes[max_size_root]);
    cudaFree(component_size);
    cudaFree(C1);
    cudaFree(C1_components);
    cudaFree(C1_components_sizes);
    }
    cudaFree(arr_even);
    cudaFree(sum);
    cudaFree(arr_odd);
    cudaFree(adjacencies);
}
/*
 * Stratification case "mostly low-degree richer neighbors": build
 * CuB = C ∪ B (component plus richer neighbors), compute D via
 * stratify_lowdegree_getD, take the first component of CuB \ D as a base
 * set, then grow it outward by spanning-tree depth layers (logic_or over
 * flip-flopping buffers) until it covers more than 4/5 c. The grown set,
 * intersected back with the component, is bumped by delta; if the leftover
 * part of C still has a component larger than 4/5 c, recurse via a nested
 * stratify launch at delta/2.
 */
__device__ void stratify_low_degree(double *numbering, float *is_class_component, int *indptr, int *indices, double delta, int n, float *is_richer_neighbor, float c)
{
    float *D, *CuB, *CuB_D, *CuB_D_components, *CuB_D_components_sum;
    int *b_root;
    int i, j, flag;
    // Prefix-sum buffers carry one extra slot: index [n] holds the total.
    unsigned int pps_arr_size = (n+1)*sizeof(float);
    cudaMalloc((void**)&b_root, sizeof(int));
    cudaMalloc((void**)&D, pps_arr_size);
    cudaMalloc((void**)&CuB, pps_arr_size);
    cudaMalloc((void**)&CuB_D, pps_arr_size);
    cudaMalloc((void**)&CuB_D_components, pps_arr_size);
    cudaMalloc((void**)&CuB_D_components_sum, pps_arr_size);
    float *level, *adjacencies, *in_component;
    cudaMalloc((void**)&adjacencies, n*n*sizeof(float));
    cudaMalloc((void**)&level, pps_arr_size);
    cudaMalloc((void**)&in_component, pps_arr_size);
    // CuB = component OR richer neighbors.
    logic_or<<< 1, n >>>(is_richer_neighbor, is_class_component, CuB);
    cudaDeviceSynchronize();
    stratify_lowdegree_getD<<< 1, n >>>(CuB, is_class_component, indptr, indices, n, c, D);
    cudaDeviceSynchronize();
    difference<<< 1, n >>>(CuB, D, CuB_D);
    cudaDeviceSynchronize();
    get_class_components(numbering, indptr, indices, CuB_D, n, CuB_D_components);
    cudaDeviceSynchronize();
    parallel_prefix(CuB_D_components, CuB_D_components_sum, n);
    cudaDeviceSynchronize();
    // b_root = first node selected in CuB \ D.
    find_first<<< 1, n >>>(CuB_D_components_sum, b_root);
    cudaDeviceSynchronize();
    in_class<<< 1, n >>>(numbering, CuB_D_components, indptr, indices, *b_root, in_component);
    init_array<<< n, n >>>(adjacencies, 0);
    cudaDeviceSynchronize();
    compute_adjacent_nodes<<< 1, n >>>(indptr, indices, is_class_component, in_component, adjacencies, n);
    //add_self<<< 1, n >>>(is_class_component, in_component, adjacencies, n);
    // level[v] = depth of v in a spanning tree rooted at b_root.
    spanning_tree_numbering(indptr, indices, in_component, level, *b_root, n);
    float *arr_even, *arr_odd, *curr_array, *other_array, *tmp_arr_pointer, *sum;
    cudaMalloc((void**)&arr_odd, n*sizeof(float));
    cudaMalloc((void**)&arr_even, n*sizeof(float));
    cudaMalloc((void**)&sum, n*sizeof(float));
    int current_depth;
    flag = 0;
    cudaDeviceSynchronize();
    // NOTE(review): adjacencies is an n*n matrix; a row pointer should
    // presumably be adjacencies + (*b_root)*n — the `*sizeof(float)`
    // scaling on a float* looks wrong. Verify against
    // compute_adjacent_nodes' layout.
    other_array = adjacencies + (*b_root) * sizeof(float);
    curr_array = arr_even;
    tmp_arr_pointer = arr_odd;
    other_array[*b_root] = 1;
    //Flip between even and odd arrays instead of saving old values. When we go above the threshold, we use the other
    //array for indices
    // NOTE(review): if no layer ever pushes the set above 4/5 c this loop
    // never terminates (i is reset to -1 each full sweep) — confirm the
    // algorithm guarantees the threshold is reached.
    for(i = 0, j = 0, current_depth = 2; flag == 0; i++){
    if(level[i]==current_depth){
    if(j > 0){
    other_array = curr_array;
    curr_array = tmp_arr_pointer;
    tmp_arr_pointer = other_array; // for next cycle - needed due to first declaration used to skip copying
    }
    logic_or<<< 1, n >>>(other_array, adjacencies+i*sizeof(float), curr_array);
    cudaDeviceSynchronize();
    parallel_prefix(curr_array, sum, n);
    cudaDeviceSynchronize();
    if(sum[n] > c * 4/5)
    flag = 1;
    j++;
    }
    if(flag == 0 && i == n-1){
    i = -1;
    current_depth +=1;
    }
    }
    // Keep only component members of the grown set, then bump them by delta.
    logic_and<<< 1, n >>>(other_array, is_class_component, other_array);
    inc_delta<<< 1, n >>>(numbering, other_array, delta);
    float *C1_components, *C1_components_sizes, *C1, *component_size, *C_A;
    cudaMalloc((void**)&component_size, n*sizeof(float));
    cudaMalloc((void**)&C1, n*sizeof(float));
    cudaMalloc((void**)&C1_components, n*sizeof(float));
    cudaMalloc((void**)&C_A, n*sizeof(float));
    cudaMalloc((void**)&C1_components_sizes, n*sizeof(float));
    // NOTE(review): component_size is zeroed but never used, while
    // C1_components_sizes is accumulated into without initialization —
    // looks like the init targets the wrong buffer.
    init_array<<< 1, n >>>(component_size, 0);
    // C_A = component minus the bumped set.
    difference<<< 1, n >>>(is_class_component, other_array, C_A);
    cudaDeviceSynchronize();
    get_class_components(numbering, indptr, indices, C_A, n, C1_components);
    cudaDeviceSynchronize();
    compute_component_sizes<<< 1, n >>>(C1_components, C1_components_sizes);
    cudaDeviceSynchronize();
    // Pick the root of the largest leftover component.
    int max_size_root = 0;
    for(i = 0; i < n; i++){
    if(C1_components_sizes[i] >0){
    if(C1_components_sizes[i] > C1_components_sizes[max_size_root]){
    max_size_root = i;
    }
    }
    }
    // Recurse on the leftover component only if it is still large.
    if(C1_components_sizes[max_size_root] > c * 4/5){
    in_class_special<<< 1, n >>>(numbering, C1_components, indptr, indices, max_size_root, C1);
    cudaDeviceSynchronize();
    stratify<<< 1, n >>>(numbering, C1, indptr, indices, delta / 2, n);
    cudaDeviceSynchronize();
    }
    cudaFree(adjacencies);
    cudaFree(level);
    cudaFree(in_component);
    cudaFree(component_size);
    cudaFree(C1);
    cudaFree(C1_components);
    cudaFree(C_A);
    cudaFree(C1_components_sizes);
    cudaFree(arr_odd);
    cudaFree(arr_even);
    cudaFree(sum);
    cudaFree(b_root);
    cudaFree(D);
    cudaFree(CuB);
    cudaFree(CuB_D);
    cudaFree(CuB_D_components);
    cudaFree(CuB_D_components_sum);
}
// Re-stratify one class component. Launched with one thread per node; only
// threads whose node is its own component root (roots[i] == i) proceed.
// Uses device-side dynamic parallelism: allocates scratch with cudaMalloc
// and launches child kernels, then dispatches to one of three strategies
// depending on the component's "richer neighbors".
__global__ void stratify(double *numbering, float *roots, int *indptr, int *indices, double delta, int n)
{
    const int i = threadIdx.x;
    if(roots[i] != i) return;
    float *is_richer_neighbor, *high_degree, *is_class_component, *neighbors_in_c;
    float *irn_sum, *hd_sum, *icc_sum, *nic_sum;
    // Prefix-sum buffers carry one extra slot: index [n] receives the total.
    unsigned int pps_arr_size = (n+1)*sizeof(float);
    cudaMalloc((void**)&is_richer_neighbor, pps_arr_size);
    cudaMalloc((void**)&high_degree, pps_arr_size);
    cudaMalloc((void**)&is_class_component, pps_arr_size);
    cudaMalloc((void**)&neighbors_in_c, pps_arr_size);
    cudaMalloc((void**)&irn_sum, pps_arr_size);
    cudaMalloc((void**)&hd_sum, pps_arr_size);
    cudaMalloc((void**)&icc_sum, pps_arr_size);
    cudaMalloc((void**)&nic_sum, pps_arr_size);
    // Mark the nodes belonging to this root's component.
    in_class<<< 1, n >>>(numbering, roots, indptr, indices, roots[i], is_class_component);
    cudaDeviceSynchronize();
    // icc_sum[n] = component size c.
    parallel_prefix(is_class_component, icc_sum, n);
    cudaDeviceSynchronize();
    //if(icc_sum[n] <= 1) return;
    // Find nodes outside the component that neighbor it with a larger
    // numbering; also classify them as high-degree by edge count into C.
    richer_neighbors<<< 1, n >>>(numbering, roots, indptr, indices, roots[i], icc_sum[n], is_richer_neighbor, high_degree, neighbors_in_c);
    cudaDeviceSynchronize();
    parallel_prefix(is_richer_neighbor, irn_sum, n);
    cudaDeviceSynchronize();
    // Dispatch on the structure of the richer neighborhood.
    if(irn_sum[n] == 0)
    stratify_none(numbering, is_class_component, indptr, indices, delta, n, icc_sum[n]);
    else{
    parallel_prefix(high_degree, hd_sum, n);
    cudaDeviceSynchronize();
    if(hd_sum[n] >= irn_sum[n])
    stratify_high_degree(numbering, is_class_component, indptr, indices, delta, n, is_richer_neighbor, irn_sum[n], icc_sum[n]);
    else
    stratify_low_degree(numbering, is_class_component, indptr, indices, delta, n, is_richer_neighbor, icc_sum[n]);
    }
    cudaDeviceSynchronize();
    cudaFree(is_richer_neighbor);
    cudaFree(high_degree);
    cudaFree(is_class_component);
    cudaFree(neighbors_in_c);
    cudaFree(irn_sum);
    cudaFree(hd_sum);
    cudaFree(icc_sum);
    // NOTE(review): nic_sum is allocated and freed but never used.
    cudaFree(nic_sum);
}
/*
 * Read the two header integers of a graph file: node count (stored value
 * minus one, matching the format consumed by load_graph) and edge count.
 * Fixed: on failure (missing file or unreadable header) the outputs were
 * left untouched, so the caller's `if(!N || !k)` check read indeterminate
 * values. Both outputs are now zeroed first, making failure detectable.
 */
void load_graph_sizes(char *filename, int *n, int *j)
{
    *n = 0;
    *j = 0;
    FILE *fp = fopen(filename, "r");
    if(fp != NULL){
        int n1, j1;
        if(fscanf(fp, "%d %d ", &n1, &j1) == 2){
            *n = n1 - 1;
            *j = j1;
        }
        fclose(fp);
    }
}
/*
 * Load a CSR graph: header "n k d", then n indptr entries, then k index
 * entries. indptr/indices must be sized by the caller (see
 * load_graph_sizes). Fixed: fscanf results are now checked, so a missing
 * or truncated file stops reading instead of looping over garbage counts
 * and leaving array slots with indeterminate values.
 */
void load_graph(char *filename, int *indptr, int *indices)
{
    FILE *fp = fopen(filename, "r");
    if(fp == NULL) return;
    int n, k, d;
    if(fscanf(fp, "%d %d %d ", &n, &k, &d) == 3){
        for(int i = 0; i < n; i++){
            if(fscanf(fp, "%d ", &indptr[i]) != 1) break;
        }
        for(int i = 0; i < k; i++){
            if(fscanf(fp, "%d ", &indices[i]) != 1) break;
        }
    }
    fclose(fp);
}
// Debug helper: every thread of the launch prints its own element of `a`.
// Output order across threads is unspecified; intended for one-block runs.
__global__ void print_array_global(double *a)
{
    printf("%lf ", a[threadIdx.x]);
}
/*
 * Host driver: loads a CSR graph, then repeatedly (1) relabels class
 * components on the GPU and (2) launches the stratify kernel to refine the
 * per-node numbering with step `delta` (divided by 8 each round) until
 * every node holds a distinct value or delta underflows.
 * Fixes: N/k are zero-initialized so the `!N || !k` failure check no
 * longer reads indeterminate values when the file cannot be opened;
 * `filename` is const char* (string literals are const in C++);
 * set::size() is printed with %zu instead of %d (size_t vs int is UB on
 * LP64); oldsize is size_t to match the unsigned comparison.
 */
int main()
{
    int N = 0, k = 0;
    double *numbering, *numbering_gpu;
    float *mask, *roots;
    int *indptr, *indices;
    int *indptr_gpu, *indices_gpu;
    const char *filename = "../graphs/graph_10.txt";
    cudaError_t cudaerr;
    load_graph_sizes((char *)filename, &N, &k);
    if(!N || !k){
        printf("Errore durante la lettura del file\n");
        return 1;
    }
    indptr = (int *)malloc((N+1) * sizeof(int));
    indices = (int *)malloc((k+1) * sizeof(int));
    load_graph((char *)filename, indptr, indices);
    numbering = (double *)malloc((N+1)*sizeof(double));
    cudaMalloc((void**)&numbering_gpu, (N+1)*sizeof(double));
    cudaMalloc((void**)&mask, (N+1)*sizeof(float));
    cudaMalloc((void**)&roots, (N+1)*sizeof(float));
    cudaMalloc((void**)&indptr_gpu, (N+1)*sizeof(int));
    cudaMalloc((void**)&indices_gpu, (k+1)*sizeof(int));
    cudaMemcpy(indptr_gpu, indptr, (N+1)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(indices_gpu, indices, (k+1)*sizeof(int), cudaMemcpyHostToDevice);
    // Numbering starts at all-zero; mask=1 admits every node.
    init_array_double<<< 1, N >>>(numbering_gpu, 0);
    init_array<<< 1, N >>>(mask, 1);
    // init_array_consecutive<<< 1, N >>>(roots);
    // Initial stratification step; shrinks by a factor of 8 per round.
    double delta = pow(8, ceil(log(N) / log(1.25)));
    int flag = 1;
    // Shared-memory sizing for the scan kernels (padding headroom).
    int extra_space = N / 16 + N / 16*16 + 1;
    unsigned int sharedmemsize = sizeof(float) * 2 * (N + extra_space + 10);
    set<double> numbering_copy;
    cudaerr = cudaDeviceSynchronize();
    if (cudaerr != cudaSuccess)
        printf("INITS kernel launch failed with error \"%s\".\n",
               cudaGetErrorString(cudaerr));
    while(flag && delta >= 1){
        init_array_consecutive<<< 1, N >>>(roots);
        get_class_components_global<<< 1, 1, sharedmemsize >>>(numbering_gpu, indptr_gpu, indices_gpu, mask, N, roots);
        cudaerr = cudaDeviceSynchronize();
        if (cudaerr != cudaSuccess){
            printf("CLASS COMPONENT kernel launch failed with error \"%s: %s\".\n",
                   cudaGetErrorName(cudaerr), cudaGetErrorString(cudaerr));
        }
        stratify<<< 1, N, sharedmemsize >>>(numbering_gpu, roots, indptr_gpu, indices_gpu, delta, N);
        cudaerr = cudaDeviceSynchronize();
        if (cudaerr != cudaSuccess)
            printf("STRATIFY kernel launch failed with error \"%s\".\n",
                   cudaGetErrorString(cudaerr));
        flag = 0;
        delta /= 8;
        size_t oldsize = 0;
        // print_array_global<<< 1, N >>>(numbering_gpu);
        cudaerr = cudaDeviceSynchronize();
        if (cudaerr != cudaSuccess)
            printf("PRINT kernel launch failed with error \"%s\".\n",
                   cudaGetErrorString(cudaerr));
        cudaMemcpy(numbering, numbering_gpu, N*sizeof(double), cudaMemcpyDeviceToHost);
        numbering_copy.clear();
        // flag is raised again if any duplicate numbering value remains.
        for(int i=0; i<N && !flag; i++){
            numbering_copy.insert(numbering[i]);
            if(numbering_copy.size() == oldsize)
                flag = 1;
            oldsize = numbering_copy.size();
        }
    }
    for(int i=0; i<N; i++)
        printf("%lf ", numbering[i]);
    printf("\n");
    printf("%zu\n", numbering_copy.size());
    cudaFree(numbering_gpu);
    cudaFree(mask);
    cudaFree(indptr_gpu);
    cudaFree(indices_gpu);
    cudaFree(roots);
    free(indptr);
    free(indices);
    free(numbering);
    system("pause");  // Windows-style pause; harmless elsewhere
    return 0;
}
6b53ddd30117de744781b054cf3618967fde9a5c.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
const char logical_and_name[] = "logical_and_kernel";
void logical_and_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto logical_and_string = jiterator_stringify(
template <typename T>
T logical_and_kernel(T a, T b) {
return a && b;
}
); // logical_and_string
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_and_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ logical_and_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, logical_and_string);
}); // logical_and_string
#else
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_and_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16,
dtype, "logical_and_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
}
}
void logical_or_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, ScalarType::BFloat16,
iter.common_dtype(), "logical_or_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a || b;
});
});
}
void logical_xor_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, ScalarType::BFloat16,
iter.common_dtype(), "logical_xor_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return bool(a) != bool(b);
});
});
}
REGISTER_DISPATCH(logical_and_stub, &logical_and_kernel_cuda);
REGISTER_DISPATCH(logical_or_stub, &logical_or_kernel_cuda);
REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda);
}} // namespace at::native
| 6b53ddd30117de744781b054cf3618967fde9a5c.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
const char logical_and_name[] = "logical_and_kernel";
void logical_and_kernel_cuda(TensorIterator& iter) {
auto dtype = iter.common_dtype();
if (at::isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto logical_and_string = jiterator_stringify(
template <typename T>
T logical_and_kernel(T a, T b) {
return a && b;
}
); // logical_and_string
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_and_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ logical_and_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, logical_and_string);
}); // logical_and_string
#else
AT_DISPATCH_COMPLEX_TYPES(dtype, "logical_and_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
#endif
} else {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16,
dtype, "logical_and_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
}
}
void logical_or_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, ScalarType::BFloat16,
iter.common_dtype(), "logical_or_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a || b;
});
});
}
void logical_xor_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, ScalarType::BFloat16,
iter.common_dtype(), "logical_xor_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return bool(a) != bool(b);
});
});
}
REGISTER_DISPATCH(logical_and_stub, &logical_and_kernel_cuda);
REGISTER_DISPATCH(logical_or_stub, &logical_or_kernel_cuda);
REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda);
}} // namespace at::native
|
a1feb29296f6a18fdaae62438a1890ec42c2313b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads,
const Dtype* input_data, const Dtype* target, Dtype* loss,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int target_value = static_cast<int>(target[i]);
if (has_ignore_label_ && target_value == ignore_label_) {
loss[i] = 0;
counts[i] = 0;
} else {
loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) -
log(1 + exp(input_data[i] - 2 * input_data[i] *
(input_data[i] >= 0)));
counts[i] = 1;
}
}
}
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count,
const int ignore_label, const Dtype* target, Dtype* diff) {
CUDA_KERNEL_LOOP(i, count) {
const int target_value = static_cast<int>(target[i]);
if (target_value == ignore_label) {
diff[i] = 0;
}
}
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the sigmoid outputs.
sigmoid_bottom_vec_[0] = bottom[0];
sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
// Compute the loss (negative log likelihood)
const int count = bottom[0]->count();
// Stable version of loss computation from input data
const Dtype* input_data = bottom[0]->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
// Since this memory is not used for anything, we use it here to avoid having
// to allocate new GPU memory to accumulate intermediate results.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
Dtype* count_data = bottom[1]->mutable_gpu_diff();
Dtype valid_count;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SigmoidCrossEntropyLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, input_data, target, loss_data,
has_ignore_label_, ignore_label_, count_data);
// Only launch another CUDA kernel if we actually need the valid count.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(count, count_data, &valid_count);
} else {
valid_count = count;
}
Dtype loss;
caffe_gpu_asum(count, loss_data, &loss);
normalizer_ = get_normalizer(normalization_, valid_count);
top[0]->mutable_cpu_data()[0] = loss / normalizer_;
// Clear scratch memory to prevent interfering with backward (see #6202).
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
// First, compute the diff
const int count = bottom[0]->count();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(count, sigmoid_output_data, bottom_diff);
caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
// Zero out gradient of ignored targets.
if (has_ignore_label_) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SigmoidCrossEntropyLossIgnoreDiffGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, ignore_label_, target, bottom_diff);
}
// Scale down gradient
Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_;
caffe_gpu_scal(count, loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer);
} // namespace caffe
| a1feb29296f6a18fdaae62438a1890ec42c2313b.cu | /*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <vector>
#include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads,
const Dtype* input_data, const Dtype* target, Dtype* loss,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(i, nthreads) {
const int target_value = static_cast<int>(target[i]);
if (has_ignore_label_ && target_value == ignore_label_) {
loss[i] = 0;
counts[i] = 0;
} else {
loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) -
log(1 + exp(input_data[i] - 2 * input_data[i] *
(input_data[i] >= 0)));
counts[i] = 1;
}
}
}
template <typename Dtype>
__global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count,
const int ignore_label, const Dtype* target, Dtype* diff) {
CUDA_KERNEL_LOOP(i, count) {
const int target_value = static_cast<int>(target[i]);
if (target_value == ignore_label) {
diff[i] = 0;
}
}
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the sigmoid outputs.
sigmoid_bottom_vec_[0] = bottom[0];
sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_);
// Compute the loss (negative log likelihood)
const int count = bottom[0]->count();
// Stable version of loss computation from input data
const Dtype* input_data = bottom[0]->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
// Since this memory is not used for anything, we use it here to avoid having
// to allocate new GPU memory to accumulate intermediate results.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
Dtype* count_data = bottom[1]->mutable_gpu_diff();
Dtype valid_count;
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidCrossEntropyLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, input_data, target, loss_data,
has_ignore_label_, ignore_label_, count_data);
// Only launch another CUDA kernel if we actually need the valid count.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(count, count_data, &valid_count);
} else {
valid_count = count;
}
Dtype loss;
caffe_gpu_asum(count, loss_data, &loss);
normalizer_ = get_normalizer(normalization_, valid_count);
top[0]->mutable_cpu_data()[0] = loss / normalizer_;
// Clear scratch memory to prevent interfering with backward (see #6202).
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff());
}
template <typename Dtype>
void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
// First, compute the diff
const int count = bottom[0]->count();
const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data();
const Dtype* target = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(count, sigmoid_output_data, bottom_diff);
caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff);
// Zero out gradient of ignored targets.
if (has_ignore_label_) {
// NOLINT_NEXT_LINE(whitespace/operators)
SigmoidCrossEntropyLossIgnoreDiffGPU<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, ignore_label_, target, bottom_diff);
}
// Scale down gradient
Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_;
caffe_gpu_scal(count, loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer);
} // namespace caffe
|
95c4db34c07b1e3dd247e0502d2300b591b16f41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void BilinearResampleSubImageKernel(float *input, float *output, float* subImageDefs, bool safeBounds, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int size = outputWidth * outputHeight;
if (id < size)
{
float subImgCX = subImageDefs[0]; // <-1, 1>
float subImgCY = subImageDefs[1]; // <-1, 1>
float subImgDiameter = subImageDefs[2]; // <0,1>
int maxDiameter = min(inputWidth - 1, inputHeight - 1);
int diameterPix = (int)(subImgDiameter * maxDiameter);
diameterPix = max(1, diameterPix);
diameterPix = min(maxDiameter, diameterPix);
int subImgX = (int)(inputWidth * (subImgCX + 1) * 0.5f) - diameterPix / 2;
int subImgY = (int)(inputHeight * (subImgCY + 1) * 0.5f) - diameterPix / 2;
if (safeBounds)
{
subImgX = max(subImgX, 1);
subImgY = max(subImgY, 1);
subImgX = min(subImgX, inputWidth - diameterPix - 1);
subImgY = min(subImgY, inputHeight - diameterPix - 1);
}
int px = id % outputWidth;
int py = id / outputWidth;
float xRatio = (float)(diameterPix - 1) / (outputWidth - 1);
float yRatio = (float)(diameterPix - 1) / (outputHeight - 1);
int x = (int) (xRatio * px);
int y = (int) (yRatio * py);
if (x + subImgX >= 0 && y + subImgY >= 0 &&
x + subImgX < inputWidth && y + subImgY < inputHeight)
{
// X and Y distance difference
float xDist = (xRatio * px) - x;
float yDist = (yRatio * py) - y;
// Points
float topLeft= input[(y + subImgY) * inputWidth + x + subImgX];
float topRight = input[(y + subImgY) * inputWidth + x + subImgX + 1];
float bottomLeft = input[(y + subImgY + 1) * inputWidth + x + subImgX];
float bottomRight = input[(y + subImgY + 1) * inputWidth + x + subImgX + 1];
float result =
topLeft * (1 - xDist) * (1 - yDist) +
topRight * xDist * (1 - yDist) +
bottomLeft * yDist * (1 - xDist) +
bottomRight * xDist * yDist;
output[py * outputWidth + px] = result;
}
}
} | 95c4db34c07b1e3dd247e0502d2300b591b16f41.cu | #include "includes.h"
__global__ void BilinearResampleSubImageKernel(float *input, float *output, float* subImageDefs, bool safeBounds, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int size = outputWidth * outputHeight;
if (id < size)
{
float subImgCX = subImageDefs[0]; // <-1, 1>
float subImgCY = subImageDefs[1]; // <-1, 1>
float subImgDiameter = subImageDefs[2]; // <0,1>
int maxDiameter = min(inputWidth - 1, inputHeight - 1);
int diameterPix = (int)(subImgDiameter * maxDiameter);
diameterPix = max(1, diameterPix);
diameterPix = min(maxDiameter, diameterPix);
int subImgX = (int)(inputWidth * (subImgCX + 1) * 0.5f) - diameterPix / 2;
int subImgY = (int)(inputHeight * (subImgCY + 1) * 0.5f) - diameterPix / 2;
if (safeBounds)
{
subImgX = max(subImgX, 1);
subImgY = max(subImgY, 1);
subImgX = min(subImgX, inputWidth - diameterPix - 1);
subImgY = min(subImgY, inputHeight - diameterPix - 1);
}
int px = id % outputWidth;
int py = id / outputWidth;
float xRatio = (float)(diameterPix - 1) / (outputWidth - 1);
float yRatio = (float)(diameterPix - 1) / (outputHeight - 1);
int x = (int) (xRatio * px);
int y = (int) (yRatio * py);
if (x + subImgX >= 0 && y + subImgY >= 0 &&
x + subImgX < inputWidth && y + subImgY < inputHeight)
{
// X and Y distance difference
float xDist = (xRatio * px) - x;
float yDist = (yRatio * py) - y;
// Points
float topLeft= input[(y + subImgY) * inputWidth + x + subImgX];
float topRight = input[(y + subImgY) * inputWidth + x + subImgX + 1];
float bottomLeft = input[(y + subImgY + 1) * inputWidth + x + subImgX];
float bottomRight = input[(y + subImgY + 1) * inputWidth + x + subImgX + 1];
float result =
topLeft * (1 - xDist) * (1 - yDist) +
topRight * xDist * (1 - yDist) +
bottomLeft * yDist * (1 - xDist) +
bottomRight * xDist * yDist;
output[py * outputWidth + px] = result;
}
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.