hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
f0a3588cb3837430a7ceb8d2fd2c01458c47d5cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SDSC Summer Institute 2017
// Andreas Goetz (agoetz@sdsc.edu)
// CUDA program to square matrix elements in parallel on the GPU
//
#include<stdio.h>
// define matrix size, number of blocks NBL and threads per block TPB
#define NROW 2048
#define NCOL 512
#define NBLX 32
#define NBLY 32
#define TPBX 16
#define TPBY 16
//
// CUDA device function that squares elements of a 2D array
//
__global__ void square(int *arr, int maxrow, int maxcol){
int row = threadIdx.x + blockDim.x * blockIdx.x;
int colinit = threadIdx.y + blockDim.y * blockIdx.y;
int rowstride = gridDim.x * blockDim.x;
int colstride = gridDim.y * blockDim.y;
int pos;
// operate on all submatrices
while (row < maxrow) {
int col = colinit;
while (col < maxcol) {
pos = row*maxcol + col;
arr[pos] *= arr[pos];
col += colstride;
}
row += rowstride;
}
}
//
// main program
//
int main(void){
int h_a[NROW][NCOL];
int *d_a;
int size = NROW * NCOL * sizeof(int);
int i, j, err;
// allocate device memory
hipMalloc((void **)&d_a, size);
// initialize matrix
for (i=0; i<NROW; i++){
for (j=0; j<NCOL; j++){
h_a[i][j] = i+j;
// printf("Element (%d,%d) = %d\n",i,j,h_a[i][j]);
}
}
// copy input data to device
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
// add vectors by launching a sufficient number of blocks of the add() kernel
printf("\nLaunching kernel to square matrix elements...\n");
printf("Matrix elements = %d x %d = %d\n",NROW,NCOL,NROW*NCOL);
printf("Blocks = %d x %d = %d\n",NBLX,NBLY,NBLX*NBLY);
printf("Threads per block = %d x %d = %d\n",TPBX,TPBY,TPBX*TPBY);
printf("Kernel copies = %d\n",NBLX*NBLY*TPBX*TPBY);
hipLaunchKernelGGL(( square), dim3(dim3(NBLX,NBLY)),dim3(dim3(TPBX,TPBY)), 0, 0, d_a, NROW, NCOL);
// copy results back to host
hipMemcpy(h_a, d_a, size, hipMemcpyDeviceToHost);
// deallocate memory
hipFree(d_a);
// check results
err = 0;
for (i=0; i<NROW; i++){
for (j=0; j<NCOL; j++){
if (h_a[i][j] != (i+j)*(i+j)) err += 1;
//printf("Element (%d,%d) = %d\n",i,j,h_a[i][j]);
}
}
if (err != 0){
printf("\n Error, %d elements do not match!\n\n", err);
} else {
printf("\n Success! All elements match.\n\n");
}
return 0;
}
| f0a3588cb3837430a7ceb8d2fd2c01458c47d5cb.cu | // SDSC Summer Institute 2017
// Andreas Goetz (agoetz@sdsc.edu)
// CUDA program to square matrix elements in parallel on the GPU
//
#include<stdio.h>
// define matrix size, number of blocks NBL and threads per block TPB
#define NROW 2048
#define NCOL 512
#define NBLX 32
#define NBLY 32
#define TPBX 16
#define TPBY 16
//
// CUDA device function that squares elements of a 2D array
//
__global__ void square(int *arr, int maxrow, int maxcol){
int row = threadIdx.x + blockDim.x * blockIdx.x;
int colinit = threadIdx.y + blockDim.y * blockIdx.y;
int rowstride = gridDim.x * blockDim.x;
int colstride = gridDim.y * blockDim.y;
int pos;
// operate on all submatrices
while (row < maxrow) {
int col = colinit;
while (col < maxcol) {
pos = row*maxcol + col;
arr[pos] *= arr[pos];
col += colstride;
}
row += rowstride;
}
}
//
// main program
//
int main(void){
int h_a[NROW][NCOL];
int *d_a;
int size = NROW * NCOL * sizeof(int);
int i, j, err;
// allocate device memory
cudaMalloc((void **)&d_a, size);
// initialize matrix
for (i=0; i<NROW; i++){
for (j=0; j<NCOL; j++){
h_a[i][j] = i+j;
// printf("Element (%d,%d) = %d\n",i,j,h_a[i][j]);
}
}
// copy input data to device
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
// add vectors by launching a sufficient number of blocks of the add() kernel
printf("\nLaunching kernel to square matrix elements...\n");
printf("Matrix elements = %d x %d = %d\n",NROW,NCOL,NROW*NCOL);
printf("Blocks = %d x %d = %d\n",NBLX,NBLY,NBLX*NBLY);
printf("Threads per block = %d x %d = %d\n",TPBX,TPBY,TPBX*TPBY);
printf("Kernel copies = %d\n",NBLX*NBLY*TPBX*TPBY);
square<<<dim3(NBLX,NBLY),dim3(TPBX,TPBY)>>>(d_a, NROW, NCOL);
// copy results back to host
cudaMemcpy(h_a, d_a, size, cudaMemcpyDeviceToHost);
// deallocate memory
cudaFree(d_a);
// check results
err = 0;
for (i=0; i<NROW; i++){
for (j=0; j<NCOL; j++){
if (h_a[i][j] != (i+j)*(i+j)) err += 1;
//printf("Element (%d,%d) = %d\n",i,j,h_a[i][j]);
}
}
if (err != 0){
printf("\n Error, %d elements do not match!\n\n", err);
} else {
printf("\n Success! All elements match.\n\n");
}
return 0;
}
|
10b33c4b1d5aaf839e51583a25b25161926aad5a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void check_index()
{
printf("threadIdx: (%d,%d,%d) blockIdx: (%d,%d,%d) blockDim: (%d,%d,%d) gridDim: (%d,%d,%d)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z,
gridDim.x, gridDim.y, blockDim.z
);
}
int main( int argc, char **argv)
{
int Ndata = 10;
dim3 block(3);
dim3 grid( (Ndata+block.x-1)/block.x );
printf("grid: (%d,%d,%d)\n", grid.x, grid.y, grid.z);
printf("blocks: (%d,%d,%d)\n", block.x, block.y, block.z);
hipLaunchKernelGGL(( check_index), dim3(grid), dim3(block), 0, 0, );
hipDeviceReset();
return 0;
} | 10b33c4b1d5aaf839e51583a25b25161926aad5a.cu | #include <cuda_runtime.h>
#include <stdio.h>
__global__ void check_index()
{
printf("threadIdx: (%d,%d,%d) blockIdx: (%d,%d,%d) blockDim: (%d,%d,%d) gridDim: (%d,%d,%d)\n",
threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z,
gridDim.x, gridDim.y, blockDim.z
);
}
int main( int argc, char **argv)
{
int Ndata = 10;
dim3 block(3);
dim3 grid( (Ndata+block.x-1)/block.x );
printf("grid: (%d,%d,%d)\n", grid.x, grid.y, grid.z);
printf("blocks: (%d,%d,%d)\n", block.x, block.y, block.z);
check_index<<<grid, block>>>();
cudaDeviceReset();
return 0;
} |
c841219eb18daba89ee344ee7043d1b408ff12f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void glcm_calculation_180(int *A,int *glcm, const int nx, const int ny,int max){
//int iy = threadIdx.y + blockIdx.y* blockDim.y;
unsigned int idx =blockIdx.x*nx+threadIdx.x;
int i;
int k=0;
for(i=0;i<nx;i++){
if(idx>=i*nx && idx<((i+1) *nx)-1){
k=max*A[idx+1]+A[idx];
atomicAdd(&glcm[k],1);
}
}
} | c841219eb18daba89ee344ee7043d1b408ff12f6.cu | #include "includes.h"
__global__ void glcm_calculation_180(int *A,int *glcm, const int nx, const int ny,int max){
//int iy = threadIdx.y + blockIdx.y* blockDim.y;
unsigned int idx =blockIdx.x*nx+threadIdx.x;
int i;
int k=0;
for(i=0;i<nx;i++){
if(idx>=i*nx && idx<((i+1) *nx)-1){
k=max*A[idx+1]+A[idx];
atomicAdd(&glcm[k],1);
}
}
} |
e98edfa3d79642dac1acdd3bea486ca55ac6965e.hip | // !!! This is a file automatically generated by hipify!!!
#include "distance_transforms.h"
#include <hip/hip_runtime_api.h>
namespace dart {
// kernels
template <typename Real, bool takeSqrt>
__global__ void gpu_distanceTransform1D(const Real * fIn, Real * fOut, int n, Real * z, int * v) {
int k = 0;
v[0] = 0;
z[0] = -INF;
z[1] = +INF;
for (int q = 1; q <= n-1; q++) {
Real s = ((fIn[q]+q*q) - (fIn[v[k]]+v[k]*v[k]))/(2*q-2*v[k]);
while (s <= z[k]) {
k--;
s = ((fIn[q]+q*q)-(fIn[v[k]]+v[k]*v[k]))/(2*q-2*v[k]);
}
k++;
v[k] = q;
z[k] = s;
z[k+1] = +INF;
}
k = 0;
for (int q = 0; q <= n-1; q++) {
while (z[k+1] < q)
k++;
if (takeSqrt)
fOut[q] = sqrtf((q-v[k])*(q-v[k]) + fIn[v[k]]);
else
fOut[q] = (q-v[k])*(q-v[k]) + fIn[v[k]];
}
}
template <typename Real, bool takeSqrt>
__global__ void gpu_stridedDistanceTransform1D(const Real * fIns, Real * fOuts, const unsigned int len, const unsigned int num,
const unsigned int outerStride, const unsigned int innerStride,
Real * zs, int * vs) {
const unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid >= num)
return;
const Real * fIn = &fIns[tid*outerStride];
Real * fOut = &fOuts[tid*outerStride];
Real * z = zs + tid*(len+1);
int * v = vs + tid*len;
int k = 0;
v[0] = 0;
z[0] = -INF;
z[1] = +INF;
for (int q = 1; q <= len-1; q++) {
Real s = ((fIn[q*innerStride]+q*q) - (fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
while (s <= z[k]) {
k--;
s = ((fIn[q*innerStride]+q*q)-(fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
}
k++;
v[k] = q;
z[k] = s;
z[k+1] = +INF;
}
k = 0;
for (int q = 0; q <= len-1; q++) {
while (z[k+1] < q)
k++;
if (takeSqrt)
fOut[q*innerStride] = sqrtf((q-v[k])*(q-v[k]) + fIn[v[k]*innerStride]);
else
fOut[q*innerStride] = (q-v[k])*(q-v[k]) + fIn[v[k]*innerStride];
}
}
template <typename Real, bool takeSqrt>
__global__ void gpu_doublyStridedDistanceTransform1D(const Real * fIns, Real * fOuts, const unsigned int len, const unsigned int maxA, const unsigned int maxB,
const unsigned int outerStrideA, const unsigned int outerStrideB, const unsigned int innerStride,
Real * zs, int * vs) {
const unsigned int a = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int b = blockIdx.y*blockDim.y + threadIdx.y;
if (a >= maxA || b >= maxB)
return;
const Real * fIn = &fIns[a*outerStrideA + b*outerStrideB];
Real * fOut = &fOuts[a*outerStrideA + b*outerStrideB];
const unsigned int tid = a + b*maxA;
Real * z = zs + (tid)*(len+1);
int * v = vs + tid*len;
int k = 0;
v[0] = 0;
z[0] = -INF;
z[1] = +INF;
for (int q = 1; q <= len-1; q++) {
Real s = ((fIn[q*innerStride]+q*q) - (fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
while (s <= z[k]) {
k--;
s = ((fIn[q*innerStride]+q*q)-(fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
}
k++;
v[k] = q;
z[k] = s;
z[k+1] = +INF;
}
k = 0;
for (int q = 0; q <= len-1; q++) {
while (z[k+1] < q)
k++;
if (takeSqrt)
fOut[q*innerStride] = sqrtf((q-v[k])*(q-v[k]) + fIn[v[k]*innerStride]);
else
fOut[q*innerStride] = (q-v[k])*(q-v[k]) + fIn[v[k]*innerStride];
}
}
template <typename Real>
__global__ void gpu_seedInverseTransform2D(const Real * im, Real * seed, const unsigned int width, const unsigned int height) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + width*y;
if (im[index] == 0) {
seed[index] = INF;
if (x > 0) {
if (im[index-1] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (x < width - 1 ) {
if (im[index+1] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (y > 0) {
if (im[index-width] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (y < height - 1) {
if (im[index+width] != 0.0) {
seed[index] = 0.0;
return;
}
}
}
else {
seed[index] = 0;
}
}
template <typename Real>
__global__ void gpu_seedInverseTransform3D(const Real * im, Real * seed, const unsigned int width, const unsigned int height, const unsigned int depth) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= width || y >= height || z >= depth)
return;
const int index = x + width*(y + height*z);
if (im[index] == 0) {
seed[index] = INF;
if (x > 0) {
if (im[index-1] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (x < width - 1 ) {
if (im[index+1] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (y > 0) {
if (im[index-width] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (y < height - 1) {
if (im[index+width] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (z > 0) {
if (im[index-width*height] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (z < depth - 1) {
if (im[index+width*height] != 0.0) {
seed[index] = 0.0;
return;
}
}
}
else {
seed[index] = 0;
}
}
template <typename Real>
__global__ void gpu_combineTransforms2D(Real * im, Real * neg, Real * mask, const unsigned int width, const unsigned int height) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + width*y;
if (mask[index] == 0) {
im[index] = -neg[index];
}
}
template <typename Real>
__global__ void gpu_combineTransforms3D(Real * im, Real * neg, Real * mask, const unsigned int width, const unsigned int height, const unsigned int depth) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= width || y >= height || z >= depth)
return;
const int index = x + width*(y + height*z);
if (mask[index] == 0) {
im[index] = -neg[index];
}
}
// host interface functions
template <typename Real>
void distanceTransform1D(const Real * in, Real * out, const unsigned int width, bool takeSqrt) {
dim3 block(1,1,1);
dim3 grid(1,1,1);
Real * z; hipMalloc(&z,(width+1)*sizeof(Real));
int * v; hipMalloc(&v,width*sizeof(int));
if (takeSqrt) {
hipLaunchKernelGGL(( gpu_distanceTransform1D<Real,true>), dim3(grid),dim3(block), 0, 0, in,out,width,z,v);
} else {
hipLaunchKernelGGL(( gpu_distanceTransform1D<Real,false>), dim3(grid),dim3(block), 0, 0, in,out,width,z,v);
}
hipDeviceSynchronize();
hipFree(z);
hipFree(v);
}
template <typename Real>
void distanceTransform1D(const Real * in, Real * out, const unsigned int width, bool takeSqrt, Real * zScratch, int * vScratch) {
dim3 block(1,1,1);
dim3 grid(1,1,1);
if (takeSqrt) {
hipLaunchKernelGGL(( gpu_distanceTransform1D<Real,true>), dim3(grid),dim3(block), 0, 0, in,out,width,zScratch,vScratch);
} else {
hipLaunchKernelGGL(( gpu_distanceTransform1D<Real,false>), dim3(grid),dim3(block), 0, 0, in,out,width,zScratch,vScratch);
}
}
template <typename Real, bool takeSqrt>
void distanceTransform2D(Real * im, Real * scratch, const unsigned int width, const unsigned int height) {
Real * zScratch; hipMalloc(&zScratch,(width+1)*(height+1)*sizeof(Real));
int * vScratch; hipMalloc(&vScratch,width*height*sizeof(int));
distanceTransform2D<Real,takeSqrt>(im,scratch,width,height,zScratch,vScratch);
hipFree(zScratch);
hipFree(vScratch);
}
template <typename Real>
__global__ void gpu_stridedDistanceTransform1Da(const Real * fIns, Real * fOuts, Real * fIntermediates, const unsigned int len, const unsigned int num,
const unsigned int outerStride, const unsigned int innerStride,
Real * zs, int * vs) {
const unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid >= num)
return;
const Real * fIn = &fIns[tid*outerStride];
Real * fOut = &fOuts[tid*outerStride];
Real * fIntermediate = &fIntermediates[tid*outerStride];
Real * z = zs + tid*(len+1);
int * v = vs + tid*len;
int k = 0;
v[0] = 0;
z[0] = -INF;
z[1] = +INF;
for (int q = 1; q <= len-1; q++) {
Real s = ((fIn[q*innerStride]+q*q) - (fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
while (s <= z[k]) {
k--;
s = ((fIn[q*innerStride]+q*q)-(fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
}
k++;
v[k] = q;
z[k] = s;
z[k+1] = +INF;
}
k = 0;
for (int q = 0; q <= len-1; q++) {
while (z[k+1] < q)
k++;
fIntermediate[q*innerStride] = fIn[v[k]*innerStride];
fOut[q*innerStride] = (q-v[k])*(q-v[k]) + fIn[v[k]*innerStride];
}
}
template <typename Real, bool takeSqrt>
__global__ void gpu_stridedDistanceTransform1Db(const Real * fIns, const Real * fIntermediates, Real * fOuts, const unsigned int len, const unsigned int num,
const unsigned int outerStride, const unsigned int innerStride,
Real * zs, int * vs) {
const unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid >= num)
return;
const Real * fIn = &fIns[tid*outerStride];
const Real * fIntermediate = &fIntermediates[tid*outerStride];
Real * fOut = &fOuts[tid*outerStride];
Real * z = zs + tid*(len+1);
int * v = vs + tid*len;
int k = 0;
v[0] = 0;
z[0] = -INF;
z[1] = +INF;
for (int q = 1; q <= len-1; q++) {
Real s = ((fIn[q*innerStride]+q*q) - (fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
while (s <= z[k]) {
k--;
s = ((fIn[q*innerStride]+q*q)-(fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
}
k++;
v[k] = q;
z[k] = s;
z[k+1] = +INF;
}
k = 0;
for (int q = 0; q <= len-1; q++) {
while (z[k+1] < q)
k++;
if (takeSqrt)
fOut[q*innerStride] = sqrtf((q-v[k])*(q-v[k]) + fIn[v[k]*innerStride] - fIntermediate[v[k]*innerStride]) + fIntermediate[v[k]*innerStride];
else
fOut[q*innerStride] = (q-v[k])*(q-v[k]) + fIn[v[k]*innerStride];
}
}
template <typename Real, bool takeSqrt>
void distanceTransform2D(Real * im, Real * scratch, const unsigned int width, const unsigned int height, Real * zScratch, int * vScratch) {
dim3 block(64,1,1);
dim3 grid( ceil( height / (float)block.x), 1, 1);
Real *fIntermediate;
hipMalloc(&fIntermediate,width*height*sizeof(Real));
// x-direction
hipLaunchKernelGGL(( gpu_stridedDistanceTransform1Da<Real>), dim3(grid),dim3(block), 0, 0, im,scratch,fIntermediate,width,height,width,1,zScratch,vScratch);
hipDeviceSynchronize();
grid = dim3( ceil( width / (float)block.x), 1, 1);
// y-direction
hipLaunchKernelGGL(( gpu_stridedDistanceTransform1Db<Real,takeSqrt>), dim3(grid),dim3(block), 0, 0, scratch,fIntermediate,im,height,width,1,width,zScratch,vScratch);
}
template <typename Real, bool takeSqrt>
void distanceTransform3D(Real * in, Real * out, const unsigned int width, const unsigned int height, const unsigned int depth) {
Real * zScratch; hipMalloc(&zScratch,(width+1)*(height+1)*(depth+1)*sizeof(Real));
int * vScratch; hipMalloc(&vScratch,width*height*depth*sizeof(int));
distanceTransform3D<Real,takeSqrt>(in,out,width,height,depth,zScratch,vScratch);
hipFree(zScratch);
hipFree(vScratch);
}
template <typename Real, bool takeSqrt>
void distanceTransform3D(Real * in, Real * out, const unsigned int width, const unsigned int height, const unsigned int depth, Real * zScratch, int * vScratch) {
dim3 block(64,1,1);
dim3 grid( ceil( height*depth / (float)block.x), 1, 1);
// x-direction
hipLaunchKernelGGL(( gpu_stridedDistanceTransform1D<Real,false>), dim3(grid),dim3(block), 0, 0, in,out,width,height*depth,width,1,
zScratch,vScratch);
hipDeviceSynchronize();
// z-direction
grid = dim3( ceil(width*height / (float)block.x), 1, 1);
hipLaunchKernelGGL(( gpu_stridedDistanceTransform1D<Real,false>), dim3(grid),dim3(block), 0, 0, out,in,depth,width*height,1,width*height,
zScratch,vScratch);
hipDeviceSynchronize();
block = dim3(16,16,1);
grid = dim3(ceil(width / (float)block.x), ceil(depth / (float)block.y), 1);
// y-direction
hipLaunchKernelGGL(( gpu_doublyStridedDistanceTransform1D<Real,takeSqrt>), dim3(grid),dim3(block), 0, 0, in,out,height,width,depth,1,width*height,width,
zScratch,vScratch);
}
template <typename Real, bool takeSqrt>
void signedDistanceTransform2D(Real * in, Real * out, const unsigned int width, const unsigned int height) {
Real *zScratch; hipMalloc(&zScratch,(width+1)*(height+1)*sizeof(Real));
int *vScratch; hipMalloc(&vScratch,width*height*sizeof(int));
signedDistanceTransform2D<Real,takeSqrt>(in,out,width,height,zScratch,vScratch);
hipFree(zScratch);
hipFree(vScratch);
}
template <typename Real, bool takeSqrt>
void signedDistanceTransform2D(Real * in, Real * out, const unsigned int width, const unsigned int height,
Real * zScratch, int *vScratch) {
Real * imScratch1; hipMalloc(&imScratch1,width*height*sizeof(Real));
Real * imScratch2; hipMalloc(&imScratch2,width*height*sizeof(Real));
signedDistanceTransform2D<Real,takeSqrt>(in,out,width,height,zScratch,vScratch,imScratch1,imScratch2);
hipFree(imScratch1);
hipFree(imScratch2);
}
template <typename Real, bool takeSqrt>
void signedDistanceTransform2D(Real * in, Real * out, const unsigned int width, const unsigned int height,
Real * zScratch, int * vScratch, Real * imScratch1, Real * imScratch2) {
dim3 block(16,8,1);
dim3 grid(ceil(width/(float)block.x),ceil(height/(float)block.y),1);
Real * seed = out;
hipLaunchKernelGGL(( gpu_seedInverseTransform2D), dim3(grid),dim3(block), 0, 0, in,seed,width,height);
hipDeviceSynchronize();
Real * mask = imScratch1;
hipMemcpy(mask,in,width*height*sizeof(Real),hipMemcpyDeviceToDevice);
distanceTransform2D<Real,takeSqrt>(in,imScratch2,width,height,zScratch,vScratch);
hipDeviceSynchronize();
distanceTransform2D<Real,takeSqrt>(seed,imScratch2,width,height,zScratch,vScratch);
hipDeviceSynchronize();
Real * inDT = in;
Real * seedDT = seed;
hipLaunchKernelGGL(( gpu_combineTransforms2D), dim3(grid),dim3(block), 0, 0, inDT,seedDT,mask,width,height);
hipMemcpy(out,inDT,width*height*sizeof(Real),hipMemcpyDeviceToDevice);
}
template <typename Real, bool takeSqrt>
void signedDistanceTransform3D(Real * in, Real * out, const unsigned int width, const unsigned int height, const unsigned int depth) {
Real * zScratch; hipMalloc(&zScratch,(width+1)*(height+1)*(depth+1)*sizeof(Real));
int * vScratch; hipMalloc(&vScratch,width*height*depth*sizeof(int));
signedDistanceTransform3D<Real,takeSqrt>(in,out,width,height,depth,zScratch,vScratch);
hipFree(zScratch);
hipFree(vScratch);
}
template <typename Real, bool takeSqrt>
void signedDistanceTransform3D(Real * in, Real * out, const unsigned int width, const unsigned int height, const unsigned int depth,
Real * zScratch, int * vScratch) {
Real * imScratch; hipMalloc(&imScratch,width*height*depth*sizeof(Real));
signedDistanceTransform3D<Real,takeSqrt>(in,out,width,height,depth,zScratch,vScratch,imScratch);
hipFree(imScratch);
}
template <typename Real, bool takeSqrt>
void signedDistanceTransform3D(Real * in, Real * out, const unsigned int width, const unsigned int height, const unsigned int depth,
Real * zScratch, int * vScratch, Real * imScratch) {
dim3 block(8,8,4);
dim3 grid(ceil(width/(float)block.x),ceil(height/(float)block.y),ceil(depth/(float)block.z));
Real * seed = out;
hipLaunchKernelGGL(( gpu_seedInverseTransform3D), dim3(grid),dim3(block), 0, 0, in,seed,width,height,depth);
hipDeviceSynchronize();
Real * seedDT = imScratch;
distanceTransform3D<Real,takeSqrt>(seed,seedDT,width,height,depth,zScratch,vScratch);
hipDeviceSynchronize();
Real * inDT = out;
distanceTransform3D<Real,takeSqrt>(in,inDT,width,height,depth,zScratch,vScratch);
hipDeviceSynchronize();
Real * mask = in;
hipLaunchKernelGGL(( gpu_combineTransforms3D), dim3(grid),dim3(block), 0, 0, inDT,seedDT,mask,width,height,depth);
}
#define DECLARE_DISTANCE_TRANSFORM_1D(REAL) \
template void distanceTransform1D<REAL>(const REAL * in, REAL * out, const unsigned int width, bool takeSqrt); \
template void distanceTransform1D<REAL>(const REAL * in, REAL * out, const unsigned int width, bool takeSqrt, REAL * zScratch, int * vScratch);
#define DECLARE_DISTANCE_TRANSFORM_2D(REAL,TAKE_SQRT) \
template void distanceTransform2D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height); \
template void distanceTransform2D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, REAL * zScratch, int * vScratch);
#define DECLARE_DISTANCE_TRANSFORM_3D(REAL,TAKE_SQRT) \
template void distanceTransform3D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, const unsigned int depth); \
template void distanceTransform3D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, const unsigned int depth, REAL * zScratch, int * vScratch);
#define DECLARE_SIGNED_DISTANCE_TRANSFORM_2D(REAL,TAKE_SQRT) \
template void signedDistanceTransform2D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height); \
template void signedDistanceTransform2D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, REAL * zScratch, int * vScratch); \
template void signedDistanceTransform2D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, REAL * zScratch, int * vScratch, REAL * imScratch1, REAL * imScratch2);
#define DECLARE_SIGNED_DISTANCE_TRANSFORM_3D(REAL,TAKE_SQRT) \
template void signedDistanceTransform3D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, const unsigned int depth); \
template void signedDistanceTransform3D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, const unsigned int depth, REAL * zScratch, int * vScratch); \
template void signedDistanceTransform3D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, const unsigned int depth, REAL * zScratch, int * vScratch, REAL * imScratch);
#define DECLARE_ALL_DISTANCE_TRANSFORMS(REAL,TAKE_SQRT) \
DECLARE_DISTANCE_TRANSFORM_2D(REAL,TAKE_SQRT) \
DECLARE_DISTANCE_TRANSFORM_3D(REAL,TAKE_SQRT) \
DECLARE_SIGNED_DISTANCE_TRANSFORM_2D(REAL,TAKE_SQRT) \
DECLARE_SIGNED_DISTANCE_TRANSFORM_3D(REAL,TAKE_SQRT)
#define DECLARE_DISTANCE_TRANSFORMS(REAL) \
DECLARE_DISTANCE_TRANSFORM_1D(REAL)
DECLARE_ALL_DISTANCE_TRANSFORMS(float,false)
DECLARE_ALL_DISTANCE_TRANSFORMS(float,true)
DECLARE_ALL_DISTANCE_TRANSFORMS(double,false)
DECLARE_ALL_DISTANCE_TRANSFORMS(double,true)
DECLARE_DISTANCE_TRANSFORMS(float)
DECLARE_DISTANCE_TRANSFORMS(double)
}
| e98edfa3d79642dac1acdd3bea486ca55ac6965e.cu | #include "distance_transforms.h"
#include <cuda_runtime_api.h>
namespace dart {
// kernels
template <typename Real, bool takeSqrt>
__global__ void gpu_distanceTransform1D(const Real * fIn, Real * fOut, int n, Real * z, int * v) {
int k = 0;
v[0] = 0;
z[0] = -INF;
z[1] = +INF;
for (int q = 1; q <= n-1; q++) {
Real s = ((fIn[q]+q*q) - (fIn[v[k]]+v[k]*v[k]))/(2*q-2*v[k]);
while (s <= z[k]) {
k--;
s = ((fIn[q]+q*q)-(fIn[v[k]]+v[k]*v[k]))/(2*q-2*v[k]);
}
k++;
v[k] = q;
z[k] = s;
z[k+1] = +INF;
}
k = 0;
for (int q = 0; q <= n-1; q++) {
while (z[k+1] < q)
k++;
if (takeSqrt)
fOut[q] = sqrtf((q-v[k])*(q-v[k]) + fIn[v[k]]);
else
fOut[q] = (q-v[k])*(q-v[k]) + fIn[v[k]];
}
}
template <typename Real, bool takeSqrt>
__global__ void gpu_stridedDistanceTransform1D(const Real * fIns, Real * fOuts, const unsigned int len, const unsigned int num,
const unsigned int outerStride, const unsigned int innerStride,
Real * zs, int * vs) {
const unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid >= num)
return;
const Real * fIn = &fIns[tid*outerStride];
Real * fOut = &fOuts[tid*outerStride];
Real * z = zs + tid*(len+1);
int * v = vs + tid*len;
int k = 0;
v[0] = 0;
z[0] = -INF;
z[1] = +INF;
for (int q = 1; q <= len-1; q++) {
Real s = ((fIn[q*innerStride]+q*q) - (fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
while (s <= z[k]) {
k--;
s = ((fIn[q*innerStride]+q*q)-(fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
}
k++;
v[k] = q;
z[k] = s;
z[k+1] = +INF;
}
k = 0;
for (int q = 0; q <= len-1; q++) {
while (z[k+1] < q)
k++;
if (takeSqrt)
fOut[q*innerStride] = sqrtf((q-v[k])*(q-v[k]) + fIn[v[k]*innerStride]);
else
fOut[q*innerStride] = (q-v[k])*(q-v[k]) + fIn[v[k]*innerStride];
}
}
template <typename Real, bool takeSqrt>
__global__ void gpu_doublyStridedDistanceTransform1D(const Real * fIns, Real * fOuts, const unsigned int len, const unsigned int maxA, const unsigned int maxB,
const unsigned int outerStrideA, const unsigned int outerStrideB, const unsigned int innerStride,
Real * zs, int * vs) {
const unsigned int a = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int b = blockIdx.y*blockDim.y + threadIdx.y;
if (a >= maxA || b >= maxB)
return;
const Real * fIn = &fIns[a*outerStrideA + b*outerStrideB];
Real * fOut = &fOuts[a*outerStrideA + b*outerStrideB];
const unsigned int tid = a + b*maxA;
Real * z = zs + (tid)*(len+1);
int * v = vs + tid*len;
int k = 0;
v[0] = 0;
z[0] = -INF;
z[1] = +INF;
for (int q = 1; q <= len-1; q++) {
Real s = ((fIn[q*innerStride]+q*q) - (fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
while (s <= z[k]) {
k--;
s = ((fIn[q*innerStride]+q*q)-(fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
}
k++;
v[k] = q;
z[k] = s;
z[k+1] = +INF;
}
k = 0;
for (int q = 0; q <= len-1; q++) {
while (z[k+1] < q)
k++;
if (takeSqrt)
fOut[q*innerStride] = sqrtf((q-v[k])*(q-v[k]) + fIn[v[k]*innerStride]);
else
fOut[q*innerStride] = (q-v[k])*(q-v[k]) + fIn[v[k]*innerStride];
}
}
template <typename Real>
__global__ void gpu_seedInverseTransform2D(const Real * im, Real * seed, const unsigned int width, const unsigned int height) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + width*y;
if (im[index] == 0) {
seed[index] = INF;
if (x > 0) {
if (im[index-1] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (x < width - 1 ) {
if (im[index+1] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (y > 0) {
if (im[index-width] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (y < height - 1) {
if (im[index+width] != 0.0) {
seed[index] = 0.0;
return;
}
}
}
else {
seed[index] = 0;
}
}
template <typename Real>
__global__ void gpu_seedInverseTransform3D(const Real * im, Real * seed, const unsigned int width, const unsigned int height, const unsigned int depth) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= width || y >= height || z >= depth)
return;
const int index = x + width*(y + height*z);
if (im[index] == 0) {
seed[index] = INF;
if (x > 0) {
if (im[index-1] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (x < width - 1 ) {
if (im[index+1] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (y > 0) {
if (im[index-width] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (y < height - 1) {
if (im[index+width] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (z > 0) {
if (im[index-width*height] != 0.0) {
seed[index] = 0.0;
return;
}
}
if (z < depth - 1) {
if (im[index+width*height] != 0.0) {
seed[index] = 0.0;
return;
}
}
}
else {
seed[index] = 0;
}
}
template <typename Real>
__global__ void gpu_combineTransforms2D(Real * im, Real * neg, Real * mask, const unsigned int width, const unsigned int height) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + width*y;
if (mask[index] == 0) {
im[index] = -neg[index];
}
}
template <typename Real>
__global__ void gpu_combineTransforms3D(Real * im, Real * neg, Real * mask, const unsigned int width, const unsigned int height, const unsigned int depth) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int z = blockIdx.z*blockDim.z + threadIdx.z;
if (x >= width || y >= height || z >= depth)
return;
const int index = x + width*(y + height*z);
if (mask[index] == 0) {
im[index] = -neg[index];
}
}
// host interface functions
template <typename Real>
void distanceTransform1D(const Real * in, Real * out, const unsigned int width, bool takeSqrt) {
dim3 block(1,1,1);
dim3 grid(1,1,1);
Real * z; cudaMalloc(&z,(width+1)*sizeof(Real));
int * v; cudaMalloc(&v,width*sizeof(int));
if (takeSqrt) {
gpu_distanceTransform1D<Real,true><<<grid,block>>>(in,out,width,z,v);
} else {
gpu_distanceTransform1D<Real,false><<<grid,block>>>(in,out,width,z,v);
}
cudaDeviceSynchronize();
cudaFree(z);
cudaFree(v);
}
template <typename Real>
void distanceTransform1D(const Real * in, Real * out, const unsigned int width, bool takeSqrt, Real * zScratch, int * vScratch) {
dim3 block(1,1,1);
dim3 grid(1,1,1);
if (takeSqrt) {
gpu_distanceTransform1D<Real,true><<<grid,block>>>(in,out,width,zScratch,vScratch);
} else {
gpu_distanceTransform1D<Real,false><<<grid,block>>>(in,out,width,zScratch,vScratch);
}
}
template <typename Real, bool takeSqrt>
void distanceTransform2D(Real * im, Real * scratch, const unsigned int width, const unsigned int height) {
Real * zScratch; cudaMalloc(&zScratch,(width+1)*(height+1)*sizeof(Real));
int * vScratch; cudaMalloc(&vScratch,width*height*sizeof(int));
distanceTransform2D<Real,takeSqrt>(im,scratch,width,height,zScratch,vScratch);
cudaFree(zScratch);
cudaFree(vScratch);
}
// Lower-envelope pass of the Felzenszwalb-Huttenlocher 1D squared distance
// transform, one thread per line. Thread `tid` handles line tid of `num`
// lines, each `len` samples long; samples within a line are `innerStride`
// elements apart and line starts `outerStride` apart.
// z[] stores parabola intersection abscissae, v[] parabola vertex indices;
// fIntermediate records the source value of the winning parabola per sample.
// NOTE(review): assumes len >= 1 — in `q <= len-1` the int q is promoted to
// unsigned, so len == 0 would loop far out of bounds; confirm callers
// guarantee non-empty lines.
template <typename Real>
__global__ void gpu_stridedDistanceTransform1Da(const Real * fIns, Real * fOuts, Real * fIntermediates, const unsigned int len, const unsigned int num,
                                                const unsigned int outerStride, const unsigned int innerStride,
                                                Real * zs, int * vs) {
  const unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
  if (tid >= num)
    return;
  // Per-thread views of this line and its private scratch slices.
  const Real * fIn = &fIns[tid*outerStride];
  Real * fOut = &fOuts[tid*outerStride];
  Real * fIntermediate = &fIntermediates[tid*outerStride];
  Real * z = zs + tid*(len+1);
  int * v = vs + tid*len;
  // Build the lower envelope of the parabolas rooted at each sample.
  int k = 0;
  v[0] = 0;
  z[0] = -INF;  // sentinel: the while-loop below can never pop past k == 0
  z[1] = +INF;
  for (int q = 1; q <= len-1; q++) {
    Real s = ((fIn[q*innerStride]+q*q) - (fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
    // Pop parabolas that are completely dominated by the new one.
    while (s <= z[k]) {
      k--;
      s = ((fIn[q*innerStride]+q*q)-(fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
    }
    k++;
    v[k] = q;
    z[k] = s;
    z[k+1] = +INF;
  }
  // Evaluate the envelope: for each sample, record the nearest parabola's
  // source value and the squared distance plus that value.
  k = 0;
  for (int q = 0; q <= len-1; q++) {
    while (z[k+1] < q)
      k++;
    fIntermediate[q*innerStride] = fIn[v[k]*innerStride];
    fOut[q*innerStride] = (q-v[k])*(q-v[k]) + fIn[v[k]*innerStride];
  }
}
// Second (final) 1D envelope pass; same algorithm as the `a` pass but, when
// takeSqrt is set, it emits Euclidean distances using fIntermediate from the
// first pass to separate the seed offset from the accumulated square.
// NOTE(review): sqrtf is the single-precision sqrt even when Real is double;
// consider the overloaded sqrt() if double accuracy matters — confirm.
// NOTE(review): assumes len >= 1 (same unsigned promotion caveat as pass a).
template <typename Real, bool takeSqrt>
__global__ void gpu_stridedDistanceTransform1Db(const Real * fIns, const Real * fIntermediates, Real * fOuts, const unsigned int len, const unsigned int num,
                                                const unsigned int outerStride, const unsigned int innerStride,
                                                Real * zs, int * vs) {
  const unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x;
  if (tid >= num)
    return;
  // Per-thread views of this line and its private scratch slices.
  const Real * fIn = &fIns[tid*outerStride];
  const Real * fIntermediate = &fIntermediates[tid*outerStride];
  Real * fOut = &fOuts[tid*outerStride];
  Real * z = zs + tid*(len+1);
  int * v = vs + tid*len;
  // Build the lower envelope (identical to pass a).
  int k = 0;
  v[0] = 0;
  z[0] = -INF;  // sentinel: envelope popping never goes below k == 0
  z[1] = +INF;
  for (int q = 1; q <= len-1; q++) {
    Real s = ((fIn[q*innerStride]+q*q) - (fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
    while (s <= z[k]) {
      k--;
      s = ((fIn[q*innerStride]+q*q)-(fIn[v[k]*innerStride]+v[k]*v[k]))/(2*q-2*v[k]);
    }
    k++;
    v[k] = q;
    z[k] = s;
    z[k+1] = +INF;
  }
  // Evaluate the envelope and write the final distances.
  k = 0;
  for (int q = 0; q <= len-1; q++) {
    while (z[k+1] < q)
      k++;
    if (takeSqrt)
      // Subtract the seed offset before the sqrt and add it back afterwards.
      fOut[q*innerStride] = sqrtf((q-v[k])*(q-v[k]) + fIn[v[k]*innerStride] - fIntermediate[v[k]*innerStride]) + fIntermediate[v[k]*innerStride];
    else
      fOut[q*innerStride] = (q-v[k])*(q-v[k]) + fIn[v[k]*innerStride];
  }
}
// 2D distance transform as two separable 1D passes: rows (x), then columns
// (y). `im` is transformed in place via the ping-pong buffer `scratch`;
// zScratch/vScratch are the per-line envelope buffers shared by both passes.
template <typename Real, bool takeSqrt>
void distanceTransform2D(Real * im, Real * scratch, const unsigned int width, const unsigned int height, Real * zScratch, int * vScratch) {
  dim3 block(64,1,1);
  dim3 grid( ceil( height / (float)block.x), 1, 1);
  // Holds, per pixel, the source value of the nearest row-parabola; produced
  // by the x-pass and consumed by the y-pass.
  Real *fIntermediate;
  cudaMalloc(&fIntermediate,width*height*sizeof(Real));
  // x-direction: one thread per row (height lines of length width, stride 1)
  gpu_stridedDistanceTransform1Da<Real><<<grid,block>>>(im,scratch,fIntermediate,width,height,width,1,zScratch,vScratch);
  cudaDeviceSynchronize();
  grid = dim3( ceil( width / (float)block.x), 1, 1);
  // y-direction: one thread per column (width lines of length height, stride width)
  gpu_stridedDistanceTransform1Db<Real,takeSqrt><<<grid,block>>>(scratch,fIntermediate,im,height,width,1,width,zScratch,vScratch);
  // BUGFIX: fIntermediate was allocated but never released, leaking
  // width*height*sizeof(Real) device bytes on every call. Wait for the
  // y-pass to finish, then free it.
  cudaDeviceSynchronize();
  cudaFree(fIntermediate);
}
// Convenience overload for the 3D transform: allocates z/v scratch sized for
// the largest pass, delegates, then frees.
// NOTE(review): the delegate's final kernel is not synchronized before the
// cudaFree calls; this relies on cudaFree's implicit device synchronization.
template <typename Real, bool takeSqrt>
void distanceTransform3D(Real * in, Real * out, const unsigned int width, const unsigned int height, const unsigned int depth) {
  Real * zScratch; cudaMalloc(&zScratch,(width+1)*(height+1)*(depth+1)*sizeof(Real));
  int * vScratch; cudaMalloc(&vScratch,width*height*depth*sizeof(int));
  distanceTransform3D<Real,takeSqrt>(in,out,width,height,depth,zScratch,vScratch);
  cudaFree(zScratch);
  cudaFree(vScratch);
}
// 3D distance transform as three separable 1D passes (x, then z, then y),
// ping-ponging between `in` and `out` so the final y-pass writes into `out`.
// `in` is clobbered (used as intermediate storage).
// gpu_stridedDistanceTransform1D and gpu_doublyStridedDistanceTransform1D
// are defined elsewhere in this file; the argument order here appears to be
// (src, dst, lineLen, numLines, outerStride, innerStride, ...) — matching
// the `a`/`b` strided kernels above.
template <typename Real, bool takeSqrt>
void distanceTransform3D(Real * in, Real * out, const unsigned int width, const unsigned int height, const unsigned int depth, Real * zScratch, int * vScratch) {
  dim3 block(64,1,1);
  dim3 grid( ceil( height*depth / (float)block.x), 1, 1);
  // x-direction
  gpu_stridedDistanceTransform1D<Real,false><<<grid,block>>>(in,out,width,height*depth,width,1,
                                                             zScratch,vScratch);
  cudaDeviceSynchronize();
  // z-direction
  grid = dim3( ceil(width*height / (float)block.x), 1, 1);
  gpu_stridedDistanceTransform1D<Real,false><<<grid,block>>>(out,in,depth,width*height,1,width*height,
                                                             zScratch,vScratch);
  cudaDeviceSynchronize();
  block = dim3(16,16,1);
  grid = dim3(ceil(width / (float)block.x), ceil(depth / (float)block.y), 1);
  // y-direction (applies the optional sqrt); note: no sync here, callers must
  // synchronize before reading `out`.
  gpu_doublyStridedDistanceTransform1D<Real,takeSqrt><<<grid,block>>>(in,out,height,width,depth,1,width*height,width,
                                                                      zScratch,vScratch);
}
// Convenience overload: allocates the z/v envelope scratch for the signed 2D
// transform, delegates, then frees.
template <typename Real, bool takeSqrt>
void signedDistanceTransform2D(Real * in, Real * out, const unsigned int width, const unsigned int height) {
  Real *zScratch; cudaMalloc(&zScratch,(width+1)*(height+1)*sizeof(Real));
  int *vScratch; cudaMalloc(&vScratch,width*height*sizeof(int));
  signedDistanceTransform2D<Real,takeSqrt>(in,out,width,height,zScratch,vScratch);
  cudaFree(zScratch);
  cudaFree(vScratch);
}
// Intermediate overload: allocates the two image-sized scratch buffers the
// full implementation needs (mask copy and transform workspace), delegates,
// then frees them.
template <typename Real, bool takeSqrt>
void signedDistanceTransform2D(Real * in, Real * out, const unsigned int width, const unsigned int height,
                               Real * zScratch, int *vScratch) {
  Real * imScratch1; cudaMalloc(&imScratch1,width*height*sizeof(Real));
  Real * imScratch2; cudaMalloc(&imScratch2,width*height*sizeof(Real));
  signedDistanceTransform2D<Real,takeSqrt>(in,out,width,height,zScratch,vScratch,imScratch1,imScratch2);
  cudaFree(imScratch1);
  cudaFree(imScratch2);
}
// Signed 2D distance transform: computes the distance transform of the input
// and of its inverse ("seed") image, then combines them so that pixels
// outside the original mask get negated distances (see the combine kernel).
// Buffer roles: `out` doubles as the seed image, `in` is transformed in
// place, imScratch1 preserves a copy of the original input as the mask,
// imScratch2 is transform workspace.
template <typename Real, bool takeSqrt>
void signedDistanceTransform2D(Real * in, Real * out, const unsigned int width, const unsigned int height,
                               Real * zScratch, int * vScratch, Real * imScratch1, Real * imScratch2) {
  dim3 block(16,8,1);
  dim3 grid(ceil(width/(float)block.x),ceil(height/(float)block.y),1);
  Real * seed = out;
  gpu_seedInverseTransform2D<<<grid,block>>>(in,seed,width,height);
  cudaDeviceSynchronize();
  // Preserve the original input before it is clobbered by the in-place
  // transform below.
  Real * mask = imScratch1;
  cudaMemcpy(mask,in,width*height*sizeof(Real),cudaMemcpyDeviceToDevice);
  // In-place transforms: first arg is transformed, second is workspace.
  distanceTransform2D<Real,takeSqrt>(in,imScratch2,width,height,zScratch,vScratch);
  cudaDeviceSynchronize();
  distanceTransform2D<Real,takeSqrt>(seed,imScratch2,width,height,zScratch,vScratch);
  cudaDeviceSynchronize();
  Real * inDT = in;
  Real * seedDT = seed;
  // Negate inDT where the mask is zero, then copy the result into out.
  gpu_combineTransforms2D<<<grid,block>>>(inDT,seedDT,mask,width,height);
  cudaMemcpy(out,inDT,width*height*sizeof(Real),cudaMemcpyDeviceToDevice);
}
// Convenience overload: allocates the z/v envelope scratch for the signed 3D
// transform, delegates, then frees.
template <typename Real, bool takeSqrt>
void signedDistanceTransform3D(Real * in, Real * out, const unsigned int width, const unsigned int height, const unsigned int depth) {
  Real * zScratch; cudaMalloc(&zScratch,(width+1)*(height+1)*(depth+1)*sizeof(Real));
  int * vScratch; cudaMalloc(&vScratch,width*height*depth*sizeof(int));
  signedDistanceTransform3D<Real,takeSqrt>(in,out,width,height,depth,zScratch,vScratch);
  cudaFree(zScratch);
  cudaFree(vScratch);
}
// Intermediate overload: allocates the volume-sized scratch buffer the full
// implementation needs for the seed transform, delegates, then frees it.
template <typename Real, bool takeSqrt>
void signedDistanceTransform3D(Real * in, Real * out, const unsigned int width, const unsigned int height, const unsigned int depth,
                               Real * zScratch, int * vScratch) {
  Real * imScratch; cudaMalloc(&imScratch,width*height*depth*sizeof(Real));
  signedDistanceTransform3D<Real,takeSqrt>(in,out,width,height,depth,zScratch,vScratch,imScratch);
  cudaFree(imScratch);
}
// Signed 3D distance transform: transform of the seed (inverse) image and of
// the input, combined so voxels outside the mask get negated distances.
// NOTE(review): unlike the 2D path, no pristine copy of `in` is kept —
// `mask = in` points at a buffer that distanceTransform3D has clobbered with
// intermediate pass data by the time gpu_combineTransforms3D reads it.
// Confirm the mask==0 test is still meaningful on that intermediate data.
template <typename Real, bool takeSqrt>
void signedDistanceTransform3D(Real * in, Real * out, const unsigned int width, const unsigned int height, const unsigned int depth,
                               Real * zScratch, int * vScratch, Real * imScratch) {
  dim3 block(8,8,4);
  dim3 grid(ceil(width/(float)block.x),ceil(height/(float)block.y),ceil(depth/(float)block.z));
  Real * seed = out;
  gpu_seedInverseTransform3D<<<grid,block>>>(in,seed,width,height,depth);
  cudaDeviceSynchronize();
  // Transform the seed image into imScratch (seed buffer is clobbered).
  Real * seedDT = imScratch;
  distanceTransform3D<Real,takeSqrt>(seed,seedDT,width,height,depth,zScratch,vScratch);
  cudaDeviceSynchronize();
  // Transform the input into out (in buffer is clobbered).
  Real * inDT = out;
  distanceTransform3D<Real,takeSqrt>(in,inDT,width,height,depth,zScratch,vScratch);
  cudaDeviceSynchronize();
  Real * mask = in;
  // No sync after this launch; callers must synchronize before reading out.
  gpu_combineTransforms3D<<<grid,block>>>(inDT,seedDT,mask,width,height,depth);
}
// ---------------------------------------------------------------------------
// Explicit template instantiations. Each DECLARE_* macro instantiates the
// host entry points for one REAL type (and, where applicable, one TAKE_SQRT
// value) so callers linking against this translation unit can use them.
// ---------------------------------------------------------------------------
#define DECLARE_DISTANCE_TRANSFORM_1D(REAL) \
template void distanceTransform1D<REAL>(const REAL * in, REAL * out, const unsigned int width, bool takeSqrt); \
template void distanceTransform1D<REAL>(const REAL * in, REAL * out, const unsigned int width, bool takeSqrt, REAL * zScratch, int * vScratch);
#define DECLARE_DISTANCE_TRANSFORM_2D(REAL,TAKE_SQRT) \
template void distanceTransform2D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height); \
template void distanceTransform2D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, REAL * zScratch, int * vScratch);
#define DECLARE_DISTANCE_TRANSFORM_3D(REAL,TAKE_SQRT) \
template void distanceTransform3D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, const unsigned int depth); \
template void distanceTransform3D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, const unsigned int depth, REAL * zScratch, int * vScratch);
#define DECLARE_SIGNED_DISTANCE_TRANSFORM_2D(REAL,TAKE_SQRT) \
template void signedDistanceTransform2D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height); \
template void signedDistanceTransform2D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, REAL * zScratch, int * vScratch); \
template void signedDistanceTransform2D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, REAL * zScratch, int * vScratch, REAL * imScratch1, REAL * imScratch2);
#define DECLARE_SIGNED_DISTANCE_TRANSFORM_3D(REAL,TAKE_SQRT) \
template void signedDistanceTransform3D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, const unsigned int depth); \
template void signedDistanceTransform3D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, const unsigned int depth, REAL * zScratch, int * vScratch); \
template void signedDistanceTransform3D<REAL,TAKE_SQRT>(REAL * in, REAL * out, const unsigned int width, const unsigned int height, const unsigned int depth, REAL * zScratch, int * vScratch, REAL * imScratch);
// Aggregate macro: all sqrt-parameterized transforms for one (REAL, flag).
#define DECLARE_ALL_DISTANCE_TRANSFORMS(REAL,TAKE_SQRT) \
DECLARE_DISTANCE_TRANSFORM_2D(REAL,TAKE_SQRT) \
DECLARE_DISTANCE_TRANSFORM_3D(REAL,TAKE_SQRT) \
DECLARE_SIGNED_DISTANCE_TRANSFORM_2D(REAL,TAKE_SQRT) \
DECLARE_SIGNED_DISTANCE_TRANSFORM_3D(REAL,TAKE_SQRT)
// 1D has no sqrt template parameter (runtime flag), so it is declared once.
#define DECLARE_DISTANCE_TRANSFORMS(REAL) \
DECLARE_DISTANCE_TRANSFORM_1D(REAL)
// Instantiate float and double variants, with and without the final sqrt.
DECLARE_ALL_DISTANCE_TRANSFORMS(float,false)
DECLARE_ALL_DISTANCE_TRANSFORMS(float,true)
DECLARE_ALL_DISTANCE_TRANSFORMS(double,false)
DECLARE_ALL_DISTANCE_TRANSFORMS(double,true)
DECLARE_DISTANCE_TRANSFORMS(float)
DECLARE_DISTANCE_TRANSFORMS(double)
}
|
fb22f65608f542e23fc9521d09fa35209970ae94.hip | // !!! This is a file automatically generated by hipify!!!
/*
Author: Brian KYANJO
Date: May 1st, 2021
Class: ME571
Description:
------------
Monte Carlo integration implementation using CUDA with Butterfly summation
*/
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include "common.h"
#include <stdio.h>
#include <hiprand/hiprand_kernel.h>
#define THREADS_PER_BLOCK 128
#define SEED 60
//Generate data
// Monte Carlo sample generator: each of the first n = N/nb threads draws nb
// uniform samples and stores the partial sum of cos(-log(x)) in its own slot
// of udata (one slot per launched thread).
//
// BUGFIX: the original accumulated with `+=` into udata, which is allocated
// with hipMalloc and therefore uninitialized; the reduction kernel also reads
// every slot, including those of threads with i_glb >= n. Accumulate into a
// local register instead and have EVERY launched thread store its (possibly
// zero) partial sum, so the whole buffer is well-defined.
__global__ void mcrandom(double *udata, const int N, const int nb, hiprandState_t *states)
{
  unsigned int i_glb = blockIdx.x * blockDim.x + threadIdx.x;
  int n = N/nb;
  // per-thread RNG stream, decorrelated by thread id in the seed
  hiprand_init((SEED << 20) + i_glb, 0, 0, &states[i_glb]);
  double sum = 0.0;
  if (i_glb < n)
    {
      for (int i = 0; i < nb; i++)
        {
          double xran = hiprand_uniform_double(&states[i_glb]);
          sum += cos(-log(xran));
        }
    }
  // Every launched thread writes, so the reduction reads defined data.
  udata[i_glb] = sum;
}
//reduction kernel
//reduction kernel: block-level interleaved shared-memory sum, then one
//atomicAdd per block into the global accumulator *f.
//Assumes blockDim.x == THREADS_PER_BLOCK and that it is a power of two (the
//`index < blockDim.x` guard only keeps u[i + index] in range for power-of-two
//block sizes).
//NOTE(review): *f must be zero-initialized by the caller before launch, and
//the double partial sums are narrowed to float by atomicAdd — confirm the
//precision loss is acceptable.
__global__ void reductionOnGPU(double *udata,float *f)
{
  __shared__ double u[THREADS_PER_BLOCK];
  unsigned int i_glb = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int i_loc = threadIdx.x;
  int ib = blockDim.x;
  unsigned int i;
  //load this block's slice of udata into shared memory
  u[i_loc] = udata[i_glb];
  __syncthreads();
  //reduction in shared memory: stride doubles each round, surviving threads
  //add their partner element
  for (i = 1; i<ib; i *=2)
    {
      int index = 2*i*i_loc;
      //__syncthreads();
      if (index < blockDim.x)
	{
	  u[index] += u[i + index];
	}
      __syncthreads();
    }
  //thread 0 publishes the block total
  if(i_loc==0)
    {
      atomicAdd(f,u[0]);
    }
}
//single-thread kernel: divides the accumulated sum by the sample count N to
//produce the Monte Carlo estimate of the integral.
//NOTE(review): *f is float, so the division happens in single precision
//before widening to double.
__global__ void integralOnGPU(float *f, double *Int ,const int N)
{
  //global mean
  *Int = *f/N;
}
// Driver: estimates the integral of cos(-log(x)) on (0,1] (exact value 0.5,
// per the error computation below) by Monte Carlo on the GPU, printing
// "N,estimate,error,total_time,kernel_time" as CSV.
int main(int argc, char **argv)
{
  // problem size (required command-line argument)
  if (argc < 2)
    {
      fprintf(stderr, "usage: %s N\n", argv[0]);
      return 1;
    }
  long int N = atol(argv[1]);
  int T = THREADS_PER_BLOCK;
  //random number generator states (one per launched thread)
  hiprandState_t *States;
  // host-side result
  double gpuRef;
  //start timing
  double iStart = seconds();
  // malloc device global memory
  float *d_f;
  double *d_Int;
  double *d_udata;
  CHECK(hipMalloc((void **)&d_Int, sizeof(double)));
  //launch configuration: cap the grid at the legacy 65535 1D-grid limit and
  //give each thread nb samples so the total is ~N
  int B = ((N + T -1)/T);
  if(B > 65535) B = 65535;
  int nb = ceil((N*1.0)/(B*T));
  //allocate RNG states and per-thread partial sums
  CHECK(hipMalloc( (void **)&States, (B*T)*sizeof(hiprandState_t)));
  CHECK(hipMalloc((void **)&d_udata, (B*T)*sizeof(double)));
  CHECK(hipMalloc((void **)&d_f, B*sizeof(float)));
  // BUGFIX: hipMalloc does not zero memory. mcrandom accumulates into
  // d_udata and reductionOnGPU atomically adds into d_f, so both must start
  // at zero or the result contains garbage.
  CHECK(hipMemset(d_udata, 0, (B*T)*sizeof(double)));
  CHECK(hipMemset(d_f, 0, B*sizeof(float)));
  //start kernel time
  double iStartc = seconds();
  hipLaunchKernelGGL(( mcrandom), dim3(B),dim3(T), 0, 0, d_udata, N, nb, States);
  CHECK(hipDeviceSynchronize());
  hipLaunchKernelGGL(( reductionOnGPU), dim3(B),dim3(T), 0, 0, d_udata,d_f);
  CHECK(hipDeviceSynchronize());
  hipLaunchKernelGGL(( integralOnGPU), dim3(1),dim3(1), 0, 0, d_f,d_Int ,N);
  CHECK(hipDeviceSynchronize());
  //end kernel time
  double iElaps_c = seconds() - iStartc;
  // check kernel error
  CHECK(hipGetLastError());
  // copy kernel result back to host side
  CHECK(hipMemcpy(&gpuRef, d_Int, sizeof(double), hipMemcpyDeviceToHost));
  double iElaps_s = seconds() - iStart;
  // BUGFIX: use fabs (integer abs would truncate the double difference) and
  // %ld for a long int (was %lld, wrong on platforms with 32-bit long).
  double error = fabs(0.5 - gpuRef);
  printf("%ld,%f,%e,%f,%f\n",N,gpuRef,error,iElaps_s,iElaps_c);
  //free device memory
  CHECK(hipFree(States));
  CHECK(hipFree(d_f));
  CHECK(hipFree(d_Int));
  CHECK(hipFree(d_udata));
  // reset device
  CHECK(hipDeviceReset());
  return (0);
}
| fb22f65608f542e23fc9521d09fa35209970ae94.cu | /*
Author: Brian KYANJO
Date: May 1st, 2021
Class: ME571
Description:
------------
Monte Carlo integration implementation using CUDA with Butterfly summation
*/
#include <cuda_runtime.h>
#include <curand.h>
#include "common.h"
#include <stdio.h>
#include <curand_kernel.h>
#define THREADS_PER_BLOCK 128
#define SEED 60
//Generate data
// Monte Carlo sample generator: each of the first n = N/nb threads draws nb
// uniform samples and stores the partial sum of cos(-log(x)) in its own slot
// of udata (one slot per launched thread).
//
// BUGFIX: the original accumulated with `+=` into udata, which is allocated
// with cudaMalloc and therefore uninitialized; the reduction kernel also
// reads every slot, including those of threads with i_glb >= n. Accumulate
// into a local register instead and have EVERY launched thread store its
// (possibly zero) partial sum, so the whole buffer is well-defined.
__global__ void mcrandom(double *udata, const int N, const int nb, curandState *states)
{
  unsigned int i_glb = blockIdx.x * blockDim.x + threadIdx.x;
  int n = N/nb;
  // per-thread RNG stream, decorrelated by thread id in the seed
  curand_init((SEED << 20) + i_glb, 0, 0, &states[i_glb]);
  double sum = 0.0;
  if (i_glb < n)
    {
      for (int i = 0; i < nb; i++)
        {
          double xran = curand_uniform_double(&states[i_glb]);
          sum += cos(-log(xran));
        }
    }
  // Every launched thread writes, so the reduction reads defined data.
  udata[i_glb] = sum;
}
//reduction kernel
//reduction kernel: block-level interleaved shared-memory sum, then one
//atomicAdd per block into the global accumulator *f.
//Assumes blockDim.x == THREADS_PER_BLOCK and that it is a power of two (the
//`index < blockDim.x` guard only keeps u[i + index] in range for power-of-two
//block sizes).
//NOTE(review): *f must be zero-initialized by the caller before launch, and
//the double partial sums are narrowed to float by atomicAdd — confirm the
//precision loss is acceptable.
__global__ void reductionOnGPU(double *udata,float *f)
{
  __shared__ double u[THREADS_PER_BLOCK];
  unsigned int i_glb = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int i_loc = threadIdx.x;
  int ib = blockDim.x;
  unsigned int i;
  //load this block's slice of udata into shared memory
  u[i_loc] = udata[i_glb];
  __syncthreads();
  //reduction in shared memory: stride doubles each round, surviving threads
  //add their partner element
  for (i = 1; i<ib; i *=2)
    {
      int index = 2*i*i_loc;
      //__syncthreads();
      if (index < blockDim.x)
	{
	  u[index] += u[i + index];
	}
      __syncthreads();
    }
  //thread 0 publishes the block total
  if(i_loc==0)
    {
      atomicAdd(f,u[0]);
    }
}
//single-thread kernel: divides the accumulated sum by the sample count N to
//produce the Monte Carlo estimate of the integral.
//NOTE(review): *f is float, so the division happens in single precision
//before widening to double.
__global__ void integralOnGPU(float *f, double *Int ,const int N)
{
  //global mean
  *Int = *f/N;
}
// Driver: estimates the integral of cos(-log(x)) on (0,1] (exact value 0.5,
// per the error computation below) by Monte Carlo on the GPU, printing
// "N,estimate,error,total_time,kernel_time" as CSV.
int main(int argc, char **argv)
{
  // problem size (required command-line argument)
  if (argc < 2)
    {
      fprintf(stderr, "usage: %s N\n", argv[0]);
      return 1;
    }
  long int N = atol(argv[1]);
  int T = THREADS_PER_BLOCK;
  //random number generator states (one per launched thread)
  curandState *States;
  // host-side result
  double gpuRef;
  //start timing
  double iStart = seconds();
  // malloc device global memory
  float *d_f;
  double *d_Int;
  double *d_udata;
  CHECK(cudaMalloc((void **)&d_Int, sizeof(double)));
  //launch configuration: cap the grid at the legacy 65535 1D-grid limit and
  //give each thread nb samples so the total is ~N
  int B = ((N + T -1)/T);
  if(B > 65535) B = 65535;
  int nb = ceil((N*1.0)/(B*T));
  //allocate RNG states and per-thread partial sums
  CHECK(cudaMalloc( (void **)&States, (B*T)*sizeof(curandState)));
  CHECK(cudaMalloc((void **)&d_udata, (B*T)*sizeof(double)));
  CHECK(cudaMalloc((void **)&d_f, B*sizeof(float)));
  // BUGFIX: cudaMalloc does not zero memory. mcrandom accumulates into
  // d_udata and reductionOnGPU atomically adds into d_f, so both must start
  // at zero or the result contains garbage.
  CHECK(cudaMemset(d_udata, 0, (B*T)*sizeof(double)));
  CHECK(cudaMemset(d_f, 0, B*sizeof(float)));
  //start kernel time
  double iStartc = seconds();
  mcrandom<<<B,T>>>(d_udata, N, nb, States);
  CHECK(cudaDeviceSynchronize());
  reductionOnGPU<<<B,T>>>(d_udata,d_f);
  CHECK(cudaDeviceSynchronize());
  integralOnGPU<<<1,1>>>(d_f,d_Int ,N);
  CHECK(cudaDeviceSynchronize());
  //end kernel time
  double iElaps_c = seconds() - iStartc;
  // check kernel error
  CHECK(cudaGetLastError());
  // copy kernel result back to host side
  CHECK(cudaMemcpy(&gpuRef, d_Int, sizeof(double), cudaMemcpyDeviceToHost));
  double iElaps_s = seconds() - iStart;
  // BUGFIX: use fabs (integer abs would truncate the double difference) and
  // %ld for a long int (was %lld, wrong on platforms with 32-bit long).
  double error = fabs(0.5 - gpuRef);
  printf("%ld,%f,%e,%f,%f\n",N,gpuRef,error,iElaps_s,iElaps_c);
  //free device memory
  CHECK(cudaFree(States));
  CHECK(cudaFree(d_f));
  CHECK(cudaFree(d_Int));
  CHECK(cudaFree(d_udata));
  // reset device
  CHECK(cudaDeviceReset());
  return (0);
}
|
10a9218b0592f8b4d23f5ae60ceec6f2d2e9b809.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
// Checks the most recent HIP runtime status; if an error is pending, prints
// a diagnostic (with optional file:line call-site info) and terminates the
// process. No-op when the status is clean.
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "CUDA error");
        if (file) {
            fprintf(stderr, " (%s:%d)", file, line);
        }
        fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
namespace StreamCompaction {
    namespace Common {

        /**
         * Maps an array to an array of 0s and 1s for stream compaction. Elements
         * which map to 0 will be removed, and elements which map to 1 will be kept.
         * Indices in [n, roundN) are padding (roundN is presumably n rounded up,
         * e.g. to a power of two for a scan — confirm with the caller) and are
         * always marked 0.
         */
        __global__ void kernMapToBoolean(int n, int roundN, int *bools, const int *idata) {
            int index = blockDim.x * blockIdx.x + threadIdx.x;
            if (index >= roundN)
                return;
            // Keep an element iff it is a real (non-padding) entry with a
            // non-zero value. Collapses the original branch ladder into one
            // predicate assignment; behavior is unchanged.
            bools[index] = (index < n && idata[index] != 0) ? 1 : 0;
        }

        /**
         * Performs scatter on an array. That is, for each element in idata,
         * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
         */
        __global__ void kernScatter(int n, int *odata,
                const int *idata, const int *bools, const int *indices) {
            int index = blockDim.x * blockIdx.x + threadIdx.x;
            if (index >= n)
                return;
            // indices[] is the exclusive scan of bools[], giving each kept
            // element its compacted destination slot.
            if (bools[index] == 1)
            {
                odata[indices[index]] = idata[index];
            }
        }
    }
}
| 10a9218b0592f8b4d23f5ae60ceec6f2d2e9b809.cu | #include "common.h"
// Checks the most recent CUDA runtime status; if an error is pending, prints
// a diagnostic (with optional file:line call-site info) and terminates the
// process. No-op when the status is clean.
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error");
        if (file) {
            fprintf(stderr, " (%s:%d)", file, line);
        }
        fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
namespace StreamCompaction {
    namespace Common {

        /**
         * Maps an array to an array of 0s and 1s for stream compaction. Elements
         * which map to 0 will be removed, and elements which map to 1 will be kept.
         * Indices in [n, roundN) are padding (roundN is presumably n rounded up,
         * e.g. to a power of two for a scan — confirm with the caller) and are
         * always marked 0.
         */
        __global__ void kernMapToBoolean(int n, int roundN, int *bools, const int *idata) {
            int index = blockDim.x * blockIdx.x + threadIdx.x;
            if (index >= roundN)
                return;
            // Keep an element iff it is a real (non-padding) entry with a
            // non-zero value. Collapses the original branch ladder into one
            // predicate assignment; behavior is unchanged.
            bools[index] = (index < n && idata[index] != 0) ? 1 : 0;
        }

        /**
         * Performs scatter on an array. That is, for each element in idata,
         * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
         */
        __global__ void kernScatter(int n, int *odata,
                const int *idata, const int *bools, const int *indices) {
            int index = blockDim.x * blockIdx.x + threadIdx.x;
            if (index >= n)
                return;
            // indices[] is the exclusive scan of bools[], giving each kept
            // element its compacted destination slot.
            if (bools[index] == 1)
            {
                odata[indices[index]] = idata[index];
            }
        }
    }
}
|
5cd9ae051e830e97ca276f080e086c75dfce3a4e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file parsing_utils.cu Utility functions for parsing plain-text files
*
*/
#include "parsing_utils.cuh"
#include <hip/hip_runtime.h>
#include <vector>
#include <memory>
#include <iostream>
#include <utilities/error_utils.hpp>
#include <io/utilities/wrapper_utils.hpp>
#include <rmm/device_buffer.hpp>
// When processing the input in chunks, this is the maximum size of each chunk.
// Only one chunk is loaded on the GPU at a time, so this value is chosen to
// be small enough to fit on the GPU in most cases.
constexpr size_t max_chunk_bytes = 256*1024*1024; // 256MB
constexpr int bytes_per_find_thread = 64;
using pos_key_pair = thrust::pair<uint64_t,char>;
/**---------------------------------------------------------------------------*
 * @brief Integer ceiling division: smallest q with q * divisor >= dividend
 * for positive operands. Used below to size chunks, grids and thread counts.
 *---------------------------------------------------------------------------**/
template <typename T>
constexpr T divCeil(T dividend, T divisor) noexcept {
  const T biased = dividend + divisor - 1;
  return biased / divisor;
}
/**---------------------------------------------------------------------------*
 * @brief Sets the specified element of the array to the passed value
 *
 * Overload for plain offset arrays: stores only the position `t`; the key
 * `v` is ignored.
 *---------------------------------------------------------------------------**/
template<class T, class V>
__device__ __forceinline__
void setElement(T* array, cudf::size_type idx, const T& t, const V& v){
  array[idx] = t;
}

/**---------------------------------------------------------------------------*
 * @brief Sets the specified element of the array of pairs using the two passed
 * parameters.
 *
 * Overload for (position, key) pair arrays.
 *---------------------------------------------------------------------------**/
template<class T, class V>
__device__ __forceinline__
void setElement(thrust::pair<T, V>* array, cudf::size_type idx, const T& t, const V& v) {
  array[idx] = {t, v};
}

/**---------------------------------------------------------------------------*
 * @brief Overloads the setElement() functions for void* arrays.
 * Does not do anything, indexing is not allowed with void* arrays.
 *
 * Lets countAllFromSet() reuse the counting kernel without an output array.
 *---------------------------------------------------------------------------**/
template<class T, class V>
__device__ __forceinline__
void setElement(void* array, cudf::size_type idx, const T& t, const V& v) {
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that finds all occurrences of a character in the given
* character array. If the 'positions' parameter is not void*,
* positions of all occurrences are stored in the output array.
*
* @param[in] data Pointer to the input character array
* @param[in] size Number of bytes in the input array
* @param[in] offset Offset to add to the output positions
* @param[in] key Character to find in the array
* @param[in,out] count Pointer to the number of found occurrences
* @param[out] positions Array containing the output positions
*
* @return void
*---------------------------------------------------------------------------**/
// One thread scans a fixed slice of bytes_per_find_thread bytes; matches are
// appended through an atomic counter, so output order is nondeterministic.
// NOTE(review): threads whose slice starts at or past `size` rely on
// `size - did` wrapping to a value that converts to a negative long, which
// skips the loop — an explicit `did < size` guard would be clearer; confirm.
template<class T>
__global__
void countAndSetPositions(char *data, uint64_t size, uint64_t offset, const char key, cudf::size_type* count,
  T* positions) {
  // thread IDs range per block, so also need the block id
  const uint64_t tid = threadIdx.x + (blockDim.x * blockIdx.x);
  const uint64_t did = tid * bytes_per_find_thread;
  const char *raw = (data + did);
  // clamp the last slice to the end of the buffer
  const long byteToProcess = ((did + bytes_per_find_thread) < size) ?
                             bytes_per_find_thread :
                             (size - did);
  // Process the data
  for (long i = 0; i < byteToProcess; i++) {
    if (raw[i] == key) {
      // reserve an output slot; no-op for the void* overload of setElement
      const auto idx = atomicAdd(count, (cudf::size_type)1);
      setElement(positions, idx, did + offset + i, key);
    }
  }
}
/**---------------------------------------------------------------------------*
* @brief Searches the input character array for each of characters in a set.
* Sums up the number of occurrences. If the 'positions' parameter is not void*,
* positions of all occurrences are stored in the output device array.
*
* Does not load the entire file into the GPU memory at any time, so it can
* be used to parse large files. Output array needs to be preallocated.
*
* @param[in] h_data Pointer to the input character array
* @param[in] h_size Number of bytes in the input array
* @param[in] keys Vector containing the keys to count in the buffer
* @param[in] result_offset Offset to add to the output positions
* @param[out] positions Array containing the output positions
*
* @return cudf::size_type total number of occurrences
*---------------------------------------------------------------------------**/
// Streams the host buffer through a single device chunk of at most
// max_chunk_bytes, launching one kernel per (chunk, key) pair; all launches
// share one device counter, so `positions` accumulates across chunks/keys.
template<class T>
cudf::size_type findAllFromSet(const char *h_data, size_t h_size, const std::vector<char>& keys, uint64_t result_offset,
  T *positions) {
  rmm::device_buffer d_chunk(::min(max_chunk_bytes, h_size));
  rmm::device_vector<cudf::size_type> d_count(1, 0);
  int block_size = 0;    // suggested thread count to use
  int min_grid_size = 0; // minimum block count required
  CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, countAndSetPositions<T>) );
  const size_t chunk_count = divCeil(h_size, max_chunk_bytes);
  for (size_t ci = 0; ci < chunk_count; ++ci) {
    const auto chunk_offset = ci * max_chunk_bytes;
    const auto h_chunk = h_data + chunk_offset;
    const int chunk_bytes = ::min((size_t)(h_size - ci * max_chunk_bytes), max_chunk_bytes);
    // one thread per bytes_per_find_thread bytes of the chunk
    const auto chunk_bits = divCeil(chunk_bytes, bytes_per_find_thread);
    const int grid_size = divCeil(chunk_bits, block_size);
    // Copy chunk to device
    CUDA_TRY(hipMemcpyAsync(d_chunk.data(), h_chunk, chunk_bytes, hipMemcpyDefault));
    for (char key: keys) {
      hipLaunchKernelGGL(( countAndSetPositions<T>) , dim3(grid_size), dim3(block_size) , 0, 0,
        static_cast<char *>(d_chunk.data()), chunk_bytes,
        chunk_offset + result_offset, key, d_count.data().get(), positions);
    }
  }
  // thrust's operator[] performs a blocking device-to-host copy, which also
  // waits for the preceding default-stream work.
  return d_count[0];
}
/**---------------------------------------------------------------------------*
* @brief Searches the input character array for each of characters in a set
* and sums up the number of occurrences.
*
* Does not load the entire buffer into the GPU memory at any time, so it can
* be used with buffers of any size.
*
* @param[in] h_data Pointer to the data in host memory
* @param[in] h_size Size of the input data, in bytes
* @param[in] keys Vector containing the keys to count in the buffer
*
* @return cudf::size_type total number of occurrences
*---------------------------------------------------------------------------**/
// Count-only variant: passes a void* output so the kernel's setElement is a
// no-op and only the atomic counter is updated.
cudf::size_type countAllFromSet(const char *h_data, size_t h_size, const std::vector<char>& keys) {
  return findAllFromSet<void>(h_data, h_size, keys, 0, nullptr);
}
template cudf::size_type findAllFromSet<uint64_t>(const char *h_data, size_t h_size, const std::vector<char>& keys, uint64_t result_offset,
uint64_t *positions);
template cudf::size_type findAllFromSet<pos_key_pair>(const char *h_data, size_t h_size, const std::vector<char>& keys, uint64_t result_offset,
pos_key_pair *positions);
/**
* @brief A class representing an array of partial sums, stored in the GPU memory.
*
* The object is a reference to the device memory,
* it does not own the allocated buffer.
**/
struct BlockSumArray {
  int16_t* d_sums = nullptr; ///< Array of partial sums (device memory, not owned)
  uint64_t length = 0;       ///< Length of the array
  uint64_t block_size = 1;   ///< The number of elements aggregated into each partial sum
  // Describes `len` sums of `bsize` input elements each; d_sums is assigned
  // separately by the owning container.
  BlockSumArray(uint64_t len, uint64_t bsize): length(len), block_size(bsize){}
  BlockSumArray() noexcept = default;
};
/**
* @brief A class that stores a pyramid of aggregated sums, in the GPU memory.
*
* Pyramid levels are stored bottom to top; each level is aggregation_rate
* times smaller than the previous one, rounded down.
* Objects of this type own the allocated memory.
**/
class BlockSumPyramid {
  const uint16_t aggregation_rate_ = 32;        ///< Aggregation rate between each level of the pyramid
  thrust::host_vector<BlockSumArray> h_levels_; ///< Host: pyramid levels (lowest to highest)
  rmm::device_vector<BlockSumArray> d_levels_;  ///< Device: pyramid levels (lowest to highest)
public:
  // Builds level descriptors bottom-up and allocates each level's device sum
  // array; stops when a level would be smaller than the aggregation rate.
  // NOTE(review): RMM_ALLOC results are unchecked, and an allocation failure
  // mid-constructor would leak earlier levels (destructor not run) — confirm
  // this is acceptable in context.
  BlockSumPyramid(int input_count){
    // input parameter is the number of elements aggregated with this pyramid
    int prev_count = input_count;
    int prev_block_size = 1;
    while (prev_count >= aggregation_rate_) {
      // We round down when computing the level sizes. Thus, there may be some elements in the input
      // array that are outside of the pyramid (up to aggregation_rate_ - 1 elements).
      h_levels_.push_back(BlockSumArray(prev_count/aggregation_rate_, prev_block_size*aggregation_rate_));
      RMM_ALLOC(&h_levels_.back().d_sums, h_levels_.back().length*sizeof(int16_t), 0);
      prev_count = h_levels_.back().length;
      prev_block_size = h_levels_.back().block_size;
    }
    // Mirror the level descriptors to device memory for kernel use.
    if (!h_levels_.empty()) {
      d_levels_ = h_levels_;
    }
  }
  auto operator[](int level_idx) const {return h_levels_[level_idx];}
  auto deviceGetLevels() const noexcept {return d_levels_.data().get();}
  auto getHeight() const noexcept {return h_levels_.size();}
  auto getAggregationRate() const {return aggregation_rate_;}
  // disable copying (the destructor frees the per-level device buffers)
  BlockSumPyramid(BlockSumPyramid&) = delete;
  BlockSumPyramid& operator=(BlockSumPyramid&) = delete;
  ~BlockSumPyramid() {
    for (auto& level: h_levels_) {
      RMM_FREE(level.d_sums, 0);
    }
  }
};
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that aggregates bracket nesting levels for each block
* in the input array.
*
* Each sum is the level difference between the first bracket in the block,
* and the first bracket in the next block (if any). For example, "[[]]" = 0,
* because all open brackets are closed. "[[]" = 1, because the one unmatched
* open bracket would raise the level of all subsequent elements.
*
* @param[in] brackets Array of brackets, in (offset, char) format
* @param[in] bracket_count Number of brackets
* @param[in] open_chars Array of characters to treat as open brackets
* @param[in] close_chars Array of characters to treat as close brackets
* @param[in] bracket_char_cnt Number of bracket character pairs
* @param[in, out] sum_array Array of partial sums
*
* @return void
*---------------------------------------------------------------------------**/
// One thread per output sum: scans its block of `block_size` brackets and
// accumulates +1 per open and -1 per close bracket character.
// NOTE(review): the accumulator is int16_t; a block_size above INT16_MAX
// unmatched brackets of one kind would overflow — confirm block sizes stay
// small enough.
__global__
void sumBracketsKernel(
  pos_key_pair* brackets, int bracket_count,
  const char* open_chars, const char* close_chars, int bracket_char_cnt,
  BlockSumArray sum_array) {
  const uint64_t sum_idx = threadIdx.x + (blockDim.x * blockIdx.x);
  const uint64_t first_in_idx = sum_idx * sum_array.block_size;
  if (sum_idx >= sum_array.length)
    return;
  int16_t sum = 0;
  for (uint64_t in_idx = first_in_idx; in_idx < first_in_idx + sum_array.block_size; ++in_idx) {
    // classify the bracket character against each configured pair
    for (int bchar_idx = 0; bchar_idx < bracket_char_cnt; ++bchar_idx) {
      if (brackets[in_idx].second == open_chars[bchar_idx]) {
        ++sum;
        break;
      }
      if (brackets[in_idx].second == close_chars[bchar_idx]) {
        --sum;
        break;
      }
    }
  }
  sum_array.d_sums[sum_idx] = sum;
}
/**---------------------------------------------------------------------------*
* @brief Wrapper around sumBracketsKernel
*
* @param[in] brackets Array of brackets, in (offset, char) format
* @param[in] bracket_count Number of brackets
* @param[in] open_chars Array of characters to treat as open brackets
* @param[in] close_chars Array of characters to treat as close brackets
* @param[in] bracket_char_cnt Number of bracket character pairs
* @param[in, out] sum_array Array of partial sums
*
* @return void
*---------------------------------------------------------------------------**/
// Host wrapper: picks an occupancy-based launch configuration and launches
// sumBracketsKernel; sum_array is passed by value (a non-owning view, per
// BlockSumArray). Asynchronous — callers must synchronize before reading the
// partial sums.
void sumBrackets(
  pos_key_pair* brackets, int bracket_count,
  char* open_chars, char* close_chars, int bracket_char_cnt,
  const BlockSumArray& sum_array) {
  int block_size = 0;
  int min_grid_size = 0;
  // let the runtime suggest a block size that maximizes occupancy
  CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size,
    sumBracketsKernel));
  const int gridSize = divCeil(sum_array.length, static_cast<uint64_t>(block_size));
  hipLaunchKernelGGL(( sumBracketsKernel), dim3(gridSize), dim3(block_size), 0, 0,
    brackets, bracket_count,
    open_chars, close_chars, bracket_char_cnt,
    sum_array);
  CUDA_TRY(hipGetLastError());
};
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that computes partial sums of the input elements
*
* @param[in] elements Array of input elements to sum
* @param[in, out] aggregate Array of partial sums
*
* @return void
*---------------------------------------------------------------------------**/
// One thread per aggregate entry: sums the group of finer-level partial sums
// (group size = ratio of the two levels' block sizes) into one coarser sum.
// NOTE(review): `in_idx` is an int initialized from a uint64_t; this narrows
// for element arrays longer than INT_MAX — confirm level sizes stay below
// that.
__global__
void aggregateSumKernel(BlockSumArray elements, BlockSumArray aggregate){
  const uint64_t aggregate_idx = threadIdx.x + (blockDim.x * blockIdx.x);
  const int aggregate_group_size = aggregate.block_size / elements.block_size;
  const uint64_t first_in_idx = aggregate_idx * aggregate_group_size;
  if (aggregate_idx >= aggregate.length)
    return;
  int16_t sum = 0;
  for (int in_idx = first_in_idx; in_idx < first_in_idx + aggregate_group_size; ++in_idx) {
    sum += elements.d_sums[in_idx];
  }
  aggregate.d_sums[aggregate_idx] = sum;
}
/**---------------------------------------------------------------------------*
* @brief Wrapper around aggregateSumKernel
*
* @param[in] elements Array of input elements to sum
* @param[in, out] aggregate Array of partial sums
*
* @return void
*---------------------------------------------------------------------------**/
void aggregateSum(const BlockSumArray& elements, const BlockSumArray& aggregate){
  // Ask the driver for an occupancy-friendly block size for this kernel
  int block_size = 0;
  int min_grid_size = 0;  // required by the occupancy API; not used for the launch
  CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size,
    aggregateSumKernel));
  // One thread per aggregated output element
  const int grid_size = divCeil(aggregate.length, static_cast<uint64_t>(block_size));
  hipLaunchKernelGGL(( aggregateSumKernel), dim3(grid_size), dim3(block_size), 0, 0, elements, aggregate);
  // Surface launch-configuration errors immediately
  CUDA_TRY(hipGetLastError());
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that assigns levels to each bracket,
* with 1 being the top level
*
* The algorithm uses the pyramid of partial sums to compute the levels
* in parallel, in log(n) time per block of elements.
*
* @param[in] brackets Array of brackets, in (offset, char) format
* @param[in] count Number of brackets
* @param[in] sum_pyramid Pyramid of aggregated partial sums, where
* higher levels aggregate more elements per block
* @param[in] pyramid_height Number of levels in the sum_pyramid
* @param[in] open_chars Array of characters to treat as open brackets
* @param[in] close_chars Array of characters to treat as close brackets
* @param[in] bracket_char_cnt Number of bracket character pairs
* @param[out] levels Array of output levels, one per bracket
*
* @return void
*---------------------------------------------------------------------------**/
__global__
void assignLevelsKernel(
    const pos_key_pair* brackets, uint64_t count,
    const BlockSumArray* sum_pyramid, int pyramid_height,
    const char* open_chars, const char* close_chars, int bracket_char_cnt,
    int16_t* levels) {
  // Process the number of elements equal to the aggregation rate, if the pyramid is used
  // Process all elements otherwise
  const uint64_t to_process = pyramid_height != 0 ? sum_pyramid[0].block_size : count;
  const uint64_t tid = threadIdx.x + (static_cast<uint64_t>(blockDim.x) * blockIdx.x);
  const uint64_t first_bracket_idx = tid * to_process;
  if (first_bracket_idx >= count)
    return;
  // Find the total sum of levels before the current block by walking the
  // pyramid top-down and adding whole-block level deltas
  int sum = 0;
  if (pyramid_height != 0) {
    const uint64_t aggregation_rate = sum_pyramid[0].block_size;
    int level = pyramid_height - 1;
    // 64-bit offset/block index: the previous 'int' variables truncated
    // first_bracket_idx for inputs with more than INT_MAX brackets
    uint64_t block_idx = 0;
    uint64_t offset = first_bracket_idx;
    while (offset) {
      // Look for the highest level that can be used with the current offset
      while (offset < sum_pyramid[level].block_size && level > 0) {
        --level; block_idx *= aggregation_rate;
      }
      // Add up the blocks in the current level while the offset is after/at the block end
      while (offset >= sum_pyramid[level].block_size) {
        offset -= sum_pyramid[level].block_size;
        sum += sum_pyramid[level].d_sums[block_idx];
        ++block_idx;
      }
    }
  }
  // Assign levels, update current level based on the encountered brackets
  const uint64_t last_bracket_idx = min(first_bracket_idx + to_process, count) - 1;
  for (uint64_t bracket_idx = first_bracket_idx; bracket_idx <= last_bracket_idx; ++bracket_idx){
    for (int bchar_idx = 0; bchar_idx < bracket_char_cnt; ++bchar_idx) {
      if (brackets[bracket_idx].second == open_chars[bchar_idx]) {
        // Open bracket: nesting deepens before the level is recorded
        levels[bracket_idx] = ++sum;
        break;
      }
      else if (brackets[bracket_idx].second == close_chars[bchar_idx]) {
        // Close bracket: same level as its matching open bracket
        levels[bracket_idx] = sum--;
        break;
      }
    }
  }
}
/**---------------------------------------------------------------------------*
* @brief Wrapper around assignLevelsKernel
*
* @param[in] brackets Array of brackets, in (offset, char) format
* @param[in] count Number of brackets
* @param[in] sum_pyramid Pyramid of aggregated partial sums, where
* higher levels aggregate more elements per block
* @param[in] pyramid_height Number of levels in the sum_pyramid
* @param[in] open_chars Array of characters to treat as open brackets
* @param[in] close_chars Array of characters to treat as close brackets
* @param[in] bracket_char_cnt Number of bracket character pairs
 * @param[out] levels Array of output levels, one per bracket
*
* @return void
*---------------------------------------------------------------------------**/
void assignLevels(pos_key_pair* brackets, uint64_t count,
    const BlockSumPyramid& sum_pyramid,
    char* open_chars, char* close_chars, int bracket_char_cnt,
    int16_t* levels) {
  // Ask the driver for an occupancy-friendly block size for this kernel
  int block_size = 0;
  int min_grid_size = 0;  // required by the occupancy API; not used for the launch
  CUDA_TRY(hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size,
    assignLevelsKernel));
  // One thread per group of getAggregationRate() brackets
  const int thread_cnt = divCeil(count, static_cast<uint64_t>(sum_pyramid.getAggregationRate()));
  const int grid_size = divCeil(thread_cnt, block_size);
  hipLaunchKernelGGL(( assignLevelsKernel), dim3(grid_size), dim3(block_size), 0, 0,
    brackets, count,
    sum_pyramid.deviceGetLevels(), sum_pyramid.getHeight(),
    open_chars, close_chars, bracket_char_cnt,
    levels);
  // Surface launch-configuration errors immediately
  CUDA_TRY(hipGetLastError());
}
/**---------------------------------------------------------------------------*
* @brief Computes nested levels for each of the brackets in the input array
*
* The input array of brackets is sorted before levels are computed.
 * The algorithm assumes well-formed input, i.e. brackets are correctly nested
 * and there are no brackets that should be ignored (e.g. quoted brackets).
* Brackets at the top level are assigned level 1.
*
* @param[in] brackets Device memory array of brackets, in (offset, key) format
* @param[in] count Number of brackets
* @param[in] open_chars string of characters to treat as open brackets
* @param[in] close_chars string of characters to treat as close brackets
*
* @return rmm::device_vector<int16_t> Device vector containing bracket levels
*---------------------------------------------------------------------------**/
rmm::device_vector<int16_t> getBracketLevels(
    pos_key_pair* brackets, int count,
    const std::string& open_chars, const std::string& close_chars){
  // Validate the input before doing any device work (fail fast)
  CUDF_EXPECTS(open_chars.size() == close_chars.size(),
    "The number of open and close bracket characters must be equal.");
  // TODO: consider moving sort() out of this function
  thrust::sort(rmm::exec_policy()->on(0), brackets, brackets + count);
  // Total bracket level difference within each segment of brackets
  BlockSumPyramid aggregated_sums(count);
  // Copy the open/close chars to device
  rmm::device_buffer d_open_chars(open_chars.data(), open_chars.size());
  rmm::device_buffer d_close_chars(close_chars.data(), close_chars.size());
  if (aggregated_sums.getHeight() != 0) {
    // Bottom pyramid level sums the brackets directly...
    sumBrackets(
      brackets, count, static_cast<char *>(d_open_chars.data()),
      static_cast<char *>(d_close_chars.data()), open_chars.size(),
      aggregated_sums[0]);
    // ...each higher level aggregates the level below it
    for (size_t level_idx = 1; level_idx < aggregated_sums.getHeight(); ++level_idx)
      aggregateSum(aggregated_sums[level_idx - 1], aggregated_sums[level_idx]);
  }
  rmm::device_vector<int16_t> d_levels(count);
  assignLevels(
    brackets, count, aggregated_sums,
    static_cast<char *>(d_open_chars.data()),
    static_cast<char *>(d_close_chars.data()),
    open_chars.size(), d_levels.data().get());
  // Plain return: a named local is moved/elided automatically; the previous
  // 'return std::move(d_levels)' inhibited copy elision
  return d_levels;
}
| 5cd9ae051e830e97ca276f080e086c75dfce3a4e.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file parsing_utils.cu Utility functions for parsing plain-text files
*
*/
#include "parsing_utils.cuh"
#include <cuda_runtime.h>
#include <vector>
#include <memory>
#include <iostream>
#include <utilities/error_utils.hpp>
#include <io/utilities/wrapper_utils.hpp>
#include <rmm/device_buffer.hpp>
// When processing the input in chunks, this is the maximum size of each chunk.
// Only one chunk is loaded on the GPU at a time, so this value is chosen to
// be small enough to fit on the GPU in most cases.
constexpr size_t max_chunk_bytes = 256*1024*1024; // 256MB
constexpr int bytes_per_find_thread = 64;
using pos_key_pair = thrust::pair<uint64_t,char>;
// Integer ceiling division: smallest integer >= dividend / divisor.
// Written as quotient plus a remainder test instead of (dividend + divisor - 1)
// / divisor so the addition cannot overflow when dividend is near the maximum
// representable value of T. Preconditions: divisor != 0, operands non-negative.
template <typename T>
constexpr T divCeil(T dividend, T divisor) noexcept { return dividend / divisor + (dividend % divisor != 0 ? T{1} : T{0}); }
/**---------------------------------------------------------------------------*
* @brief Sets the specified element of the array to the passed value
*---------------------------------------------------------------------------**/
// Plain-array overload: stores only the position 't'; the key 'v' is ignored.
template<class T, class V>
__device__ __forceinline__
void setElement(T* array, cudf::size_type idx, const T& t, const V& v){
array[idx] = t;
}
/**---------------------------------------------------------------------------*
 * @brief Sets the specified element of the array of pairs using the two passed
 * parameters: the position 't' and the key character 'v'.
 *---------------------------------------------------------------------------**/
template<class T, class V>
__device__ __forceinline__
void setElement(thrust::pair<T, V>* array, cudf::size_type idx, const T& t, const V& v) {
array[idx] = {t, v};
}
/**---------------------------------------------------------------------------*
 * @brief Overloads the setElement() functions for void* arrays.
 * Does not do anything, indexing is not allowed with void* arrays.
 * Used when only the occurrence count is needed (see countAllFromSet, which
 * calls findAllFromSet<void> with a nullptr positions array).
 *---------------------------------------------------------------------------**/
template<class T, class V>
__device__ __forceinline__
void setElement(void* array, cudf::size_type idx, const T& t, const V& v) {
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that finds all occurrences of a character in the given
* character array. If the 'positions' parameter is not void*,
* positions of all occurrences are stored in the output array.
*
* @param[in] data Pointer to the input character array
* @param[in] size Number of bytes in the input array
* @param[in] offset Offset to add to the output positions
* @param[in] key Character to find in the array
* @param[in,out] count Pointer to the number of found occurrences
* @param[out] positions Array containing the output positions
*
* @return void
*---------------------------------------------------------------------------**/
template<class T>
__global__
void countAndSetPositions(char *data, uint64_t size, uint64_t offset, const char key, cudf::size_type* count,
T* positions) {
  // Each thread scans a fixed span of bytes_per_find_thread bytes; widen the
  // tid product to 64 bits before multiplying
  const uint64_t tid = threadIdx.x + (static_cast<uint64_t>(blockDim.x) * blockIdx.x);
  const uint64_t did = tid * bytes_per_find_thread;
  // Guard threads launched past the end of the buffer. Previously the tail
  // computation relied on (size - did) underflowing to a negative 'long',
  // which is fragile ('long' is 32-bit on LLP64 platforms).
  if (did >= size) return;
  const char *raw = (data + did);
  // The last in-range thread may own a partial span
  const uint64_t bytes_to_process = (did + bytes_per_find_thread < size)
                                        ? bytes_per_find_thread
                                        : (size - did);
  // Record every occurrence of the key. Output order is unspecified because
  // the slot index comes from a global atomic counter.
  for (uint64_t i = 0; i < bytes_to_process; i++) {
    if (raw[i] == key) {
      const auto idx = atomicAdd(count, (cudf::size_type)1);
      setElement(positions, idx, did + offset + i, key);
    }
  }
}
/**---------------------------------------------------------------------------*
* @brief Searches the input character array for each of characters in a set.
* Sums up the number of occurrences. If the 'positions' parameter is not void*,
* positions of all occurrences are stored in the output device array.
*
* Does not load the entire file into the GPU memory at any time, so it can
* be used to parse large files. Output array needs to be preallocated.
*
* @param[in] h_data Pointer to the input character array
* @param[in] h_size Number of bytes in the input array
* @param[in] keys Vector containing the keys to count in the buffer
* @param[in] result_offset Offset to add to the output positions
* @param[out] positions Array containing the output positions
*
* @return cudf::size_type total number of occurrences
*---------------------------------------------------------------------------**/
template<class T>
cudf::size_type findAllFromSet(const char *h_data, size_t h_size, const std::vector<char>& keys, uint64_t result_offset,
T *positions) {
  // Staging buffer for one chunk; only one chunk lives on the GPU at a time
  rmm::device_buffer d_chunk(std::min(max_chunk_bytes, h_size));
  rmm::device_vector<cudf::size_type> d_count(1, 0);
  int block_size = 0;     // suggested thread count to use
  int min_grid_size = 0;  // minimum block count required
  CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, countAndSetPositions<T>) );
  const size_t chunk_count = divCeil(h_size, max_chunk_bytes);
  for (size_t ci = 0; ci < chunk_count; ++ci) {
    const size_t chunk_offset = ci * max_chunk_bytes;
    const auto h_chunk = h_data + chunk_offset;
    // Last chunk may be shorter; values fit in int (max_chunk_bytes is 256MB)
    const int chunk_bytes = std::min(h_size - chunk_offset, max_chunk_bytes);
    const auto chunk_bits = divCeil(chunk_bytes, bytes_per_find_thread);
    const int grid_size = divCeil(chunk_bits, block_size);
    // Copy chunk to device
    CUDA_TRY(cudaMemcpyAsync(d_chunk.data(), h_chunk, chunk_bytes, cudaMemcpyDefault));
    for (char key: keys) {
      countAndSetPositions<T> <<< grid_size, block_size >>> (
        static_cast<char *>(d_chunk.data()), chunk_bytes,
        chunk_offset + result_offset, key, d_count.data().get(), positions);
      // Surface launch-configuration errors (consistent with the wrappers below)
      CUDA_TRY(cudaGetLastError());
    }
  }
  // Reading the device vector synchronizes with the preceding async work
  return d_count[0];
}
/**---------------------------------------------------------------------------*
* @brief Searches the input character array for each of characters in a set
* and sums up the number of occurrences.
*
* Does not load the entire buffer into the GPU memory at any time, so it can
* be used with buffers of any size.
*
* @param[in] h_data Pointer to the data in host memory
* @param[in] h_size Size of the input data, in bytes
* @param[in] keys Vector containing the keys to count in the buffer
*
* @return cudf::size_type total number of occurrences
*---------------------------------------------------------------------------**/
cudf::size_type countAllFromSet(const char *h_data, size_t h_size, const std::vector<char>& keys) {
// Reuse the counting kernel with a void* output: positions are discarded
// (the void* setElement overload is a no-op) and only the count is returned.
return findAllFromSet<void>(h_data, h_size, keys, 0, nullptr);
}
// Explicit instantiations: position-only output, and (position, key) pair output
template cudf::size_type findAllFromSet<uint64_t>(const char *h_data, size_t h_size, const std::vector<char>& keys, uint64_t result_offset,
uint64_t *positions);
template cudf::size_type findAllFromSet<pos_key_pair>(const char *h_data, size_t h_size, const std::vector<char>& keys, uint64_t result_offset,
pos_key_pair *positions);
/**
* @brief A class representing an array of partial sums, stored in the GPU memory.
*
* The object is a reference to the device memory,
* it does not own the allocated buffer.
**/
struct BlockSumArray {
int16_t* d_sums = nullptr; ///< Array of partial sums (device memory; not owned)
uint64_t length = 0; ///< Length of the array
uint64_t block_size = 1; ///< The number of elements aggregated into each partial sum
// Note: instances are passed to kernels by value, so the member layout above
// is part of the device ABI of the kernels in this file.
BlockSumArray(uint64_t len, uint64_t bsize): length(len), block_size(bsize){}
BlockSumArray() noexcept = default;
};
/**
* @brief A class that stores a pyramid of aggregated sums, in the GPU memory.
*
* Pyramid levels are stored bottom to top; each level is aggregation_rate
* times smaller than the previous one, rounded down.
* Objects of this type own the allocated memory.
**/
class BlockSumPyramid {
  const uint16_t aggregation_rate_ = 32; ///< Aggregation rate between each level of the pyramid
  thrust::host_vector<BlockSumArray> h_levels_; ///< Host: pyramid levels (lowest to highest)
  rmm::device_vector<BlockSumArray> d_levels_; ///< Device: pyramid levels (lowest to highest)
public:
  /// @param input_count number of elements aggregated with this pyramid
  /// 'explicit' prevents accidental int -> BlockSumPyramid conversions
  explicit BlockSumPyramid(int input_count){
    int prev_count = input_count;
    int prev_block_size = 1;
    while (prev_count >= aggregation_rate_) {
      // We round down when computing the level sizes. Thus, there may be some elements in the input
      // array that are outside of the pyramid (up to aggregation_rate_ - 1 elements).
      h_levels_.push_back(BlockSumArray(prev_count/aggregation_rate_, prev_block_size*aggregation_rate_));
      // NOTE(review): RMM_ALLOC's status is not checked here; consider wrapping
      // in RMM_TRY -- confirm project convention
      RMM_ALLOC(&h_levels_.back().d_sums, h_levels_.back().length*sizeof(int16_t), 0);
      prev_count = h_levels_.back().length;
      prev_block_size = h_levels_.back().block_size;
    }
    // Mirror the level descriptors to the device so kernels can walk the pyramid
    if (!h_levels_.empty()) {
      d_levels_ = h_levels_;
    }
  }
  /// Host-side access to a level descriptor (level 0 is the lowest)
  auto operator[](int level_idx) const {return h_levels_[level_idx];}
  /// Device pointer to the array of level descriptors, for kernel use
  auto deviceGetLevels() const noexcept {return d_levels_.data().get();}
  auto getHeight() const noexcept {return h_levels_.size();}
  auto getAggregationRate() const {return aggregation_rate_;}
  // Non-copyable: the destructor frees the device buffers, so a copy would
  // double-free. Deleted with const& so copies from const sources are also
  // rejected (the previous non-const-ref form was non-idiomatic).
  BlockSumPyramid(const BlockSumPyramid&) = delete;
  BlockSumPyramid& operator=(const BlockSumPyramid&) = delete;
  ~BlockSumPyramid() {
    for (auto& level: h_levels_) {
      RMM_FREE(level.d_sums, 0);
    }
  }
};
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that aggregates bracket nesting levels for each block
* in the input array.
*
* Each sum is the level difference between the first bracket in the block,
* and the first bracket in the next block (if any). For example, "[[]]" = 0,
* because all open brackets are closed. "[[]" = 1, because the one unmatched
* open bracket would raise the level of all subsequent elements.
*
* @param[in] brackets Array of brackets, in (offset, char) format
* @param[in] bracket_count Number of brackets
* @param[in] open_chars Array of characters to treat as open brackets
* @param[in] close_chars Array of characters to treat as close brackets
* @param[in] bracket_char_cnt Number of bracket character pairs
* @param[in, out] sum_array Array of partial sums
*
* @return void
*---------------------------------------------------------------------------**/
__global__
void sumBracketsKernel(
pos_key_pair* brackets, int bracket_count,
const char* open_chars, const char* close_chars, int bracket_char_cnt,
BlockSumArray sum_array) {
// One thread per partial sum; each scans block_size consecutive brackets
const uint64_t sum_idx = threadIdx.x + (blockDim.x * blockIdx.x);
const uint64_t first_in_idx = sum_idx * sum_array.block_size;
if (sum_idx >= sum_array.length)
return;
int16_t sum = 0;
// NOTE(review): bracket_count is unused; the scan range is derived from
// sum_array (length * block_size elements are assumed valid) -- confirm
for (uint64_t in_idx = first_in_idx; in_idx < first_in_idx + sum_array.block_size; ++in_idx) {
for (int bchar_idx = 0; bchar_idx < bracket_char_cnt; ++bchar_idx) {
// Open brackets raise the nesting level, close brackets lower it
if (brackets[in_idx].second == open_chars[bchar_idx]) {
++sum;
break;
}
if (brackets[in_idx].second == close_chars[bchar_idx]) {
--sum;
break;
}
}
}
sum_array.d_sums[sum_idx] = sum;
}
/**---------------------------------------------------------------------------*
* @brief Wrapper around sumBracketsKernel
*
* @param[in] brackets Array of brackets, in (offset, char) format
* @param[in] bracket_count Number of brackets
* @param[in] open_chars Array of characters to treat as open brackets
* @param[in] close_chars Array of characters to treat as close brackets
* @param[in] bracket_char_cnt Number of bracket character pairs
* @param[in, out] sum_array Array of partial sums
*
* @return void
*---------------------------------------------------------------------------**/
void sumBrackets(
    pos_key_pair* brackets, int bracket_count,
    char* open_chars, char* close_chars, int bracket_char_cnt,
    const BlockSumArray& sum_array) {
  // Ask the driver for an occupancy-friendly block size for this kernel
  int block_size = 0;
  int min_grid_size = 0;  // required by the occupancy API; not used for the launch
  CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size,
    sumBracketsKernel));
  // One thread per partial sum (renamed from gridSize for snake_case consistency
  // with aggregateSum/assignLevels)
  const int grid_size = divCeil(sum_array.length, static_cast<uint64_t>(block_size));
  sumBracketsKernel<<<grid_size, block_size>>>(
    brackets, bracket_count,
    open_chars, close_chars, bracket_char_cnt,
    sum_array);
  // Surface launch-configuration errors immediately
  CUDA_TRY(cudaGetLastError());
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that computes partial sums of the input elements
*
* @param[in] elements Array of input elements to sum
* @param[in, out] aggregate Array of partial sums
*
* @return void
*---------------------------------------------------------------------------**/
__global__
void aggregateSumKernel(BlockSumArray elements, BlockSumArray aggregate){
  // One thread per output (aggregated) element; widen the tid product to 64 bits
  const uint64_t aggregate_idx = threadIdx.x + (static_cast<uint64_t>(blockDim.x) * blockIdx.x);
  // Number of input partial sums folded into each output element
  const uint64_t aggregate_group_size = aggregate.block_size / elements.block_size;
  const uint64_t first_in_idx = aggregate_idx * aggregate_group_size;
  if (aggregate_idx >= aggregate.length)
    return;
  int16_t sum = 0;
  // 64-bit loop variable: the previous 'int' would truncate first_in_idx for
  // inputs with more than INT_MAX partial sums
  for (uint64_t in_idx = first_in_idx; in_idx < first_in_idx + aggregate_group_size; ++in_idx) {
    sum += elements.d_sums[in_idx];
  }
  aggregate.d_sums[aggregate_idx] = sum;
}
/**---------------------------------------------------------------------------*
* @brief Wrapper around aggregateSumKernel
*
* @param[in] elements Array of input elements to sum
* @param[in, out] aggregate Array of partial sums
*
* @return void
*---------------------------------------------------------------------------**/
void aggregateSum(const BlockSumArray& elements, const BlockSumArray& aggregate){
  // Ask the driver for an occupancy-friendly block size for this kernel
  int block_size = 0;
  int min_grid_size = 0;  // required by the occupancy API; not used for the launch
  CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size,
    aggregateSumKernel));
  // One thread per aggregated output element
  const int grid_size = divCeil(aggregate.length, static_cast<uint64_t>(block_size));
  aggregateSumKernel<<<grid_size, block_size>>>(elements, aggregate);
  // Surface launch-configuration errors immediately
  CUDA_TRY(cudaGetLastError());
}
/**---------------------------------------------------------------------------*
* @brief CUDA kernel that assigns levels to each bracket,
* with 1 being the top level
*
* The algorithm uses the pyramid of partial sums to compute the levels
* in parallel, in log(n) time per block of elements.
*
* @param[in] brackets Array of brackets, in (offset, char) format
* @param[in] count Number of brackets
* @param[in] sum_pyramid Pyramid of aggregated partial sums, where
* higher levels aggregate more elements per block
* @param[in] pyramid_height Number of levels in the sum_pyramid
* @param[in] open_chars Array of characters to treat as open brackets
* @param[in] close_chars Array of characters to treat as close brackets
* @param[in] bracket_char_cnt Number of bracket character pairs
* @param[out] levels Array of output levels, one per bracket
*
* @return void
*---------------------------------------------------------------------------**/
__global__
void assignLevelsKernel(
    const pos_key_pair* brackets, uint64_t count,
    const BlockSumArray* sum_pyramid, int pyramid_height,
    const char* open_chars, const char* close_chars, int bracket_char_cnt,
    int16_t* levels) {
  // Process the number of elements equal to the aggregation rate, if the pyramid is used
  // Process all elements otherwise
  const uint64_t to_process = pyramid_height != 0 ? sum_pyramid[0].block_size : count;
  const uint64_t tid = threadIdx.x + (static_cast<uint64_t>(blockDim.x) * blockIdx.x);
  const uint64_t first_bracket_idx = tid * to_process;
  if (first_bracket_idx >= count)
    return;
  // Find the total sum of levels before the current block by walking the
  // pyramid top-down and adding whole-block level deltas
  int sum = 0;
  if (pyramid_height != 0) {
    const uint64_t aggregation_rate = sum_pyramid[0].block_size;
    int level = pyramid_height - 1;
    // 64-bit offset/block index: the previous 'int' variables truncated
    // first_bracket_idx for inputs with more than INT_MAX brackets
    uint64_t block_idx = 0;
    uint64_t offset = first_bracket_idx;
    while (offset) {
      // Look for the highest level that can be used with the current offset
      while (offset < sum_pyramid[level].block_size && level > 0) {
        --level; block_idx *= aggregation_rate;
      }
      // Add up the blocks in the current level while the offset is after/at the block end
      while (offset >= sum_pyramid[level].block_size) {
        offset -= sum_pyramid[level].block_size;
        sum += sum_pyramid[level].d_sums[block_idx];
        ++block_idx;
      }
    }
  }
  // Assign levels, update current level based on the encountered brackets
  const uint64_t last_bracket_idx = min(first_bracket_idx + to_process, count) - 1;
  for (uint64_t bracket_idx = first_bracket_idx; bracket_idx <= last_bracket_idx; ++bracket_idx){
    for (int bchar_idx = 0; bchar_idx < bracket_char_cnt; ++bchar_idx) {
      if (brackets[bracket_idx].second == open_chars[bchar_idx]) {
        // Open bracket: nesting deepens before the level is recorded
        levels[bracket_idx] = ++sum;
        break;
      }
      else if (brackets[bracket_idx].second == close_chars[bchar_idx]) {
        // Close bracket: same level as its matching open bracket
        levels[bracket_idx] = sum--;
        break;
      }
    }
  }
}
/**---------------------------------------------------------------------------*
* @brief Wrapper around assignLevelsKernel
*
* @param[in] brackets Array of brackets, in (offset, char) format
* @param[in] count Number of brackets
* @param[in] sum_pyramid Pyramid of aggregated partial sums, where
* higher levels aggregate more elements per block
* @param[in] pyramid_height Number of levels in the sum_pyramid
* @param[in] open_chars Array of characters to treat as open brackets
* @param[in] close_chars Array of characters to treat as close brackets
* @param[in] bracket_char_cnt Number of bracket character pairs
 * @param[out] levels Array of output levels, one per bracket
*
* @return void
*---------------------------------------------------------------------------**/
void assignLevels(pos_key_pair* brackets, uint64_t count,
    const BlockSumPyramid& sum_pyramid,
    char* open_chars, char* close_chars, int bracket_char_cnt,
    int16_t* levels) {
  // Ask the driver for an occupancy-friendly block size for this kernel
  int block_size = 0;
  int min_grid_size = 0;  // required by the occupancy API; not used for the launch
  CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size,
    assignLevelsKernel));
  // One thread per group of getAggregationRate() brackets
  const int thread_cnt = divCeil(count, static_cast<uint64_t>(sum_pyramid.getAggregationRate()));
  const int grid_size = divCeil(thread_cnt, block_size);
  assignLevelsKernel<<<grid_size, block_size>>>(
    brackets, count,
    sum_pyramid.deviceGetLevels(), sum_pyramid.getHeight(),
    open_chars, close_chars, bracket_char_cnt,
    levels);
  // Surface launch-configuration errors immediately
  CUDA_TRY(cudaGetLastError());
}
/**---------------------------------------------------------------------------*
* @brief Computes nested levels for each of the brackets in the input array
*
* The input array of brackets is sorted before levels are computed.
 * The algorithm assumes well-formed input, i.e. brackets are correctly nested
 * and there are no brackets that should be ignored (e.g. quoted brackets).
* Brackets at the top level are assigned level 1.
*
* @param[in] brackets Device memory array of brackets, in (offset, key) format
* @param[in] count Number of brackets
* @param[in] open_chars string of characters to treat as open brackets
* @param[in] close_chars string of characters to treat as close brackets
*
* @return rmm::device_vector<int16_t> Device vector containing bracket levels
*---------------------------------------------------------------------------**/
rmm::device_vector<int16_t> getBracketLevels(
    pos_key_pair* brackets, int count,
    const std::string& open_chars, const std::string& close_chars){
  // Validate the input before doing any device work (fail fast)
  CUDF_EXPECTS(open_chars.size() == close_chars.size(),
    "The number of open and close bracket characters must be equal.");
  // TODO: consider moving sort() out of this function
  thrust::sort(rmm::exec_policy()->on(0), brackets, brackets + count);
  // Total bracket level difference within each segment of brackets
  BlockSumPyramid aggregated_sums(count);
  // Copy the open/close chars to device
  rmm::device_buffer d_open_chars(open_chars.data(), open_chars.size());
  rmm::device_buffer d_close_chars(close_chars.data(), close_chars.size());
  if (aggregated_sums.getHeight() != 0) {
    // Bottom pyramid level sums the brackets directly...
    sumBrackets(
      brackets, count, static_cast<char *>(d_open_chars.data()),
      static_cast<char *>(d_close_chars.data()), open_chars.size(),
      aggregated_sums[0]);
    // ...each higher level aggregates the level below it
    for (size_t level_idx = 1; level_idx < aggregated_sums.getHeight(); ++level_idx)
      aggregateSum(aggregated_sums[level_idx - 1], aggregated_sums[level_idx]);
  }
  rmm::device_vector<int16_t> d_levels(count);
  assignLevels(
    brackets, count, aggregated_sums,
    static_cast<char *>(d_open_chars.data()),
    static_cast<char *>(d_close_chars.data()),
    open_chars.size(), d_levels.data().get());
  // Plain return: a named local is moved/elided automatically; the previous
  // 'return std::move(d_levels)' inhibited copy elision
  return d_levels;
}
|
decd5e6c5681ce8e590af877a222978a25787f12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixCurves.h"
#include <cuda/helpers.h>
#include <random.h>
#include <sutil/vec_math.h>
extern "C" {
__constant__ Params params;
}
// Writes an RGB result into the three 32-bit ray-payload registers so the
// raygen program can read it back after optixTrace(); floats are bit-cast
// because payload registers hold unsigned integer values.
static __forceinline__ __device__ void setPayload( float3 p )
{
optixSetPayload_0( float_as_int( p.x ) );
optixSetPayload_1( float_as_int( p.y ) );
optixSetPayload_2( float_as_int( p.z ) );
}
// Builds a primary camera ray for the given launch index: the pixel position
// is mapped to normalized device coordinates in [-1, 1], then projected
// through the camera basis (cam_u, cam_v, cam_w) stored in 'params'.
static __forceinline__ __device__ void computeRay( uint3 idx, uint3 dim, float3& origin, float3& direction )
{
    // Per-axis normalized device coordinates for this pixel
    const float ndc_x = 2.0f * ( static_cast<float>( idx.x ) / static_cast<float>( dim.x ) ) - 1.0f;
    const float ndc_y = 2.0f * ( static_cast<float>( idx.y ) / static_cast<float>( dim.y ) ) - 1.0f;

    origin    = params.cam_eye;
    direction = normalize( ndc_x * params.cam_u + ndc_y * params.cam_v + params.cam_w );
}
extern "C" __global__ void __raygen__basic()
{
// Lookup our location within the launch grid
const uint3 idx = optixGetLaunchIndex();
const uint3 dim = optixGetLaunchDimensions();
// Map our launch idx to a screen location and create a ray from the camera
// location through the screen
float3 ray_origin, ray_direction;
computeRay( idx, dim, ray_origin, ray_direction );
// Trace the ray against our scene hierarchy; the hit/miss programs return
// the shaded color through the three 32-bit payload registers p0..p2
unsigned int p0, p1, p2;
optixTrace(
params.handle,
ray_origin,
ray_direction,
0.0f, // Min intersection distance
1e16f, // Max intersection distance
0.0f, // rayTime -- used for motion blur
OptixVisibilityMask( 255 ), // Specify always visible
OPTIX_RAY_FLAG_NONE,
0, // SBT offset -- See SBT discussion
1, // SBT stride -- See SBT discussion
0, // missSBTIndex -- See SBT discussion
p0, p1, p2 );
// Reinterpret the payload bits back into floats (inverse of setPayload)
float3 result;
result.x = int_as_float( p0 );
result.y = int_as_float( p1 );
result.z = int_as_float( p2 );
// Record results in our output raster
params.image[idx.y * params.image_width + idx.x] = make_color( result );
}
extern "C" __global__ void __raygen__motion_blur()
{
// Lookup our location within the launch grid
const uint3 idx = optixGetLaunchIndex();
const uint3 dim = optixGetLaunchDimensions();
// Map our launch idx to a screen location and create a ray from the camera
// location through the screen
float3 ray_origin, ray_direction;
computeRay( idx, dim, ray_origin, ray_direction );
// Trace the ray against our scene hierarchy; motion blur is approximated by
// averaging NUM_SAMPLES traces at randomized ray times
unsigned int p0, p1, p2;
const int NUM_SAMPLES = 100;
float3 result = {};
// Per-pixel RNG seed.
// NOTE(review): seed uses idx.y * dim.y + dim.x -- possibly intended
// idx.y * dim.x + idx.x; per-pixel uniqueness still holds because idx.x is
// passed as tea's second seed word -- confirm
unsigned int seed = tea<4>(idx.y * dim.y + dim.x, idx.x);
for( int i = 0; i < NUM_SAMPLES; ++i )
{
const float ray_time = rnd(seed); // compute next random ray time in [0, 1[
optixTrace( params.handle, ray_origin, ray_direction,
0.0f, // Min intersection distance
1e16f, // Max intersection distance
ray_time, // rayTime -- used for motion blur
OptixVisibilityMask( 255 ), // Specify always visible
OPTIX_RAY_FLAG_NONE,
0, // SBT offset -- See SBT discussion
1, // SBT stride -- See SBT discussion
0, // missSBTIndex -- See SBT discussion
p0, p1, p2 );
// Accumulate the color returned through the payload registers
result.x += int_as_float( p0 );
result.y += int_as_float( p1 );
result.z += int_as_float( p2 );
}
// Record the average of the samples in our output raster
params.image[idx.y * params.image_width + idx.x] = make_color( result / NUM_SAMPLES );
}
// Miss program: no geometry was hit, so return the constant background color
// stored in this program's SBT record via the payload registers.
extern "C" __global__ void __miss__ms()
{
MissData* miss_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
setPayload( miss_data->bg_color );
}
// Closest-hit program for the built-in curve primitive: shades the hit point
// based on its parametric position along the curve segment.
extern "C" __global__ void __closesthit__ch()
{
// When built-in curve intersection is used, the curve parameter u is provided
// by the OptiX API. The parameters range is [0,1] over the curve segment,
// with u=0 or u=1 only on the end caps.
float u = optixGetCurveParameter();
// linearly interpolate from black to orange, returned via the payload
setPayload( make_float3( u, u / 3.0f, 0.0f ) );
}
| decd5e6c5681ce8e590af877a222978a25787f12.cu | //
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixCurves.h"
#include <cuda/helpers.h>
#include <random.h>
#include <sutil/vec_math.h>
extern "C" {
__constant__ Params params;
}
// Store an RGB color into the three 32-bit ray payload registers,
// bit-casting each float component to an unsigned int (the receiving
// raygen program reverses this with int_as_float).
static __forceinline__ __device__ void setPayload( float3 p )
{
    optixSetPayload_0( float_as_int( p.x ) );
    optixSetPayload_1( float_as_int( p.y ) );
    optixSetPayload_2( float_as_int( p.z ) );
}
// Build a primary camera ray for launch cell `idx` of a `dim`-sized launch:
// map the pixel to normalized device coordinates in [-1,1]^2, then combine
// the camera basis vectors (cam_u, cam_v, cam_w) from the launch params.
static __forceinline__ __device__ void computeRay( uint3 idx, uint3 dim, float3& origin, float3& direction )
{
    // NDC position of this pixel on both axes.
    const float ndc_x = 2.0f * ( static_cast<float>( idx.x ) / static_cast<float>( dim.x ) ) - 1.0f;
    const float ndc_y = 2.0f * ( static_cast<float>( idx.y ) / static_cast<float>( dim.y ) ) - 1.0f;
    origin    = params.cam_eye;
    direction = normalize( ndc_x * params.cam_u + ndc_y * params.cam_v + params.cam_w );
}
// Basic ray-generation program: one camera ray per pixel at rayTime 0,
// writing the payload-returned RGB into the output image.
extern "C" __global__ void __raygen__basic()
{
    // Lookup our location within the launch grid
    const uint3 idx = optixGetLaunchIndex();
    const uint3 dim = optixGetLaunchDimensions();
    // Map our launch idx to a screen location and create a ray from the camera
    // location through the screen
    float3 ray_origin, ray_direction;
    computeRay( idx, dim, ray_origin, ray_direction );
    // Trace the ray against our scene hierarchy; p0..p2 receive the bit-cast
    // RGB written by the hit/miss programs via setPayload.
    unsigned int p0, p1, p2;
    optixTrace(
            params.handle,
            ray_origin,
            ray_direction,
            0.0f, // Min intersection distance
            1e16f, // Max intersection distance
            0.0f, // rayTime -- used for motion blur
            OptixVisibilityMask( 255 ), // Specify always visible
            OPTIX_RAY_FLAG_NONE,
            0, // SBT offset -- See SBT discussion
            1, // SBT stride -- See SBT discussion
            0, // missSBTIndex -- See SBT discussion
            p0, p1, p2 );
    float3 result;
    result.x = int_as_float( p0 );
    result.y = int_as_float( p1 );
    result.z = int_as_float( p2 );
    // Record results in our output raster
    params.image[idx.y * params.image_width + idx.x] = make_color( result );
}
// Ray-generation program with stochastic motion blur: averages NUM_SAMPLES
// traces per pixel, each at a random ray time in [0,1).  Payload registers
// p0..p2 carry the bit-cast RGB result back from the hit/miss programs.
extern "C" __global__ void __raygen__motion_blur()
{
    // Lookup our location within the launch grid
    const uint3 idx = optixGetLaunchIndex();
    const uint3 dim = optixGetLaunchDimensions();
    // Map our launch idx to a screen location and create a ray from the camera
    // location through the screen
    float3 ray_origin, ray_direction;
    computeRay( idx, dim, ray_origin, ray_direction );
    // Trace the ray against our scene hierarchy
    unsigned int p0, p1, p2;
    const int NUM_SAMPLES = 100;
    float3 result = {};
    // NOTE(review): the seed mixes idx.y * dim.y + dim.x -- this looks like it
    // was meant to be idx.y * dim.x + idx.x (a unique per-pixel id); as written
    // distinct pixels can share a seed.  Confirm against the SDK original.
    unsigned int seed = tea<4>(idx.y * dim.y + dim.x, idx.x);
    for( int i = 0; i < NUM_SAMPLES; ++i )
    {
        const float ray_time = rnd(seed); // compute next random ray time in [0, 1[
        optixTrace( params.handle, ray_origin, ray_direction,
                0.0f, // Min intersection distance
                1e16f, // Max intersection distance
                ray_time, // rayTime -- used for motion blur
                OptixVisibilityMask( 255 ), // Specify always visible
                OPTIX_RAY_FLAG_NONE,
                0, // SBT offset -- See SBT discussion
                1, // SBT stride -- See SBT discussion
                0, // missSBTIndex -- See SBT discussion
                p0, p1, p2 );
        // Accumulate the bit-cast float payload of this sample.
        result.x += int_as_float( p0 );
        result.y += int_as_float( p1 );
        result.z += int_as_float( p2 );
    }
    // Record results in our output raster (averaged over all samples)
    params.image[idx.y * params.image_width + idx.x] = make_color( result / NUM_SAMPLES );
}
// Miss program: shade the ray with the background color stored in this
// program's SBT record.
extern "C" __global__ void __miss__ms()
{
    const MissData* sbt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
    setPayload( sbt_data->bg_color );
}
// Closest-hit program for built-in curves: shades from black (u=0) toward
// orange (u=1).  The curve parameter u is supplied by the OptiX built-in
// curve intersector; its range is [0,1] over the segment, and u=0 / u=1
// occur only on the end caps.
extern "C" __global__ void __closesthit__ch()
{
    const float u = optixGetCurveParameter();
    const float3 shade = make_float3( u, u / 3.0f, 0.0f );
    setPayload( shade );
}
|
3d4792fb0e701d4617f888c72c4d50468654c482.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/contrastive_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward pass of the contrastive loss.
// bottom[0]/bottom[1]: paired feature blobs a and b; bottom[2]: per-pair
// label (nonzero = similar, 0 = dissimilar).  Writes the scalar loss to
// top[0].  Squared pair distances are computed on the GPU (sub, powx,
// row-sum gemv), then the loss is accumulated on the CPU.
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top,
    const bool preforward_flag) {
  const int count = bottom[0]->count();
  caffe_gpu_sub(
      count,
      bottom[0]->gpu_data(), // a
      bottom[1]->gpu_data(), // b
      diff_.mutable_gpu_data()); // a_i-b_i
  caffe_gpu_powx(
      count,
      diff_.mutable_gpu_data(), // a_i-b_i
      Dtype(2),
      diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
  // Row-sum via gemv against a vector of ones: one squared distance per pair.
  caffe_gpu_gemv(
      CblasNoTrans,
      bottom[0]->num(),
      bottom[0]->channels(),
      Dtype(1.0),
      diff_sq_.gpu_data(), // (a_i-b_i)^2
      summer_vec_.gpu_data(),
      Dtype(0.0),
      dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
  Dtype margin = this->layer_param_.contrastive_loss_param().margin();
  bool legacy_version =
      this->layer_param_.contrastive_loss_param().legacy_version();
  Dtype loss(0.0);
  for (int i = 0; i < bottom[0]->num(); ++i) {
    if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
      loss += dist_sq_.cpu_data()[i];
    } else { // dissimilar pairs
      if (legacy_version) {
        // legacy form: max(margin - d^2, 0)
        loss += ::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
      } else {
        // current form: max(margin - d, 0)^2
        Dtype dist = ::max(margin - sqrt(dist_sq_.cpu_data()[i]),
                           Dtype(0.0));
        loss += dist*dist;
      }
    }
  }
  // Average over pairs; the 1/2 matches the gradient scaling in Backward.
  loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}
// Gradient kernel for the contrastive loss.  One thread per element i
// (grid-stride via CUDA_KERNEL_LOOP); n = i / channels recovers the pair
// index used to read the label y[n] and the squared distance dist_sq[n].
// alpha already carries the sign, loss weight, and 1/num scaling.
template <typename Dtype>
__global__ void CLLBackward(const int count, const int channels,
    const Dtype margin, const bool legacy_version, const Dtype alpha,
    const Dtype* y, const Dtype* diff, const Dtype* dist_sq,
    Dtype *bottom_diff) {
  CUDA_KERNEL_LOOP(i, count) {
    int n = i / channels;  // the num index, to access y and dist_sq
    if (static_cast<int>(y[n])) {  // similar pairs
      bottom_diff[i] = alpha * diff[i];
    } else {  // dissimilar pairs
      Dtype mdist(0.0);
      Dtype beta(0.0);
      if (legacy_version) {
        // legacy loss max(margin - d^2, 0); note beta is not scaled by diff
        // here, matching upstream Caffe.
        mdist = (margin - dist_sq[n]);
        beta = -alpha;
      } else {
        // current loss max(margin - d, 0)^2; 1e-4 guards division when d ~ 0
        Dtype dist = sqrt(dist_sq[n]);
        mdist = (margin - dist);
        beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
      }
      // Outside the margin the hinge is inactive: zero gradient.
      if (mdist > 0.0) {
        bottom_diff[i] = beta;
      } else {
        bottom_diff[i] = 0;
      }
    }
  }
}
// Backward pass: launches CLLBackward once per input blob (i = 0 for a,
// i = 1 for b) with opposite signs, scaling by the top loss weight and
// 1/num.  Reuses diff_ and dist_sq_ cached during Forward_gpu.
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom,
    const bool prebackward_flag) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const int count = bottom[0]->count();
      const int channels = bottom[0]->channels();
      Dtype margin = this->layer_param_.contrastive_loss_param().margin();
      const bool legacy_version =
          this->layer_param_.contrastive_loss_param().legacy_version();
      // Gradient w.r.t. a uses +1, w.r.t. b uses -1 (diff_ stores a - b).
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] /
          static_cast<Dtype>(bottom[0]->num());
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL(( CLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          count, channels, margin, legacy_version, alpha,
          bottom[2]->gpu_data(),  // pair similarity 0 or 1
          diff_.gpu_data(),  // the cached eltwise difference between a and b
          dist_sq_.gpu_data(),  // the cached square distance between a and b
          bottom[i]->mutable_gpu_diff());
      CUDA_POST_KERNEL_CHECK;
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer);
} // namespace caffe
| 3d4792fb0e701d4617f888c72c4d50468654c482.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/contrastive_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward pass of the contrastive loss.
// bottom[0]/bottom[1]: paired feature blobs a and b; bottom[2]: per-pair
// label (nonzero = similar, 0 = dissimilar).  Writes the scalar loss to
// top[0].  Squared pair distances are computed on the GPU (sub, powx,
// row-sum gemv), then the loss is accumulated on the CPU.
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top,
    const bool preforward_flag) {
  const int count = bottom[0]->count();
  caffe_gpu_sub(
      count,
      bottom[0]->gpu_data(), // a
      bottom[1]->gpu_data(), // b
      diff_.mutable_gpu_data()); // a_i-b_i
  caffe_gpu_powx(
      count,
      diff_.mutable_gpu_data(), // a_i-b_i
      Dtype(2),
      diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
  // Row-sum via gemv against a vector of ones: one squared distance per pair.
  caffe_gpu_gemv(
      CblasNoTrans,
      bottom[0]->num(),
      bottom[0]->channels(),
      Dtype(1.0),
      diff_sq_.gpu_data(), // (a_i-b_i)^2
      summer_vec_.gpu_data(),
      Dtype(0.0),
      dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
  Dtype margin = this->layer_param_.contrastive_loss_param().margin();
  bool legacy_version =
      this->layer_param_.contrastive_loss_param().legacy_version();
  Dtype loss(0.0);
  for (int i = 0; i < bottom[0]->num(); ++i) {
    if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
      loss += dist_sq_.cpu_data()[i];
    } else { // dissimilar pairs
      if (legacy_version) {
        // legacy form: max(margin - d^2, 0)
        loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
      } else {
        // current form: max(margin - d, 0)^2
        Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]),
                              Dtype(0.0));
        loss += dist*dist;
      }
    }
  }
  // Average over pairs; the 1/2 matches the gradient scaling in Backward.
  loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
  top[0]->mutable_cpu_data()[0] = loss;
}
// Gradient kernel for the contrastive loss.  One thread per element i
// (grid-stride via CUDA_KERNEL_LOOP); n = i / channels recovers the pair
// index used to read the label y[n] and the squared distance dist_sq[n].
// alpha already carries the sign, loss weight, and 1/num scaling.
template <typename Dtype>
__global__ void CLLBackward(const int count, const int channels,
    const Dtype margin, const bool legacy_version, const Dtype alpha,
    const Dtype* y, const Dtype* diff, const Dtype* dist_sq,
    Dtype *bottom_diff) {
  CUDA_KERNEL_LOOP(i, count) {
    int n = i / channels;  // the num index, to access y and dist_sq
    if (static_cast<int>(y[n])) {  // similar pairs
      bottom_diff[i] = alpha * diff[i];
    } else {  // dissimilar pairs
      Dtype mdist(0.0);
      Dtype beta(0.0);
      if (legacy_version) {
        // legacy loss max(margin - d^2, 0); note beta is not scaled by diff
        // here, matching upstream Caffe.
        mdist = (margin - dist_sq[n]);
        beta = -alpha;
      } else {
        // current loss max(margin - d, 0)^2; 1e-4 guards division when d ~ 0
        Dtype dist = sqrt(dist_sq[n]);
        mdist = (margin - dist);
        beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
      }
      // Outside the margin the hinge is inactive: zero gradient.
      if (mdist > 0.0) {
        bottom_diff[i] = beta;
      } else {
        bottom_diff[i] = 0;
      }
    }
  }
}
// Backward pass: launches CLLBackward once per input blob (i = 0 for a,
// i = 1 for b) with opposite signs, scaling by the top loss weight and
// 1/num.  Reuses diff_ and dist_sq_ cached during Forward_gpu.
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom,
    const bool prebackward_flag) {
  for (int i = 0; i < 2; ++i) {
    if (propagate_down[i]) {
      const int count = bottom[0]->count();
      const int channels = bottom[0]->channels();
      Dtype margin = this->layer_param_.contrastive_loss_param().margin();
      const bool legacy_version =
          this->layer_param_.contrastive_loss_param().legacy_version();
      // Gradient w.r.t. a uses +1, w.r.t. b uses -1 (diff_ stores a - b).
      const Dtype sign = (i == 0) ? 1 : -1;
      const Dtype alpha = sign * top[0]->cpu_diff()[0] /
          static_cast<Dtype>(bottom[0]->num());
      // NOLINT_NEXT_LINE(whitespace/operators)
      CLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, channels, margin, legacy_version, alpha,
          bottom[2]->gpu_data(),  // pair similarity 0 or 1
          diff_.gpu_data(),  // the cached eltwise difference between a and b
          dist_sq_.gpu_data(),  // the cached square distance between a and b
          bottom[i]->mutable_gpu_diff());
      CUDA_POST_KERNEL_CHECK;
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer);
} // namespace caffe
|
c454ddd95c0f6cd7a761ee15a7792ab6d665fa91.hip | // !!! This is a file automatically generated by hipify!!!
//in place 1d dst
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include "hip/device_functions.h"
#include "transformfunc.h"
# define M_PI 3.14159265358979323846
// #define TIME_TEST
hipfftHandle plan_dst3_nocubic;
#ifdef TIME_TEST
static long long time_preOp, time_postOp,time_trans_xzy,time_trans_zyx,time_cufft;
// Capture the current wall-clock time into *tBegin (TIME_TEST builds only).
static void timeBegin(struct timeval *tBegin){
    gettimeofday(tBegin, NULL);
}
// Return wall-clock microseconds elapsed since tBegin.
static long long timeEnd(struct timeval tBegin){
    struct timeval tEnd;
    gettimeofday(&tEnd, NULL);
    long long usec=(tEnd.tv_sec-tBegin.tv_sec)*1000*1000+tEnd.tv_usec-tBegin.tv_usec;
    return usec;
}
#endif
// Pre-processing butterfly for the DST-III: outN is a power of two, the
// length of the rearranged array that will be fed to the real-to-complex FFT.
// One block per row; thread itx handles the pair (itx, outN-itx), so threads
// touch disjoint elements and no __syncthreads() is required.
__global__ void preOp_dst3_inplace(double* in,int outN,int batch){
    int itx = threadIdx.x;
    int ibx = blockIdx.x;
    int iby = blockIdx.y;
    // Rows are padded to outN+2 doubles for the in-place D2Z transform.
    double *pin=in+iby*(outN+2)*(batch+1)+ibx*(outN+2);
    if(itx<outN/2+1){
        double sina;
        double cosa;
        sincos((itx)*M_PI/(2*outN),&sina,&cosa);
        // double sina= sin((itx)*M_PI/(2*outN));
        // double cosa= cos((itx)*M_PI/(2*outN));
        double Ta= (pin[itx]+pin[outN-itx]);
        double Tb= (pin[itx]-pin[outN-itx]);
        // __syncthreads();
        pin[itx] = Ta*cosa+Tb*sina;
        pin[outN-itx]= Ta*sina-Tb*cosa;
    }
}
// Post-processing of the real FFT output into DST-III coefficients.
// Requires sizeof(double)*(N+2) bytes of dynamic shared memory (the launch
// passes sizeof(double)*(DATA_SIZE+1) with N = DATA_SIZE-1).
// The block is launched with exactly N/2+1 threads, so the __syncthreads()
// calls below are reached by every thread despite sitting inside the if.
__global__ void postOp_dst3_inplace(double* in,int N,int batch){
    extern __shared__ double sh_in[];
    int itx = threadIdx.x;
    int ibx = blockIdx.x;
    int iby = blockIdx.y;
    double *pin=in+iby*(N+2)*(batch+1)+ibx*(N+2);
    if(itx<N/2+1){
        // Stage the whole padded row in shared memory.
        sh_in[itx]=pin[itx];
        sh_in[itx+N/2+1]=pin[itx+N/2+1];
        __syncthreads();
        // The *2 and later /2 cancel, so neither is applied.
        // The sign of b_k is folded in here: negate the imaginary parts and
        // mind the signs in the b_k +- a_k combination below.
        if(itx!=0){
            sh_in[itx*2+1]=(-1)*sh_in[itx*2+1];
        }
        __syncthreads();
        // As in the other DCT/DST kernels, divide by 2 so the result is
        // exactly 1/2 of FFTW's convention.
        if(itx==0){
            pin[0]=0;
            pin[1]=sh_in[0]/2;
        }else{
            pin[2*itx]=(sh_in[itx*2+1]-sh_in[itx*2])/2;
            if(itx*2+1<N+1){
                pin[2*itx+1]=(sh_in[itx*2]+sh_in[itx*2+1])/2;
            }
        }
    }
}
// Batched in-place real-to-complex FFT; DATA_SIZE here is 2^n+1, and the
// transform length is DATA_SIZE-1 with rows padded to DATA_SIZE+1 doubles.
// NOTE(review): a fresh plan is written into the global plan_dst3_nocubic on
// every call; calling twice without freeMemory_dst3() in between leaks the
// previous plan.
void dofft_inplace(double *d_data , int DATA_SIZE,int batch,int nLayer){
    int n[1]={DATA_SIZE-1};
    int inembeb[1]={DATA_SIZE+1};
    int onembeb[1]={(DATA_SIZE+1)/2};
    hipfftResult r = hipfftPlanMany(&plan_dst3_nocubic,1,n,
            inembeb,1,DATA_SIZE+1,
            onembeb,1,(DATA_SIZE+1)/2,
            HIPFFT_D2Z, (batch+1)*(nLayer+1));
    if(r!=0){
        printf("CUFFT FAILED! ERROR CODE: %s\n",cufftresultcode[r]);
        exit(0);
    }
    hipfftExecD2Z(plan_dst3_nocubic, reinterpret_cast<double *>(d_data),
            reinterpret_cast<hipfftDoubleComplex *>(d_data));
}
// Assumes each row of the input matrix is DATA_SIZE+2 doubles long; padding
// may be revisited later.
// DATA_SIZE is the length of the array to transform; rows passed in are
// typically DATA_SIZE+2.
// DATA_SIZE here is 2^N+1.
// Runs the non-cubic DST-III pipeline: pre-twiddle kernel, batched FFT,
// post-twiddle kernel, then frees the internally created FFT plan.
void run_3d_dst_3_inplace_nocubic(double *d_data , int DATA_SIZE,int batch,int nLayer){
#ifdef TIME_TEST
    time_postOp=0;
    time_preOp=0;
    time_trans_xzy=0;
    time_trans_zyx=0;
    time_cufft=0;
#endif
    dim3 preOpGridDim;
    preOpGridDim.x=batch;
    preOpGridDim.y=nLayer;
    preOpGridDim.z=1;
#ifdef TIME_TEST
    struct timeval tBegin1;
    timeBegin(&tBegin1);
#endif
    hipLaunchKernelGGL(( preOp_dst3_inplace), dim3(preOpGridDim),dim3(DATA_SIZE/2+1), 0, 0, d_data,DATA_SIZE-1,batch);
#ifdef TIME_TEST
    hipDeviceSynchronize();
    time_preOp = timeEnd(tBegin1);
#endif
#ifdef TIME_TEST
    struct timeval tBegin2;
    timeBegin(&tBegin2);
#endif
    dofft_inplace(d_data,DATA_SIZE,batch,nLayer);
#ifdef TIME_TEST
    hipDeviceSynchronize();
    time_cufft += timeEnd(tBegin2);
#endif
    dim3 postOpGridDim;
    postOpGridDim.x=batch;
    postOpGridDim.y=nLayer;
    postOpGridDim.z=1;
#ifdef TIME_TEST
    struct timeval tBegin3;
    timeBegin(&tBegin3);
#endif
    // postOp needs DATA_SIZE+1 doubles of dynamic shared memory per block.
    hipLaunchKernelGGL(( postOp_dst3_inplace), dim3(postOpGridDim),dim3(DATA_SIZE/2+1),sizeof(double)*(DATA_SIZE+1), 0, d_data,DATA_SIZE-1,batch);
#ifdef TIME_TEST
    hipDeviceSynchronize();
    time_postOp += timeEnd(tBegin3);
#endif
#ifdef TIME_TEST
    hipDeviceSynchronize();
    printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
    printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
    printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
    printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
    printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
    freeMemory_dst3();
}
// Cubic-grid variant: same pre-twiddle / FFT / post-twiddle pipeline as the
// nocubic version, but uses the caller-supplied FFT plan (not destroyed here)
// and a DATA_SIZE x DATA_SIZE grid.
void run_3d_dst_3_inplace(double *d_data , int DATA_SIZE,hipfftHandle &plan_dst3_cubic){
#ifdef TIME_TEST
    time_postOp=0;
    time_preOp=0;
    time_trans_xzy=0;
    time_trans_zyx=0;
    time_cufft=0;
#endif
    dim3 preOpGridDim;
    preOpGridDim.x=DATA_SIZE;
    preOpGridDim.y=DATA_SIZE;
    preOpGridDim.z=1;
#ifdef TIME_TEST
    struct timeval tBegin1;
    timeBegin(&tBegin1);
#endif
    hipLaunchKernelGGL(( preOp_dst3_inplace), dim3(preOpGridDim),dim3(DATA_SIZE/2+1), 0, 0, d_data,DATA_SIZE-1,DATA_SIZE);
#ifdef TIME_TEST
    hipDeviceSynchronize();
    time_preOp = timeEnd(tBegin1);
#endif
#ifdef TIME_TEST
    struct timeval tBegin2;
    timeBegin(&tBegin2);
#endif
    hipfftExecD2Z(plan_dst3_cubic, reinterpret_cast<double *>(d_data),
            reinterpret_cast<hipfftDoubleComplex *>(d_data));
#ifdef TIME_TEST
    hipDeviceSynchronize();
    time_cufft += timeEnd(tBegin2);
#endif
    dim3 postOpGridDim;
    postOpGridDim.x=DATA_SIZE;
    postOpGridDim.y=DATA_SIZE;
    postOpGridDim.z=1;
#ifdef TIME_TEST
    struct timeval tBegin3;
    timeBegin(&tBegin3);
#endif
    // postOp needs DATA_SIZE+1 doubles of dynamic shared memory per block.
    hipLaunchKernelGGL(( postOp_dst3_inplace), dim3(postOpGridDim),dim3(DATA_SIZE/2+1),sizeof(double)*(DATA_SIZE+1), 0, d_data,DATA_SIZE-1,DATA_SIZE);
#ifdef TIME_TEST
    hipDeviceSynchronize();
    time_postOp += timeEnd(tBegin3);
#endif
#ifdef TIME_TEST
    hipDeviceSynchronize();
    printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
    printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
    printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
    printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
    printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
}
// for nocubic: destroys the module-global FFT plan created by dofft_inplace
void freeMemory_dst3(){
    hipfftDestroy(plan_dst3_nocubic);
}
| c454ddd95c0f6cd7a761ee15a7792ab6d665fa91.cu | //in place 1d dst
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// includes, project
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include "device_functions.h"
#include "transformfunc.h"
# define M_PI 3.14159265358979323846
// #define TIME_TEST
cufftHandle plan_dst3_nocubic;
#ifdef TIME_TEST
static long long time_preOp, time_postOp,time_trans_xzy,time_trans_zyx,time_cufft;
// Capture the current wall-clock time into *tBegin (TIME_TEST builds only).
static void timeBegin(struct timeval *tBegin){
    gettimeofday(tBegin, NULL);
}
// Return wall-clock microseconds elapsed since tBegin.
static long long timeEnd(struct timeval tBegin){
    struct timeval tEnd;
    gettimeofday(&tEnd, NULL);
    long long usec=(tEnd.tv_sec-tBegin.tv_sec)*1000*1000+tEnd.tv_usec-tBegin.tv_usec;
    return usec;
}
#endif
// Pre-processing butterfly for the DST-III: outN is a power of two, the
// length of the rearranged array that will be fed to the real-to-complex FFT.
// One block per row; thread itx handles the pair (itx, outN-itx), so threads
// touch disjoint elements and no __syncthreads() is required.
__global__ void preOp_dst3_inplace(double* in,int outN,int batch){
    int itx = threadIdx.x;
    int ibx = blockIdx.x;
    int iby = blockIdx.y;
    // Rows are padded to outN+2 doubles for the in-place D2Z transform.
    double *pin=in+iby*(outN+2)*(batch+1)+ibx*(outN+2);
    if(itx<outN/2+1){
        double sina;
        double cosa;
        sincos((itx)*M_PI/(2*outN),&sina,&cosa);
        // double sina= sin((itx)*M_PI/(2*outN));
        // double cosa= cos((itx)*M_PI/(2*outN));
        double Ta= (pin[itx]+pin[outN-itx]);
        double Tb= (pin[itx]-pin[outN-itx]);
        // __syncthreads();
        pin[itx] = Ta*cosa+Tb*sina;
        pin[outN-itx]= Ta*sina-Tb*cosa;
    }
}
// Post-processing of the real FFT output into DST-III coefficients.
// Requires sizeof(double)*(N+2) bytes of dynamic shared memory (the launch
// passes sizeof(double)*(DATA_SIZE+1) with N = DATA_SIZE-1).
// The block is launched with exactly N/2+1 threads, so the __syncthreads()
// calls below are reached by every thread despite sitting inside the if.
__global__ void postOp_dst3_inplace(double* in,int N,int batch){
    extern __shared__ double sh_in[];
    int itx = threadIdx.x;
    int ibx = blockIdx.x;
    int iby = blockIdx.y;
    double *pin=in+iby*(N+2)*(batch+1)+ibx*(N+2);
    if(itx<N/2+1){
        // Stage the whole padded row in shared memory.
        sh_in[itx]=pin[itx];
        sh_in[itx+N/2+1]=pin[itx+N/2+1];
        __syncthreads();
        // The *2 and later /2 cancel, so neither is applied.
        // The sign of b_k is folded in here: negate the imaginary parts and
        // mind the signs in the b_k +- a_k combination below.
        if(itx!=0){
            sh_in[itx*2+1]=(-1)*sh_in[itx*2+1];
        }
        __syncthreads();
        // As in the other DCT/DST kernels, divide by 2 so the result is
        // exactly 1/2 of FFTW's convention.
        if(itx==0){
            pin[0]=0;
            pin[1]=sh_in[0]/2;
        }else{
            pin[2*itx]=(sh_in[itx*2+1]-sh_in[itx*2])/2;
            if(itx*2+1<N+1){
                pin[2*itx+1]=(sh_in[itx*2]+sh_in[itx*2+1])/2;
            }
        }
    }
}
// Batched in-place real-to-complex FFT; DATA_SIZE here is 2^n+1, and the
// transform length is DATA_SIZE-1 with rows padded to DATA_SIZE+1 doubles.
// NOTE(review): a fresh plan is written into the global plan_dst3_nocubic on
// every call; calling twice without freeMemory_dst3() in between leaks the
// previous plan.
void dofft_inplace(double *d_data , int DATA_SIZE,int batch,int nLayer){
    int n[1]={DATA_SIZE-1};
    int inembeb[1]={DATA_SIZE+1};
    int onembeb[1]={(DATA_SIZE+1)/2};
    cufftResult r = cufftPlanMany(&plan_dst3_nocubic,1,n,
            inembeb,1,DATA_SIZE+1,
            onembeb,1,(DATA_SIZE+1)/2,
            CUFFT_D2Z, (batch+1)*(nLayer+1));
    if(r!=0){
        printf("CUFFT FAILED! ERROR CODE: %s\n",cufftresultcode[r]);
        exit(0);
    }
    cufftExecD2Z(plan_dst3_nocubic, reinterpret_cast<double *>(d_data),
            reinterpret_cast<cufftDoubleComplex *>(d_data));
}
// Assumes each row of the input matrix is DATA_SIZE+2 doubles long; padding
// may be revisited later.
// DATA_SIZE is the length of the array to transform; rows passed in are
// typically DATA_SIZE+2.
// DATA_SIZE here is 2^N+1.
// Runs the non-cubic DST-III pipeline: pre-twiddle kernel, batched FFT,
// post-twiddle kernel, then frees the internally created FFT plan.
void run_3d_dst_3_inplace_nocubic(double *d_data , int DATA_SIZE,int batch,int nLayer){
#ifdef TIME_TEST
    time_postOp=0;
    time_preOp=0;
    time_trans_xzy=0;
    time_trans_zyx=0;
    time_cufft=0;
#endif
    dim3 preOpGridDim;
    preOpGridDim.x=batch;
    preOpGridDim.y=nLayer;
    preOpGridDim.z=1;
#ifdef TIME_TEST
    struct timeval tBegin1;
    timeBegin(&tBegin1);
#endif
    preOp_dst3_inplace<<<preOpGridDim,DATA_SIZE/2+1>>>(d_data,DATA_SIZE-1,batch);
#ifdef TIME_TEST
    cudaDeviceSynchronize();
    time_preOp = timeEnd(tBegin1);
#endif
#ifdef TIME_TEST
    struct timeval tBegin2;
    timeBegin(&tBegin2);
#endif
    dofft_inplace(d_data,DATA_SIZE,batch,nLayer);
#ifdef TIME_TEST
    cudaDeviceSynchronize();
    time_cufft += timeEnd(tBegin2);
#endif
    dim3 postOpGridDim;
    postOpGridDim.x=batch;
    postOpGridDim.y=nLayer;
    postOpGridDim.z=1;
#ifdef TIME_TEST
    struct timeval tBegin3;
    timeBegin(&tBegin3);
#endif
    // postOp needs DATA_SIZE+1 doubles of dynamic shared memory per block.
    postOp_dst3_inplace<<<postOpGridDim,DATA_SIZE/2+1,sizeof(double)*(DATA_SIZE+1)>>>(d_data,DATA_SIZE-1,batch);
#ifdef TIME_TEST
    cudaDeviceSynchronize();
    time_postOp += timeEnd(tBegin3);
#endif
#ifdef TIME_TEST
    cudaDeviceSynchronize();
    printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
    printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
    printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
    printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
    printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
    freeMemory_dst3();
}
// Cubic-grid variant: same pre-twiddle / FFT / post-twiddle pipeline as the
// nocubic version, but uses the caller-supplied FFT plan (not destroyed here)
// and a DATA_SIZE x DATA_SIZE grid.
void run_3d_dst_3_inplace(double *d_data , int DATA_SIZE,cufftHandle &plan_dst3_cubic){
#ifdef TIME_TEST
    time_postOp=0;
    time_preOp=0;
    time_trans_xzy=0;
    time_trans_zyx=0;
    time_cufft=0;
#endif
    dim3 preOpGridDim;
    preOpGridDim.x=DATA_SIZE;
    preOpGridDim.y=DATA_SIZE;
    preOpGridDim.z=1;
#ifdef TIME_TEST
    struct timeval tBegin1;
    timeBegin(&tBegin1);
#endif
    preOp_dst3_inplace<<<preOpGridDim,DATA_SIZE/2+1>>>(d_data,DATA_SIZE-1,DATA_SIZE);
#ifdef TIME_TEST
    cudaDeviceSynchronize();
    time_preOp = timeEnd(tBegin1);
#endif
#ifdef TIME_TEST
    struct timeval tBegin2;
    timeBegin(&tBegin2);
#endif
    cufftExecD2Z(plan_dst3_cubic, reinterpret_cast<double *>(d_data),
            reinterpret_cast<cufftDoubleComplex *>(d_data));
#ifdef TIME_TEST
    cudaDeviceSynchronize();
    time_cufft += timeEnd(tBegin2);
#endif
    dim3 postOpGridDim;
    postOpGridDim.x=DATA_SIZE;
    postOpGridDim.y=DATA_SIZE;
    postOpGridDim.z=1;
#ifdef TIME_TEST
    struct timeval tBegin3;
    timeBegin(&tBegin3);
#endif
    // postOp needs DATA_SIZE+1 doubles of dynamic shared memory per block.
    postOp_dst3_inplace<<<postOpGridDim,DATA_SIZE/2+1,sizeof(double)*(DATA_SIZE+1)>>>(d_data,DATA_SIZE-1,DATA_SIZE);
#ifdef TIME_TEST
    cudaDeviceSynchronize();
    time_postOp += timeEnd(tBegin3);
#endif
#ifdef TIME_TEST
    cudaDeviceSynchronize();
    printf("timepreOp: count=3 totaltime=%lld avetime=%lld \n",time_preOp,time_preOp/3);
    printf("timepostOp: count=3 totaltime=%lld avetime=%lld \n",time_postOp,time_postOp/3);
    printf("timecufft: count=3 totaltime=%lld avetime=%lld \n",time_cufft,time_cufft/3);
    printf("timetransxzy: count=2 totaltime=%lld avetime=%lld \n",time_trans_xzy,time_trans_xzy/2);
    printf("timetranszyx: count=2 totaltime=%lld avetime=%lld \n",time_trans_zyx,time_trans_zyx/2);
#endif
}
// for nocubic: destroys the module-global FFT plan created by dofft_inplace
void freeMemory_dst3(){
    cufftDestroy(plan_dst3_nocubic);
}
|
e9e3290335575a0db8710e5a638c65f911a98cde.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include "SyncedMemory.h"
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
// Uppercases ASCII letters and swaps adjacent character pairs inside each
// word (a word starts at index 0 or immediately after a space); newline
// characters are skipped entirely.
// NOTE(review): the per-character uppercasing and the per-word pair swap run
// on unsynchronized threads over the same buffer, so a word-leader thread may
// observe neighbouring characters before or after another thread uppercased
// or swapped them -- output is nondeterministic.  Splitting the two phases
// into separate kernel launches would make it deterministic.
// NOTE(review): *(cur+1) assumes the device buffer is NUL-terminated past
// fsize -- confirm SyncedMemory copies the fsize+1st byte.
__global__ void SomeTransform(char *input_gpu, int fsize) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < fsize && input_gpu[idx] != '\n') {
        //transform to capital
        if(input_gpu[idx] >= 'a' && input_gpu[idx] <= 'z'){
            input_gpu[idx] -= 32;
        }
        //swap all pairs in all words
        if(input_gpu[idx] == ' ' || idx==0){
            char* cur;
            if(idx==0)
                cur = input_gpu;
            else
                cur = input_gpu+idx+1;
            // Bug fix: the original used strict comparisons (>'A', <'Z',
            // >'a', <'z'), which wrongly excluded the boundary letters
            // 'A', 'Z', 'a' and 'z' and terminated the swap loop early.
            while((*cur>='A'&&*cur<='Z')||(*cur>='a'&&*cur<='z')){
                if((*(cur+1)>='A'&&*(cur+1)<='Z')||(*(cur+1)>='a'&&*(cur+1)<='z')){
                    char temp = *cur;
                    *cur = *(cur+1);
                    *(cur+1) = temp;
                    cur+=2;
                }
                else
                    break;
            }
        }
    }
}
// Entry point: reads the text file named by argv[1] into a synced
// host/device buffer, runs SomeTransform over it on the GPU, and prints the
// transformed text.
int main(int argc, char **argv)
{
    // init, and check
    if (argc != 2) {
        printf("Usage %s <input text file>\n", argv[0]);
        abort();
    }
    FILE *fp = fopen(argv[1], "r");
    if (!fp) {
        printf("Cannot open %s", argv[1]);
        abort();
    }
    // get file size
    fseek(fp, 0, SEEK_END);
    size_t fsize = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    // read files
    MemoryBuffer<char> text(fsize+1);
    auto text_smem = text.CreateSync(fsize);
    CHECK;
    // NOTE(review): fread's return value is not checked; a short read would
    // leave trailing bytes uninitialized before the terminator.
    fread(text_smem.get_cpu_wo(), 1, fsize, fp);
    text_smem.get_cpu_wo()[fsize] = '\0';
    fclose(fp);
    // TODO: do your transform here
    char *input_gpu = text_smem.get_gpu_rw();
    // An example: transform the first 64 characters to '!'
    // Don't transform over the tail
    // And don't transform the line breaks
    // One thread per byte, rounded up to whole 64-thread blocks.
    hipLaunchKernelGGL(( SomeTransform), dim3(fsize/64+1), dim3(64), 0, 0, input_gpu, fsize);
    puts(text_smem.get_cpu_ro());
    return 0;
}
| e9e3290335575a0db8710e5a638c65f911a98cde.cu | #include <cstdio>
#include <cstdlib>
#include "SyncedMemory.h"
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
// Uppercases ASCII letters and swaps adjacent character pairs inside each
// word (a word starts at index 0 or immediately after a space); newline
// characters are skipped entirely.
// NOTE(review): the per-character uppercasing and the per-word pair swap run
// on unsynchronized threads over the same buffer, so a word-leader thread may
// observe neighbouring characters before or after another thread uppercased
// or swapped them -- output is nondeterministic.  Splitting the two phases
// into separate kernel launches would make it deterministic.
// NOTE(review): *(cur+1) assumes the device buffer is NUL-terminated past
// fsize -- confirm SyncedMemory copies the fsize+1st byte.
__global__ void SomeTransform(char *input_gpu, int fsize) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < fsize && input_gpu[idx] != '\n') {
        //transform to capital
        if(input_gpu[idx] >= 'a' && input_gpu[idx] <= 'z'){
            input_gpu[idx] -= 32;
        }
        //swap all pairs in all words
        if(input_gpu[idx] == ' ' || idx==0){
            char* cur;
            if(idx==0)
                cur = input_gpu;
            else
                cur = input_gpu+idx+1;
            // Bug fix: the original used strict comparisons (>'A', <'Z',
            // >'a', <'z'), which wrongly excluded the boundary letters
            // 'A', 'Z', 'a' and 'z' and terminated the swap loop early.
            while((*cur>='A'&&*cur<='Z')||(*cur>='a'&&*cur<='z')){
                if((*(cur+1)>='A'&&*(cur+1)<='Z')||(*(cur+1)>='a'&&*(cur+1)<='z')){
                    char temp = *cur;
                    *cur = *(cur+1);
                    *(cur+1) = temp;
                    cur+=2;
                }
                else
                    break;
            }
        }
    }
}
// Entry point: reads the text file named by argv[1] into a synced
// host/device buffer, runs SomeTransform over it on the GPU, and prints the
// transformed text.
int main(int argc, char **argv)
{
    // init, and check
    if (argc != 2) {
        printf("Usage %s <input text file>\n", argv[0]);
        abort();
    }
    FILE *fp = fopen(argv[1], "r");
    if (!fp) {
        printf("Cannot open %s", argv[1]);
        abort();
    }
    // get file size
    fseek(fp, 0, SEEK_END);
    size_t fsize = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    // read files
    MemoryBuffer<char> text(fsize+1);
    auto text_smem = text.CreateSync(fsize);
    CHECK;
    // NOTE(review): fread's return value is not checked; a short read would
    // leave trailing bytes uninitialized before the terminator.
    fread(text_smem.get_cpu_wo(), 1, fsize, fp);
    text_smem.get_cpu_wo()[fsize] = '\0';
    fclose(fp);
    // TODO: do your transform here
    char *input_gpu = text_smem.get_gpu_rw();
    // An example: transform the first 64 characters to '!'
    // Don't transform over the tail
    // And don't transform the line breaks
    // One thread per byte, rounded up to whole 64-thread blocks.
    SomeTransform<<<fsize/64+1, 64>>>(input_gpu, fsize);
    puts(text_smem.get_cpu_ro());
    return 0;
}
|
7f0e40990d4ff7af88ab5aee8374b43fa7a4c77b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <thrust/device_ptr.h>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <helper_cuda.h>
/////////////////////////////////////////////////////////////////
// Some utility code to define grid_stride_range
// Normally this would be in a header but it's here
// for didactic purposes. Uses
#include "range.hpp"
using namespace util::lang;
// type alias to simplify typing...
template<typename T>
using step_range = typename range_proxy<T>::step_range_proxy;
// Build a grid-stride range for this thread: starts at begin offset by the
// global thread id, steps by the total number of threads in the grid, and
// stops at end.  Lets kernels iterate over [begin, end) with any launch size.
template <typename T>
__device__
step_range<T> grid_stride_range(T begin, T end) {
    begin += blockDim.x * blockIdx.x + threadIdx.x;
    return range(begin, end).step(gridDim.x * blockDim.x);
}
/////////////////////////////////////////////////////////////////
// Count the elements of data[0..n) satisfying predicate p, accumulating into
// *count with one atomicAdd per match (contended but fine for this demo).
// Uses a grid-stride loop, so any grid/block configuration is correct.
template <typename T, typename Predicate>
__device__
void count_if(int *count, T *data, int n, Predicate p)
{
    for (auto i : grid_stride_range(0, n)) {
        if (p(data[i])) atomicAdd(count, 1);
    }
}
// Use count_if with a lambda function that searches for x, y, z or w
// Note the use of range-based for loop and initializer_list inside the functor
// We use auto so we don't have to know the type of the functor or array
// Kernel: counts occurrences of 'x', 'y', 'z' or 'w' in text[0..n) into
// *count via count_if with a device lambda (range-for over an
// initializer-list-style letter array).
__global__
void xyzw_frequency(int *count, char *text, int n)
{
    const char letters[] { 'x','y','z','w' };
    count_if(count, text, n, [&](char c) {
        for (const auto x : letters)
            if (c == x) return true;
        return false;
    });
}
// Same count, but delegating to thrust::count_if with the device execution
// policy from inside the kernel; overwrites *count rather than accumulating.
// Launched <<<1,1>>> by main.  The lambda captures letters by value ([=]) as
// required for a device lambda.
__global__
void xyzw_frequency_thrust_device(int *count, char *text, int n)
{
    const char letters[] { 'x','y','z','w' };
    *count = thrust::count_if(thrust::device, text, text+n, [=](char c) {
        for (const auto x : letters)
            if (c == x) return true;
        return false;
    });
}
// a bug in Thrust 1.8 causes warnings when this is uncommented
// so commented out by default -- fixed in Thrust master branch
#if 0
void xyzw_frequency_thrust_host(int *count, char *text, int n)
{
const char letters[] { 'x','y','z','w' };
*count = thrust::count_if(thrust::host, text, text+n, [&](char c) {
for (const auto x : letters)
if (c == x) return true;
return false;
});
}
#endif
// Entry point: loads a text corpus, copies it to the selected GPU, and counts
// occurrences of 'x', 'y', 'z' or 'w' with two different kernels (the second,
// thrust-based kernel overwrites the first result before readback).
// argv[1] (optional) selects the device index.
int main(int argc, char** argv)
{
    const char *filename = sdkFindFilePath("warandpeace.txt", argv[0]);
    // Bug fix: sdkFindFilePath returns NULL when the file cannot be located;
    // the original passed that NULL straight to fopen (undefined behavior).
    if (filename == NULL)
    {
        printf("Cannot find the input text file\n. Exiting..\n");
        return EXIT_FAILURE;
    }
    int numBytes = 16*1048576;
    char *h_text = (char*)malloc(numBytes);
    // Bug fix: the original never checked the allocation.
    if (h_text == NULL)
    {
        printf("Failed to allocate the host text buffer\n. Exiting..\n");
        return EXIT_FAILURE;
    }
    // // find first CUDA device
    // int devID = findCudaDevice(argc, (const char **)argv);
    int devID = 0;
    if(argc == 2) {
        devID = atoi(argv[1]);
    }
    printf("select device : %d\n", devID);
    hipSetDevice(devID);
    hipError_t error;
    hipDeviceProp_t deviceProp;
    error = hipGetDeviceProperties(&deviceProp, devID);
    if (error != hipSuccess)
    {
        printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
    }
    else
    {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }
    char *d_text;
    checkCudaErrors(hipMalloc((void**)&d_text, numBytes));
    FILE *fp = fopen(filename, "r");
    if (fp == NULL)
    {
        printf("Cannot find the input text file\n. Exiting..\n");
        free(h_text);
        return EXIT_FAILURE;
    }
    int len = fread(h_text, sizeof(char), numBytes, fp);
    fclose(fp);
    std::cout << "Read " << len << " byte corpus from " << filename << std::endl;
    checkCudaErrors(hipMemcpy(d_text, h_text, len, hipMemcpyHostToDevice));
    int count = 0;
    int *d_count;
    checkCudaErrors(hipMalloc(&d_count, sizeof(int)));
    checkCudaErrors(hipMemset(d_count, 0, sizeof(int)));
    // Try uncommenting one kernel call at a time
    hipLaunchKernelGGL(( xyzw_frequency), dim3(8), dim3(256), 0, 0, d_count, d_text, len);
    hipLaunchKernelGGL(( xyzw_frequency_thrust_device), dim3(1), dim3(1), 0, 0, d_count, d_text, len);
    checkCudaErrors(hipMemcpy(&count, d_count, sizeof(int), hipMemcpyDeviceToHost));
    //xyzw_frequency_thrust_host(&count, h_text, len);
    std::cout << "counted " << count << " instances of 'x', 'y', 'z', or 'w' in \""
              << filename << "\"" << std::endl;
    checkCudaErrors(hipFree(d_count));
    checkCudaErrors(hipFree(d_text));
    // Bug fix: the original leaked the host buffer.
    free(h_text);
    return EXIT_SUCCESS;
}
| 7f0e40990d4ff7af88ab5aee8374b43fa7a4c77b.cu | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <thrust/device_ptr.h>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <helper_cuda.h>
/////////////////////////////////////////////////////////////////
// Some utility code to define grid_stride_range
// Normally this would be in a header but it's here
// for didactic purposes. Uses
#include "range.hpp"
using namespace util::lang;
// type alias to simplify typing...
template<typename T>
using step_range = typename range_proxy<T>::step_range_proxy;
// Builds a range [begin + globalThreadId, end) that steps by the total number
// of threads in the grid — the "grid-stride loop" pattern expressed as a
// range-based-for iterable (step_range comes from range.hpp).
template <typename T>
__device__
step_range<T> grid_stride_range(T begin, T end) {
  begin += blockDim.x * blockIdx.x + threadIdx.x;
  return range(begin, end).step(gridDim.x * blockDim.x);
}
/////////////////////////////////////////////////////////////////
// Counts elements of data[0..n) satisfying predicate p, accumulating into *count.
// Called from device code; each thread handles a grid-stride subset of the data.
// NOTE(review): *count must be zero-initialized (e.g. cudaMemset) before launch.
template <typename T, typename Predicate>
__device__
void count_if(int *count, T *data, int n, Predicate p)
{
  // grid_stride_range starts at the global thread id and steps by the total
  // thread count of the grid — the classic grid-stride loop.
  for (auto i : grid_stride_range(0, n)) {
    if (p(data[i])) atomicAdd(count, 1);
  }
}
// Use count_if with a lambda function that searches for x, y, z or w
// Note the use of range-based for loop and initializer_list inside the functor
// We use auto so we don't have to know the type of the functor or array
// Kernel: adds to *count the number of characters in text[0..n) that are one
// of 'x','y','z','w'. Works for any launch configuration via count_if's
// grid-stride loop.
__global__
void xyzw_frequency(int *count, char *text, int n)
{
  const char letters[] { 'x','y','z','w' };
  // Per-character predicate; captures the local letters array by reference.
  count_if(count, text, n, [&](char c) {
    for (const auto x : letters)
      if (c == x) return true;
    return false;
  });
}
// Kernel: computes the same letter count via device-side Thrust.
// Launched with a single thread (see main); thrust::count_if with the
// thrust::device policy performs the work from within the kernel.
__global__
void xyzw_frequency_thrust_device(int *count, char *text, int n)
{
  const char letters[] { 'x','y','z','w' };
  // NOTE: assigns (does not accumulate into) *count, so any value written by
  // a previously launched kernel is overwritten.
  *count = thrust::count_if(thrust::device, text, text+n, [=](char c) {
    for (const auto x : letters)
      if (c == x) return true;
    return false;
  });
}
// a bug in Thrust 1.8 causes warnings when this is uncommented
// so commented out by default -- fixed in Thrust master branch
#if 0
// Host-side Thrust variant of the letter count; compiled out (see the #if 0
// guard) because of a warning bug in Thrust 1.8 noted above.
void xyzw_frequency_thrust_host(int *count, char *text, int n)
{
  const char letters[] { 'x','y','z','w' };
  *count = thrust::count_if(thrust::host, text, text+n, [&](char c) {
    for (const auto x : letters)
      if (c == x) return true;
    return false;
  });
}
#endif
// Counts occurrences of 'x','y','z','w' in a text corpus on the GPU, via a
// hand-written kernel and then via device-side Thrust. The Thrust kernel
// overwrites *d_count rather than accumulating, so the printed value is the
// Thrust result; both compute the same quantity.
int main(int argc, char** argv)
{
  // Locate the corpus; sdkFindFilePath returns NULL when the file is missing,
  // and passing NULL to fopen is undefined behaviour, so fail early.
  const char *filename = sdkFindFilePath("warandpeace.txt", argv[0]);
  if (filename == NULL)
  {
    printf("Cannot find the input text file. Exiting..\n");
    return EXIT_FAILURE;
  }
  int numBytes = 16*1048576;
  char *h_text = (char*)malloc(numBytes);
  if (h_text == NULL)
  {
    printf("Failed to allocate %d bytes of host memory. Exiting..\n", numBytes);
    return EXIT_FAILURE;
  }
  // // find first CUDA device
  // int devID = findCudaDevice(argc, (const char **)argv);
  int devID = 0;
  if(argc == 2) {
    devID = atoi(argv[1]);
  }
  printf("select device : %d\n", devID);
  cudaSetDevice(devID);
  cudaError_t error;
  cudaDeviceProp deviceProp;
  error = cudaGetDeviceProperties(&deviceProp, devID);
  if (error != cudaSuccess)
  {
    printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
  }
  else
  {
    printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
  }
  char *d_text;
  checkCudaErrors(cudaMalloc((void**)&d_text, numBytes));
  FILE *fp = fopen(filename, "r");
  if (fp == NULL)
  {
    // Message fixed: the original embedded a stray "\n" mid-sentence.
    printf("Cannot open the input text file. Exiting..\n");
    free(h_text);
    return EXIT_FAILURE;
  }
  int len = fread(h_text, sizeof(char), numBytes, fp);
  fclose(fp);
  std::cout << "Read " << len << " byte corpus from " << filename << std::endl;
  checkCudaErrors(cudaMemcpy(d_text, h_text, len, cudaMemcpyHostToDevice));
  int count = 0;
  int *d_count;
  checkCudaErrors(cudaMalloc(&d_count, sizeof(int)));
  checkCudaErrors(cudaMemset(d_count, 0, sizeof(int)));
  // Try uncommenting one kernel call at a time
  xyzw_frequency<<<8, 256>>>(d_count, d_text, len);
  xyzw_frequency_thrust_device<<<1, 1>>>(d_count, d_text, len);
  // Blocking copy also synchronizes with the kernels above.
  checkCudaErrors(cudaMemcpy(&count, d_count, sizeof(int), cudaMemcpyDeviceToHost));
  //xyzw_frequency_thrust_host(&count, h_text, len);
  std::cout << "counted " << count << " instances of 'x', 'y', 'z', or 'w' in \""
            << filename << "\"" << std::endl;
  checkCudaErrors(cudaFree(d_count));
  checkCudaErrors(cudaFree(d_text));
  free(h_text);   // was leaked in the original
  return EXIT_SUCCESS;
}
|
b8734ae5a291b643a3bb7bdc6a76dd4a06281761.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "backprop.h"
// cuda kernels
#include "bpnn_layerforward.h"
#include "bpnn_adjust_weights.h"
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double get_time() {
  struct timeval now;
  gettimeofday(&now, NULL);
  double seconds = (double)now.tv_sec;
  return seconds + (double)now.tv_usec * 1e-6;
}
unsigned int num_threads = 0;
unsigned int num_blocks = 0;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: all argument parsing, network construction and training are
// delegated to setup() (declared via backprop.h).
int main( int argc, char** argv)
{
  setup(argc, argv);
  return 0;
}
// One backpropagation training pass over `net`. The input->hidden forward
// pass and the input-weight adjustment run on the GPU; the hidden->output
// layers and error computation run on the host.
// NOTE(review): out-params `eo`/`eh` are never written here — confirm callers
// do not rely on them (out_err/hid_err stay local).
int bpnn_train_kernel(BPNN *net, float *eo, float *eh)
{
  int in, hid, out;
  float out_err, hid_err;
  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;
  // Flat (1-D) host copies of the (in+1) x (hid+1) weight matrices, needed
  // because the 2-D arrays are not contiguous for a single memcpy.
  float *input_weights_one_dim;
  float *input_weights_prev_one_dim;
  float * partial_sum;
  float sum;
  // Warning: the number of blocks must be less than the maximum grid dimension
  // NOTE(review): this local shadows the file-scope `num_blocks` global.
  unsigned int num_blocks = in / BLOCK_SIZE;
  input_weights_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float));
  input_weights_prev_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float));
  partial_sum = (float *) malloc(num_blocks * WIDTH * sizeof(float));
  // this preprocessing stage is temporarily added to correct the bug of wrong memcopy using two-dimensional net->inputweights
  // todo: fix mem allocation
  int m = 0;
  for (int k = 0; k <= in; k++) {
    for (int j = 0; j <= hid; j++) {
      input_weights_one_dim[m] = net->input_weights[k][j];
      input_weights_prev_one_dim[m] = net-> input_prev_weights[k][j];
      m++;
    }
  }
  printf("Performing GPU computation\n");
  double offload_start = get_time();
  float* d_input;
  float *d_input_weights;
  float *d_hidden_partial_sum;
  float *d_hidden_delta;
  float *d_input_prev_weights;
  hipMalloc((void**)&d_input, sizeof(float)*(in+1));
  hipMalloc((void**)&d_input_weights, sizeof(float)*(in+1)*(hid+1));
  hipMalloc((void**)&d_hidden_partial_sum, sizeof(float)*num_blocks*WIDTH);
  hipMemcpy(d_input, net->input_units, sizeof(float)*(in+1), hipMemcpyHostToDevice);
  hipMemcpy(d_input_weights, input_weights_one_dim, sizeof(float)*(in+1)*(hid+1), hipMemcpyHostToDevice);
  // One BLOCK_SIZE x BLOCK_SIZE block per input chunk, laid out along grid y.
  dim3 grid(1, num_blocks);
  dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
  hipLaunchKernelGGL(( kernel_layerforward), dim3(grid), dim3(threads), 0, 0, d_input, d_input_weights, d_hidden_partial_sum, hid);
  hipMemcpy(partial_sum, d_hidden_partial_sum, sizeof(float)*num_blocks*WIDTH, hipMemcpyDeviceToHost);
  // Host-side reduction of per-block partial sums, then sigmoid activation.
  for (int j = 1; j <= hid; j++) {
    sum = 0.f;
    for (unsigned int k = 0; k < num_blocks; k++) {
      sum += partial_sum[k * hid + j-1] ;
    }
#ifdef DEBUG
    printf("j=%d sum=%f\n", j,sum);
#endif
    sum += net->input_weights[0][j];   // row 0 holds the bias weights
    net-> hidden_units[j] = float(1.0 / (1.0 + exp(-sum)));
  }
  // Remaining forward/backward passes stay on the host.
  bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out);
  bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err);
  bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err);
  bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights);
  // input_weights has been written in the first kernel, so it needs to be restored.
  hipMemcpy(d_input_weights, input_weights_one_dim, sizeof(float)*(in+1)*(hid+1), hipMemcpyHostToDevice);
  hipMalloc((void**)&d_hidden_delta, sizeof(float)*(hid+1));
  hipMalloc((void**)&d_input_prev_weights, sizeof(float)*(in+1)*(hid+1));
  hipMemcpy(d_hidden_delta, net->hidden_delta, sizeof(float)*(hid+1), hipMemcpyHostToDevice);
  hipMemcpy(d_input_prev_weights, input_weights_prev_one_dim, sizeof(float)*(in+1)*(hid+1), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(( kernel_adjust_weights), dim3(grid), dim3(threads), 0, 0, d_input, d_input_weights, d_hidden_delta, d_input_prev_weights, hid);
  // Adjusted weights are copied back only into the flat host copy, not into
  // the 2-D net->input_weights array.
  hipMemcpy(input_weights_one_dim, d_input_weights, sizeof(float)*(in+1)*(hid+1), hipMemcpyDeviceToHost);
  hipFree(d_input);
  hipFree(d_input_weights);
  hipFree(d_hidden_partial_sum);
  hipFree(d_hidden_delta);
  hipFree(d_input_prev_weights);
  double offload_end = get_time();
  printf("Device offloading time = %lf(s)\n", offload_end - offload_start);
#ifdef OUTPUT
  for (int i = 0; i < (in+1); i++)
    printf("i=%d input_units=%f\n", i,net->input_units[i]);
  for (int i = 0; i < (in+1)*(hid+1); i++)
    printf("i=%d input_weights=%f\n", i,input_weights_one_dim[i]);
#endif
  free(input_weights_prev_one_dim);
  free(partial_sum);
  free(input_weights_one_dim);
  return 0;
}
| b8734ae5a291b643a3bb7bdc6a76dd4a06281761.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
#include "backprop.h"
// cuda kernels
#include "bpnn_layerforward.h"
#include "bpnn_adjust_weights.h"
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double get_time() {
  struct timeval now;
  gettimeofday(&now, NULL);
  double seconds = (double)now.tv_sec;
  return seconds + (double)now.tv_usec * 1e-6;
}
unsigned int num_threads = 0;
unsigned int num_blocks = 0;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: all argument parsing, network construction and training are
// delegated to setup() (declared via backprop.h).
int main( int argc, char** argv)
{
  setup(argc, argv);
  return 0;
}
// One backpropagation training pass over `net`. The input->hidden forward
// pass and the input-weight adjustment run on the GPU; the hidden->output
// layers and error computation run on the host.
// NOTE(review): out-params `eo`/`eh` are never written here — confirm callers
// do not rely on them (out_err/hid_err stay local).
int bpnn_train_kernel(BPNN *net, float *eo, float *eh)
{
  int in, hid, out;
  float out_err, hid_err;
  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;
  // Flat (1-D) host copies of the (in+1) x (hid+1) weight matrices, needed
  // because the 2-D arrays are not contiguous for a single memcpy.
  float *input_weights_one_dim;
  float *input_weights_prev_one_dim;
  float * partial_sum;
  float sum;
  // Warning: the number of blocks must be less than the maximum grid dimension
  // NOTE(review): this local shadows the file-scope `num_blocks` global.
  unsigned int num_blocks = in / BLOCK_SIZE;
  input_weights_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float));
  input_weights_prev_one_dim = (float *) malloc((in + 1)* (hid + 1) * sizeof(float));
  partial_sum = (float *) malloc(num_blocks * WIDTH * sizeof(float));
  // this preprocessing stage is temporarily added to correct the bug of wrong memcopy using two-dimensional net->inputweights
  // todo: fix mem allocation
  int m = 0;
  for (int k = 0; k <= in; k++) {
    for (int j = 0; j <= hid; j++) {
      input_weights_one_dim[m] = net->input_weights[k][j];
      input_weights_prev_one_dim[m] = net-> input_prev_weights[k][j];
      m++;
    }
  }
  printf("Performing GPU computation\n");
  double offload_start = get_time();
  float* d_input;
  float *d_input_weights;
  float *d_hidden_partial_sum;
  float *d_hidden_delta;
  float *d_input_prev_weights;
  cudaMalloc((void**)&d_input, sizeof(float)*(in+1));
  cudaMalloc((void**)&d_input_weights, sizeof(float)*(in+1)*(hid+1));
  cudaMalloc((void**)&d_hidden_partial_sum, sizeof(float)*num_blocks*WIDTH);
  cudaMemcpy(d_input, net->input_units, sizeof(float)*(in+1), cudaMemcpyHostToDevice);
  cudaMemcpy(d_input_weights, input_weights_one_dim, sizeof(float)*(in+1)*(hid+1), cudaMemcpyHostToDevice);
  // One BLOCK_SIZE x BLOCK_SIZE block per input chunk, laid out along grid y.
  dim3 grid(1, num_blocks);
  dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
  kernel_layerforward<<<grid, threads>>>(d_input, d_input_weights, d_hidden_partial_sum, hid);
  cudaMemcpy(partial_sum, d_hidden_partial_sum, sizeof(float)*num_blocks*WIDTH, cudaMemcpyDeviceToHost);
  // Host-side reduction of per-block partial sums, then sigmoid activation.
  for (int j = 1; j <= hid; j++) {
    sum = 0.f;
    for (unsigned int k = 0; k < num_blocks; k++) {
      sum += partial_sum[k * hid + j-1] ;
    }
#ifdef DEBUG
    printf("j=%d sum=%f\n", j,sum);
#endif
    sum += net->input_weights[0][j];   // row 0 holds the bias weights
    net-> hidden_units[j] = float(1.0 / (1.0 + exp(-sum)));
  }
  // Remaining forward/backward passes stay on the host.
  bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out);
  bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err);
  bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err);
  bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights);
  // input_weights has been written in the first kernel, so it needs to be restored.
  cudaMemcpy(d_input_weights, input_weights_one_dim, sizeof(float)*(in+1)*(hid+1), cudaMemcpyHostToDevice);
  cudaMalloc((void**)&d_hidden_delta, sizeof(float)*(hid+1));
  cudaMalloc((void**)&d_input_prev_weights, sizeof(float)*(in+1)*(hid+1));
  cudaMemcpy(d_hidden_delta, net->hidden_delta, sizeof(float)*(hid+1), cudaMemcpyHostToDevice);
  cudaMemcpy(d_input_prev_weights, input_weights_prev_one_dim, sizeof(float)*(in+1)*(hid+1), cudaMemcpyHostToDevice);
  kernel_adjust_weights<<<grid, threads>>>(d_input, d_input_weights, d_hidden_delta, d_input_prev_weights, hid);
  // Adjusted weights are copied back only into the flat host copy, not into
  // the 2-D net->input_weights array.
  cudaMemcpy(input_weights_one_dim, d_input_weights, sizeof(float)*(in+1)*(hid+1), cudaMemcpyDeviceToHost);
  cudaFree(d_input);
  cudaFree(d_input_weights);
  cudaFree(d_hidden_partial_sum);
  cudaFree(d_hidden_delta);
  cudaFree(d_input_prev_weights);
  double offload_end = get_time();
  printf("Device offloading time = %lf(s)\n", offload_end - offload_start);
#ifdef OUTPUT
  for (int i = 0; i < (in+1); i++)
    printf("i=%d input_units=%f\n", i,net->input_units[i]);
  for (int i = 0; i < (in+1)*(hid+1); i++)
    printf("i=%d input_weights=%f\n", i,input_weights_one_dim[i]);
#endif
  free(input_weights_prev_one_dim);
  free(partial_sum);
  free(input_weights_one_dim);
  return 0;
}
|
3ae476d6143d6b1a98dbedb7cb8a0cc36860cf5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Fluid_3D_PCISPH.cuh"
namespace Fluid_3D_PCISPH {
// One thread per particle: reset the acceleration to the external (gravity)
// force, discarding whatever the previous step accumulated.
__global__ void computeExternalForcesImpl(Particle* particles, int particleCount,float3 gravity) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < particleCount) {
		particles[i].acceleration = gravity;
	}
}
// One thread per particle: zero the pressure state before the PCISPH
// correction loop starts.
__global__ void initPressureImpl(Particle* particles, int particleCount) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < particleCount) {
		particles[i].pressure = 0;
		particles[i].pressureForces = make_float3(0, 0, 0);
	}
}
// One thread per particle: integrate acceleration (external + pressure) with
// symplectic Euler over `timestep`, then clamp the result so the particle
// stays at least spacing/2 away from every container wall.
// setAsActual=true commits the result to the particle's real state; otherwise
// it is stored in the predicted* fields for the PCISPH correction loop.
__global__ void predictVelocityAndPositionImpl(Particle* particles, int particleCount, float timestep, bool setAsActual, float spacing, float3 gridPhysicalSize) {
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= particleCount) return;
	Particle& particle = particles[index];
	float3 acc = particle.acceleration + particle.pressureForces;
	float3 vel = particle.velosity + acc * timestep;   // 'velosity' [sic] — Particle's field name
	float3 pos = particle.position + vel * timestep;
	// Restitution applied to the normal velocity on wall contact; -0.0f zeroes
	// the component (no bounce). Float literal avoids a double promotion that
	// the original's -0.0 incurred in device code.
	float bounce = -0.0f;
	float minDistanceFromWall = spacing / 2;
	if (pos.x < minDistanceFromWall) {
		pos.x = minDistanceFromWall;
		vel.x *= bounce;
	}
	if (pos.x > gridPhysicalSize.x - minDistanceFromWall) {
		pos.x = gridPhysicalSize.x - minDistanceFromWall;
		vel.x *= bounce;
	}
	if (pos.y < minDistanceFromWall) {
		pos.y = minDistanceFromWall;
		vel.y *= bounce;
	}
	if (pos.y > gridPhysicalSize.y - minDistanceFromWall) {
		pos.y = gridPhysicalSize.y - minDistanceFromWall;
		vel.y *= bounce;
	}
	if (pos.z < minDistanceFromWall) {
		pos.z = minDistanceFromWall;
		vel.z *= bounce;
	}
	if (pos.z > gridPhysicalSize.z - minDistanceFromWall) {
		pos.z = gridPhysicalSize.z - minDistanceFromWall;
		vel.z *= bounce;
	}
	if (setAsActual) {
		particle.position = pos;
		particle.velosity = vel;
	}
	else {
		particle.predictedPosition = pos;
		particle.predictedVelocity = vel;
	}
}
// PCISPH density prediction + pressure update (one thread per particle).
// Scans the 27 neighbouring hash cells accumulating:
//   density        — poly6-kernel sum over predicted positions (self included),
//   sumGradW(/Dot) — spiky-kernel gradient sums for the stiffness denominator.
// setAsRest=true also records the result as the particle's rest density (used
// once at startup via computeRestDensity).
__global__ void predictDensityAndPressureImpl(Particle* particles,int particleCount, int* cellBegin, int* cellEnd, int3 gridSize, float kernelRadius,float kernelRadius2,float kernelRadius6,float kernelRadius9, bool setAsRest, float timestep) {
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= particleCount) return;
	Particle& particle = particles[index];
	float3 pos = particle.position;
	// Hash-grid cell of this particle (cell edge length == kernelRadius).
	int3 thisCell;
	thisCell.x = pos.x / kernelRadius;
	thisCell.y = pos.y / kernelRadius;
	thisCell.z = pos.z / kernelRadius;
	float rho0 = particle.restDensity;
	float beta = timestep * timestep * 2 / (rho0 * rho0);
	float density = 0;
	float3 sumGradW = make_float3(0, 0, 0);
	float sumGradWDot = 0;
#pragma unroll
	for (int dx = -1; dx <= 1; ++dx) {
#pragma unroll
		for (int dy = -1; dy <= 1; ++dy) {
#pragma unroll
			for (int dz = -1; dz <= 1; ++dz) {
				int x = thisCell.x + dx;
				int y = thisCell.y + dy;
				int z = thisCell.z + dz;
				if (x < 0 || x >= gridSize.x || y < 0 || y >= gridSize.y || z < 0 || z >= gridSize.z) {
					continue;
				}
				int hash = x * gridSize.y * gridSize.z + y * gridSize.z + z;
				if (cellBegin[hash] == -1) {
					// -1 sentinel: cell contains no particles.
					continue;
				}
				for (int j = cellBegin[hash]; j <= cellEnd[hash]; ++j) {
					Particle& that = particles[j];
					float3 posDiff = particle.predictedPosition - that.predictedPosition;
					float thisDensityContribution = poly6(posDiff, kernelRadius2,kernelRadius9);
					density += thisDensityContribution;
					float3 gradW = spikey_grad(posDiff, kernelRadius,kernelRadius6);
					sumGradW += gradW;
					sumGradWDot += dot(gradW, gradW);
				}
			}
		}
	}
	particle.density = density;
	if (setAsRest) {
		particle.restDensity = density;
	}
	float rhoError = density - rho0;
	// NOTE(review): the derived PCISPH stiffness below is immediately replaced
	// by a hard-coded 50.0, making beta and the gradient sums dead code —
	// confirm whether the hard-coded value is intentional tuning.
	float correctionCoeff = 1.0 / (beta * (dot(sumGradW, sumGradW) + sumGradWDot));
	correctionCoeff = 50.0;
	float pressureCorrection = correctionCoeff * rhoError;
	particle.pressure += pressureCorrection;
}
// Symmetric pressure-force accumulation (one thread per particle): for every
// neighbour in the surrounding 27 hash cells, subtract the spiky-kernel
// gradient scaled by the symmetric pressure term p_i/rho_i^2 + p_j/rho_j^2.
// Result is written to particle.pressureForces.
__global__ void computePressureForceImpl(Particle* particles, int particleCount, int* cellBegin, int* cellEnd, int3 gridSize, float kernelRadius,float kernelRadius2, float kernelRadius6, float kernelRadius9 ) {
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= particleCount) return;
	Particle& particle = particles[index];
	float3 pos = particle.position;
	// Hash-grid cell of this particle (cell edge length == kernelRadius).
	int3 thisCell;
	thisCell.x = pos.x / kernelRadius;
	thisCell.y = pos.y / kernelRadius;
	thisCell.z = pos.z / kernelRadius;
	float3 force = make_float3(0, 0, 0);
#pragma unroll
	for (int dx = -1; dx <= 1; ++dx) {
#pragma unroll
		for (int dy = -1; dy <= 1; ++dy) {
#pragma unroll
			for (int dz = -1; dz <= 1; ++dz) {
				int x = thisCell.x + dx;
				int y = thisCell.y + dy;
				int z = thisCell.z + dz;
				if (x < 0 || x >= gridSize.x || y < 0 || y >= gridSize.y || z < 0 || z >= gridSize.z) {
					continue;
				}
				int hash = x * gridSize.y * gridSize.z + y * gridSize.z + z;
				if (cellBegin[hash] == -1) {
					continue;
				}
				for (int j = cellBegin[hash]; j <= cellEnd[hash]; ++j) {
					// NOTE(review): neighbour copied by value here, while the
					// density kernel binds a reference — likely just style drift.
					Particle that = particles[j];
					force -= spikey_grad(particle.predictedPosition - that.predictedPosition, kernelRadius,kernelRadius6)
						* ((that.pressure / (that.density * that.density)) + (particle.pressure / (particle.density * particle.density)));
				}
			}
		}
	}
	particle.pressureForces = force;
}
// Default constructor: all configuration and allocation happen later in init().
Fluid::Fluid() {
}
// Renders the fluid for one frame: in mesh render modes, rebuild the surface
// mesh from the current particles and draw it; otherwise draw the particles
// as point sprites.
void Fluid::draw(const DrawCommand& drawCommand){
	if (isMeshMode(drawCommand.renderMode)) {
		// Ensure simulation kernels finished before meshing reads particles.
		hipDeviceSynchronize();
		mesher->mesh(particles, particlesCopy, particleHashes, particleIndices, meshRenderer->coordsDevice);
		hipDeviceSynchronize();
		meshRenderer->draw(drawCommand);
	}
	else {
		// Refresh the point-sprite VBO from the current particle positions.
		updatePositionsVBO << <numBlocks, numThreads >> > (particles, pointSprites->positionsDevice, particleCount, pointSprites->stride);
		hipDeviceSynchronize();
		pointSprites->draw(drawCommand, particleSpacing/2, drawCommand.texSkybox);
	}
}
// Appends a jittered lattice of particles to particlesVec inside the
// axis-aligned box [minPos, maxPos] (given in normalized [0,1] coordinates
// and scaled by the container size). Jittered positions are clamped back
// inside the walls by spacing/2.
void Fluid::createSquareFluid(std::vector<Particle>& particlesVec, float3 minPos, float3 maxPos) {
	float minDistanceFromWall = particleSpacing / 2.f;
	float3 minPhysicalPos = {
		minPos.x * gridPhysicalSize.x,
		minPos.y* gridPhysicalSize.y,
		minPos.z* gridPhysicalSize.z,
	};
	minPhysicalPos += make_float3(1, 1, 1) * minDistanceFromWall;
	float3 maxPhysicalPos = {
		maxPos.x* gridPhysicalSize.x,
		maxPos.y* gridPhysicalSize.y,
		maxPos.z* gridPhysicalSize.z,
	};
	// The 1e-3 epsilon keeps the <= loops below from emitting a layer exactly
	// on the clamped boundary.
	maxPhysicalPos -= make_float3(1, 1, 1) * (minDistanceFromWall - 1e-3);
	for (float x = minPhysicalPos.x ; x <= maxPhysicalPos.x; x += particleSpacing) {
		for (float y = minPhysicalPos.y; y <= maxPhysicalPos.y ; y += particleSpacing) {
			for (float z = minPhysicalPos.z; z <= maxPhysicalPos.z ; z += particleSpacing) {
				// Up to half a spacing of random jitter per axis breaks up the lattice.
				float jitterMagnitude = particleSpacing/2.f;
				float3 jitter;
				jitter.x = (random0to1() - 0.5);
				jitter.y = (random0to1() - 0.5);
				jitter.z = (random0to1() - 0.5);
				jitter *= jitterMagnitude;
				float3 pos = make_float3(x, y, z);
				pos += jitter;
				// Clamp back inside the container after jittering.
				pos.x = min(gridPhysicalSize.x - minDistanceFromWall, max(minDistanceFromWall, pos.x));
				pos.y = min(gridPhysicalSize.y - minDistanceFromWall, max(minDistanceFromWall, pos.y));
				pos.z = min(gridPhysicalSize.z - minDistanceFromWall, max(minDistanceFromWall, pos.z));
				particlesVec.emplace_back(pos);
			}
		}
	}
}
// Appends grid-aligned particles to particlesVec for every lattice point that
// falls inside the sphere defined by `center` (normalized coordinates) and
// `radius` (fraction of the container's y extent).
void Fluid::createSphereFluid(std::vector<Particle>& particlesVec, float3 center, float radius) {
	float3 minPhysicalPos = {
		0,0,0
	};
	minPhysicalPos += make_float3(1, 1, 1) * particleSpacing * 0.5;
	float3 maxPhysicalPos = gridPhysicalSize;
	maxPhysicalPos -= make_float3(1, 1, 1) * particleSpacing * 0.5;
	float3 physicalCenter = {
		center.x * gridPhysicalSize.x,
		center.y * gridPhysicalSize.y,
		center.z * gridPhysicalSize.z
	};
	float physicalRadius = radius * gridPhysicalSize.y;
	for (float x = minPhysicalPos.x; x < maxPhysicalPos.x; x += particleSpacing) {
		for (float y = minPhysicalPos.y; y < maxPhysicalPos.y; y += particleSpacing) {
			for (float z = minPhysicalPos.z; z < maxPhysicalPos.z; z += particleSpacing) {
				float3 pos = make_float3(x, y, z);
				// NOTE(review): this jitter is computed but never added to pos —
				// either dead code or a dropped `pos += jitter`; confirm intent
				// before removing (removing would also change RNG consumption).
				float3 jitter = make_float3(1, 1, 1);
				jitter.x *= (random0to1() - 0.5)*particleSpacing*0.01;
				jitter.y *= (random0to1() - 0.5) * particleSpacing * 0.01;
				jitter.z *= (random0to1() - 0.5) * particleSpacing * 0.01;
				if (length(pos-physicalCenter) < physicalRadius) {
					particlesVec.emplace_back(pos);
				}
			}
		}
	}
}
// Configures the simulation from `config`: derives particle spacing from the
// target particle count, precomputes kernel-radius powers, seeds particles
// from the configured volumes, allocates all device buffers, measures rest
// density (printing its statistics), and builds the mesher/renderers.
void Fluid::init(FluidConfig config) {
	particleCountWhenFull = config.PCISPH.maxParticleCount;
	stiffness = config.PCISPH.stiffness;
	timestep = config.PCISPH.timestep;
	substeps = config.PCISPH.substeps;
	iterations = config.PCISPH.iterations;
	this->fluidConfig = config;
	// Spacing such that a completely full container holds ~maxParticleCount particles.
	particleSpacing = pow(gridPhysicalSize.x * gridPhysicalSize.y * gridPhysicalSize.z / particleCountWhenFull, 1.0 / 3.0);
	particleSpacing = gridPhysicalSize.x / ceil(gridPhysicalSize.x / particleSpacing); // so that gridPhysicalSize is exact multiple.
	kernelRadius = particleSpacing * kernelRadiusToSpacingRatio;
	// Cached powers of the kernel radius used by the poly6 / spiky kernels.
	kernelRadius2 = kernelRadius * kernelRadius;
	kernelRadius6 = kernelRadius2 * kernelRadius2 * kernelRadius2;
	kernelRadius9 = kernelRadius6 * kernelRadius2 * kernelRadius;
	std::vector<Particle> particlesVec;
	for (const InitializationVolume& vol : config.initialVolumes) {
		if (vol.shapeType == ShapeType::Square) {
			createSquareFluid(particlesVec, vol.boxMin, vol.boxMax);
		}
		else if (vol.shapeType == ShapeType::Sphere) {
			createSphereFluid(particlesVec, vol.ballCenter, vol.ballRadius);
		}
	}
	particleCount = particlesVec.size();
	HANDLE_ERROR(hipMalloc(&particles, particleCount * sizeof(Particle)));
	HANDLE_ERROR(hipMalloc(&particlesCopy, particleCount * sizeof(Particle)));
	HANDLE_ERROR(hipMemcpy(particles, particlesVec.data(), particleCount * sizeof(Particle), hipMemcpyHostToDevice));
	numThreads = min(1024, particleCount);
	numBlocks = divUp(particleCount, numThreads);
	// Spatial hash grid: one cell per kernel radius along each axis.
	gridSize.x = ceil(gridPhysicalSize.x / kernelRadius);
	gridSize.y = ceil(gridPhysicalSize.y / kernelRadius);
	gridSize.z = ceil(gridPhysicalSize.z / kernelRadius);
	cellCount = gridSize.x * gridSize.y * gridSize.z;
	HANDLE_ERROR(hipMalloc(&particleIndices, particleCount * sizeof(*particleIndices)));
	HANDLE_ERROR(hipMalloc(&particleHashes, particleCount * sizeof(*particleHashes)));
	HANDLE_ERROR(hipMalloc(&cellBegin, cellCount * sizeof(*cellBegin)));
	HANDLE_ERROR(hipMalloc(&cellEnd, cellCount * sizeof(*cellEnd)));
	pointSprites = std::make_shared<PointSprites>(particleCount);
	computeRestDensity();
	// Pull densities back to the host for rest-density diagnostics.
	HANDLE_ERROR(hipMemcpy(particlesVec.data(), particles, particleCount * sizeof(Particle), hipMemcpyDeviceToHost));
	float totalRestDensity = 0;
	float maxDensity = 0;
	float minDensity = 99999;
	for (Particle& p : particlesVec) {
		totalRestDensity += p.density;
		maxDensity = max(maxDensity, p.density);
		minDensity = min(minDensity, p.density);
	}
	restDensity = totalRestDensity / (float)particleCount;
	float variance = 0;
	for (Particle& p : particlesVec) {
		variance += pow(p.density - restDensity, 2);
	}
	variance /= (float)particleCount;
	std::cout << "particle count : " << particleCount << std::endl;
	std::cout << "spacing : " << particleSpacing << std::endl;
	std::cout << "kernel radius : " << kernelRadius << std::endl;
	std::cout << "rho0 : " << restDensity << std::endl;
	std::cout << "variance : " << variance << std::endl;
	std::cout << "gridSize.x : " << gridSize.x << std::endl;
	mesher = std::make_shared<Mesher>(gridPhysicalSize, particleSpacing, particleCount, numBlocks, numThreads);
	meshRenderer = std::make_shared<FluidMeshRenderer>(mesher->triangleCount);
	// Build an initial surface mesh so the first draw has valid geometry.
	mesher->mesh(particles, particlesCopy, particleHashes, particleIndices, meshRenderer->coordsDevice);
	hipDeviceSynchronize();
}
// Measures each particle's rest density from the initial configuration by
// running the density kernel once with setAsRest=true.
// NOTE(review): the kernel reads predictedPosition — assumes it already equals
// position for freshly created particles; confirm Particle's constructor.
void Fluid::computeRestDensity() {
	performSpatialHashing2(particleIndices, particleHashes, particles, particlesCopy, particleCount, kernelRadius, gridSize.x, gridSize.y, gridSize.z, numBlocks, numThreads, cellBegin, cellEnd, cellCount);
	predictDensityAndPressureImpl << <numBlocks, numThreads >> >
		(particles,particleCount, cellBegin, cellEnd, gridSize, kernelRadius, kernelRadius2, kernelRadius6, kernelRadius9, true, timestep / (float)substeps);
}
// Advances the simulation by one frame: `substeps` PCISPH sub-steps, each
// running `iterations` pressure-correction rounds, then accumulates the
// simulated time.
void Fluid::simulationStep() {
	for (int step = 0; step < substeps; ++step) {
		// Rebuild the spatial hash so neighbour queries see current positions.
		performSpatialHashing2(particleIndices,particleHashes, particles, particlesCopy, particleCount, kernelRadius, gridSize.x, gridSize.y, gridSize.z, numBlocks, numThreads, cellBegin, cellEnd, cellCount);
		computeExternalForces();
		initPressure();
		// Predictive-corrective pressure loop.
		for (int round = 0; round < iterations; ++round) {
			predictVelocityAndPosition();
			predictDensityAndPressure();
			computePressureForce();
		}
		computeNewVelocityAndPosition();
	}
	physicalTime += timestep;
}
// Launch: set every particle's acceleration to the configured gravity.
void Fluid::computeExternalForces() {
	computeExternalForcesImpl << <numBlocks, numThreads >> > (particles, particleCount,fluidConfig.gravity);
}
// Launch: zero every particle's pressure and pressure force.
void Fluid::initPressure() {
	initPressureImpl << <numBlocks, numThreads >> > (particles, particleCount);
}
// Launch: integrate one sub-timestep into the predicted* fields
// (setAsActual=false) without touching the committed state.
void Fluid::predictVelocityAndPosition() {
	predictVelocityAndPositionImpl << <numBlocks, numThreads >> >
		(particles, particleCount, timestep / (float)substeps, false, particleSpacing, gridPhysicalSize);
}
// Launch: predict densities from the predicted positions and accumulate the
// per-particle pressure correction (setAsRest=false).
void Fluid::predictDensityAndPressure() {
	predictDensityAndPressureImpl << <numBlocks, numThreads >> >
		(particles, particleCount, cellBegin, cellEnd, gridSize, kernelRadius, kernelRadius2, kernelRadius6, kernelRadius9, false, timestep / (float)substeps);
}
// Launch: recompute per-particle pressure forces from current pressures.
void Fluid::computePressureForce() {
	computePressureForceImpl << <numBlocks, numThreads >> >
		(particles, particleCount, cellBegin, cellEnd, gridSize, kernelRadius, kernelRadius2, kernelRadius6, kernelRadius9 );
}
// Launch: same integration kernel as the prediction, but with setAsActual=true
// so the result is committed to the particles' real position/velocity.
void Fluid::computeNewVelocityAndPosition() {
	predictVelocityAndPositionImpl << <numBlocks, numThreads >> >
		(particles, particleCount, timestep / (float)substeps, true, particleSpacing, gridPhysicalSize);
}
// Geometric centre of the simulation container, in world units.
glm::vec3 Fluid::getCenter() {
	const float halfX = gridPhysicalSize.x / 2;
	const float halfY = gridPhysicalSize.y / 2;
	const float halfZ = gridPhysicalSize.z / 2;
	return glm::vec3(halfX, halfY, halfZ);
}
// Returns the container's x extent; callers use this as the overall
// container size.
float Fluid::getContainerSize() {
	return gridPhysicalSize.x;
}
// Releases all device-side buffers allocated in init(); the shared_ptr
// members (pointSprites, mesher, meshRenderer) clean up themselves.
Fluid::~Fluid() {
	HANDLE_ERROR(hipFree(particles));
	HANDLE_ERROR(hipFree(particleHashes));
	HANDLE_ERROR(hipFree(cellBegin));
	HANDLE_ERROR(hipFree(cellEnd));
	HANDLE_ERROR(hipFree(particleIndices));
	HANDLE_ERROR(hipFree(particlesCopy));
}
} | 3ae476d6143d6b1a98dbedb7cb8a0cc36860cf5f.cu | #include "Fluid_3D_PCISPH.cuh"
namespace Fluid_3D_PCISPH {
// One thread per particle: reset the acceleration to the external (gravity)
// force, discarding whatever the previous step accumulated.
__global__ void computeExternalForcesImpl(Particle* particles, int particleCount,float3 gravity) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < particleCount) {
		particles[i].acceleration = gravity;
	}
}
// One thread per particle: zero the pressure state before the PCISPH
// correction loop starts.
__global__ void initPressureImpl(Particle* particles, int particleCount) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < particleCount) {
		particles[i].pressure = 0;
		particles[i].pressureForces = make_float3(0, 0, 0);
	}
}
// One thread per particle: integrate acceleration (external + pressure) with
// symplectic Euler over `timestep`, then clamp the result so the particle
// stays at least spacing/2 away from every container wall.
// setAsActual=true commits the result to the particle's real state; otherwise
// it is stored in the predicted* fields for the PCISPH correction loop.
__global__ void predictVelocityAndPositionImpl(Particle* particles, int particleCount, float timestep, bool setAsActual, float spacing, float3 gridPhysicalSize) {
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= particleCount) return;
	Particle& particle = particles[index];
	float3 acc = particle.acceleration + particle.pressureForces;
	float3 vel = particle.velosity + acc * timestep;   // 'velosity' [sic] — Particle's field name
	float3 pos = particle.position + vel * timestep;
	// Restitution applied to the normal velocity on wall contact; -0.0f zeroes
	// the component (no bounce). Float literal avoids a double promotion that
	// the original's -0.0 incurred in device code.
	float bounce = -0.0f;
	float minDistanceFromWall = spacing / 2;
	if (pos.x < minDistanceFromWall) {
		pos.x = minDistanceFromWall;
		vel.x *= bounce;
	}
	if (pos.x > gridPhysicalSize.x - minDistanceFromWall) {
		pos.x = gridPhysicalSize.x - minDistanceFromWall;
		vel.x *= bounce;
	}
	if (pos.y < minDistanceFromWall) {
		pos.y = minDistanceFromWall;
		vel.y *= bounce;
	}
	if (pos.y > gridPhysicalSize.y - minDistanceFromWall) {
		pos.y = gridPhysicalSize.y - minDistanceFromWall;
		vel.y *= bounce;
	}
	if (pos.z < minDistanceFromWall) {
		pos.z = minDistanceFromWall;
		vel.z *= bounce;
	}
	if (pos.z > gridPhysicalSize.z - minDistanceFromWall) {
		pos.z = gridPhysicalSize.z - minDistanceFromWall;
		vel.z *= bounce;
	}
	if (setAsActual) {
		particle.position = pos;
		particle.velosity = vel;
	}
	else {
		particle.predictedPosition = pos;
		particle.predictedVelocity = vel;
	}
}
// PCISPH density prediction + pressure update (one thread per particle).
// Scans the 27 neighbouring hash cells accumulating:
//   density        — poly6-kernel sum over predicted positions (self included),
//   sumGradW(/Dot) — spiky-kernel gradient sums for the stiffness denominator.
// setAsRest=true also records the result as the particle's rest density (used
// once at startup via computeRestDensity).
__global__ void predictDensityAndPressureImpl(Particle* particles,int particleCount, int* cellBegin, int* cellEnd, int3 gridSize, float kernelRadius,float kernelRadius2,float kernelRadius6,float kernelRadius9, bool setAsRest, float timestep) {
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= particleCount) return;
	Particle& particle = particles[index];
	float3 pos = particle.position;
	// Hash-grid cell of this particle (cell edge length == kernelRadius).
	int3 thisCell;
	thisCell.x = pos.x / kernelRadius;
	thisCell.y = pos.y / kernelRadius;
	thisCell.z = pos.z / kernelRadius;
	float rho0 = particle.restDensity;
	float beta = timestep * timestep * 2 / (rho0 * rho0);
	float density = 0;
	float3 sumGradW = make_float3(0, 0, 0);
	float sumGradWDot = 0;
#pragma unroll
	for (int dx = -1; dx <= 1; ++dx) {
#pragma unroll
		for (int dy = -1; dy <= 1; ++dy) {
#pragma unroll
			for (int dz = -1; dz <= 1; ++dz) {
				int x = thisCell.x + dx;
				int y = thisCell.y + dy;
				int z = thisCell.z + dz;
				if (x < 0 || x >= gridSize.x || y < 0 || y >= gridSize.y || z < 0 || z >= gridSize.z) {
					continue;
				}
				int hash = x * gridSize.y * gridSize.z + y * gridSize.z + z;
				if (cellBegin[hash] == -1) {
					// -1 sentinel: cell contains no particles.
					continue;
				}
				for (int j = cellBegin[hash]; j <= cellEnd[hash]; ++j) {
					Particle& that = particles[j];
					float3 posDiff = particle.predictedPosition - that.predictedPosition;
					float thisDensityContribution = poly6(posDiff, kernelRadius2,kernelRadius9);
					density += thisDensityContribution;
					float3 gradW = spikey_grad(posDiff, kernelRadius,kernelRadius6);
					sumGradW += gradW;
					sumGradWDot += dot(gradW, gradW);
				}
			}
		}
	}
	particle.density = density;
	if (setAsRest) {
		particle.restDensity = density;
	}
	float rhoError = density - rho0;
	// NOTE(review): the derived PCISPH stiffness below is immediately replaced
	// by a hard-coded 50.0, making beta and the gradient sums dead code —
	// confirm whether the hard-coded value is intentional tuning.
	float correctionCoeff = 1.0 / (beta * (dot(sumGradW, sumGradW) + sumGradWDot));
	correctionCoeff = 50.0;
	float pressureCorrection = correctionCoeff * rhoError;
	particle.pressure += pressureCorrection;
}
__global__ void computePressureForceImpl(Particle* particles, int particleCount, int* cellBegin, int* cellEnd, int3 gridSize, float kernelRadius,float kernelRadius2, float kernelRadius6, float kernelRadius9 ) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= particleCount) return;
Particle& particle = particles[index];
float3 pos = particle.position;
int3 thisCell;
thisCell.x = pos.x / kernelRadius;
thisCell.y = pos.y / kernelRadius;
thisCell.z = pos.z / kernelRadius;
float3 force = make_float3(0, 0, 0);
#pragma unroll
for (int dx = -1; dx <= 1; ++dx) {
#pragma unroll
for (int dy = -1; dy <= 1; ++dy) {
#pragma unroll
for (int dz = -1; dz <= 1; ++dz) {
int x = thisCell.x + dx;
int y = thisCell.y + dy;
int z = thisCell.z + dz;
if (x < 0 || x >= gridSize.x || y < 0 || y >= gridSize.y || z < 0 || z >= gridSize.z) {
continue;
}
int hash = x * gridSize.y * gridSize.z + y * gridSize.z + z;
if (cellBegin[hash] == -1) {
continue;
}
for (int j = cellBegin[hash]; j <= cellEnd[hash]; ++j) {
Particle that = particles[j];
force -= spikey_grad(particle.predictedPosition - that.predictedPosition, kernelRadius,kernelRadius6)
* ((that.pressure / (that.density * that.density)) + (particle.pressure / (particle.density * particle.density)));
}
}
}
}
particle.pressureForces = force;
}
Fluid::Fluid() {
}
void Fluid::draw(const DrawCommand& drawCommand){
if (isMeshMode(drawCommand.renderMode)) {
cudaDeviceSynchronize();
mesher->mesh(particles, particlesCopy, particleHashes, particleIndices, meshRenderer->coordsDevice);
cudaDeviceSynchronize();
meshRenderer->draw(drawCommand);
}
else {
updatePositionsVBO << <numBlocks, numThreads >> > (particles, pointSprites->positionsDevice, particleCount, pointSprites->stride);
cudaDeviceSynchronize();
pointSprites->draw(drawCommand, particleSpacing/2, drawCommand.texSkybox);
}
}
void Fluid::createSquareFluid(std::vector<Particle>& particlesVec, float3 minPos, float3 maxPos) {
float minDistanceFromWall = particleSpacing / 2.f;
float3 minPhysicalPos = {
minPos.x * gridPhysicalSize.x,
minPos.y* gridPhysicalSize.y,
minPos.z* gridPhysicalSize.z,
};
minPhysicalPos += make_float3(1, 1, 1) * minDistanceFromWall;
float3 maxPhysicalPos = {
maxPos.x* gridPhysicalSize.x,
maxPos.y* gridPhysicalSize.y,
maxPos.z* gridPhysicalSize.z,
};
maxPhysicalPos -= make_float3(1, 1, 1) * (minDistanceFromWall - 1e-3);
for (float x = minPhysicalPos.x ; x <= maxPhysicalPos.x; x += particleSpacing) {
for (float y = minPhysicalPos.y; y <= maxPhysicalPos.y ; y += particleSpacing) {
for (float z = minPhysicalPos.z; z <= maxPhysicalPos.z ; z += particleSpacing) {
float jitterMagnitude = particleSpacing/2.f;
float3 jitter;
jitter.x = (random0to1() - 0.5);
jitter.y = (random0to1() - 0.5);
jitter.z = (random0to1() - 0.5);
jitter *= jitterMagnitude;
float3 pos = make_float3(x, y, z);
pos += jitter;
pos.x = min(gridPhysicalSize.x - minDistanceFromWall, max(minDistanceFromWall, pos.x));
pos.y = min(gridPhysicalSize.y - minDistanceFromWall, max(minDistanceFromWall, pos.y));
pos.z = min(gridPhysicalSize.z - minDistanceFromWall, max(minDistanceFromWall, pos.z));
particlesVec.emplace_back(pos);
}
}
}
}
void Fluid::createSphereFluid(std::vector<Particle>& particlesVec, float3 center, float radius) {
float3 minPhysicalPos = {
0,0,0
};
minPhysicalPos += make_float3(1, 1, 1) * particleSpacing * 0.5;
float3 maxPhysicalPos = gridPhysicalSize;
maxPhysicalPos -= make_float3(1, 1, 1) * particleSpacing * 0.5;
float3 physicalCenter = {
center.x * gridPhysicalSize.x,
center.y * gridPhysicalSize.y,
center.z * gridPhysicalSize.z
};
float physicalRadius = radius * gridPhysicalSize.y;
for (float x = minPhysicalPos.x; x < maxPhysicalPos.x; x += particleSpacing) {
for (float y = minPhysicalPos.y; y < maxPhysicalPos.y; y += particleSpacing) {
for (float z = minPhysicalPos.z; z < maxPhysicalPos.z; z += particleSpacing) {
float3 pos = make_float3(x, y, z);
float3 jitter = make_float3(1, 1, 1);
jitter.x *= (random0to1() - 0.5)*particleSpacing*0.01;
jitter.y *= (random0to1() - 0.5) * particleSpacing * 0.01;
jitter.z *= (random0to1() - 0.5) * particleSpacing * 0.01;
if (length(pos-physicalCenter) < physicalRadius) {
particlesVec.emplace_back(pos);
}
}
}
}
}
void Fluid::init(FluidConfig config) {
particleCountWhenFull = config.PCISPH.maxParticleCount;
stiffness = config.PCISPH.stiffness;
timestep = config.PCISPH.timestep;
substeps = config.PCISPH.substeps;
iterations = config.PCISPH.iterations;
this->fluidConfig = config;
particleSpacing = pow(gridPhysicalSize.x * gridPhysicalSize.y * gridPhysicalSize.z / particleCountWhenFull, 1.0 / 3.0);
particleSpacing = gridPhysicalSize.x / ceil(gridPhysicalSize.x / particleSpacing); // so that gridPhysicalSize is exact multiple.
kernelRadius = particleSpacing * kernelRadiusToSpacingRatio;
kernelRadius2 = kernelRadius * kernelRadius;
kernelRadius6 = kernelRadius2 * kernelRadius2 * kernelRadius2;
kernelRadius9 = kernelRadius6 * kernelRadius2 * kernelRadius;
std::vector<Particle> particlesVec;
for (const InitializationVolume& vol : config.initialVolumes) {
if (vol.shapeType == ShapeType::Square) {
createSquareFluid(particlesVec, vol.boxMin, vol.boxMax);
}
else if (vol.shapeType == ShapeType::Sphere) {
createSphereFluid(particlesVec, vol.ballCenter, vol.ballRadius);
}
}
particleCount = particlesVec.size();
HANDLE_ERROR(cudaMalloc(&particles, particleCount * sizeof(Particle)));
HANDLE_ERROR(cudaMalloc(&particlesCopy, particleCount * sizeof(Particle)));
HANDLE_ERROR(cudaMemcpy(particles, particlesVec.data(), particleCount * sizeof(Particle), cudaMemcpyHostToDevice));
numThreads = min(1024, particleCount);
numBlocks = divUp(particleCount, numThreads);
gridSize.x = ceil(gridPhysicalSize.x / kernelRadius);
gridSize.y = ceil(gridPhysicalSize.y / kernelRadius);
gridSize.z = ceil(gridPhysicalSize.z / kernelRadius);
cellCount = gridSize.x * gridSize.y * gridSize.z;
HANDLE_ERROR(cudaMalloc(&particleIndices, particleCount * sizeof(*particleIndices)));
HANDLE_ERROR(cudaMalloc(&particleHashes, particleCount * sizeof(*particleHashes)));
HANDLE_ERROR(cudaMalloc(&cellBegin, cellCount * sizeof(*cellBegin)));
HANDLE_ERROR(cudaMalloc(&cellEnd, cellCount * sizeof(*cellEnd)));
pointSprites = std::make_shared<PointSprites>(particleCount);
computeRestDensity();
HANDLE_ERROR(cudaMemcpy(particlesVec.data(), particles, particleCount * sizeof(Particle), cudaMemcpyDeviceToHost));
float totalRestDensity = 0;
float maxDensity = 0;
float minDensity = 99999;
for (Particle& p : particlesVec) {
totalRestDensity += p.density;
maxDensity = max(maxDensity, p.density);
minDensity = min(minDensity, p.density);
}
restDensity = totalRestDensity / (float)particleCount;
float variance = 0;
for (Particle& p : particlesVec) {
variance += pow(p.density - restDensity, 2);
}
variance /= (float)particleCount;
std::cout << "particle count : " << particleCount << std::endl;
std::cout << "spacing : " << particleSpacing << std::endl;
std::cout << "kernel radius : " << kernelRadius << std::endl;
std::cout << "rho0 : " << restDensity << std::endl;
std::cout << "variance : " << variance << std::endl;
std::cout << "gridSize.x : " << gridSize.x << std::endl;
mesher = std::make_shared<Mesher>(gridPhysicalSize, particleSpacing, particleCount, numBlocks, numThreads);
meshRenderer = std::make_shared<FluidMeshRenderer>(mesher->triangleCount);
mesher->mesh(particles, particlesCopy, particleHashes, particleIndices, meshRenderer->coordsDevice);
cudaDeviceSynchronize();
}
void Fluid::computeRestDensity() {
performSpatialHashing2(particleIndices, particleHashes, particles, particlesCopy, particleCount, kernelRadius, gridSize.x, gridSize.y, gridSize.z, numBlocks, numThreads, cellBegin, cellEnd, cellCount);
predictDensityAndPressureImpl << <numBlocks, numThreads >> >
(particles,particleCount, cellBegin, cellEnd, gridSize, kernelRadius, kernelRadius2, kernelRadius6, kernelRadius9, true, timestep / (float)substeps);
}
void Fluid::simulationStep() {
for (int i = 0; i < substeps; ++i) {
performSpatialHashing2(particleIndices,particleHashes, particles, particlesCopy, particleCount, kernelRadius, gridSize.x, gridSize.y, gridSize.z, numBlocks, numThreads, cellBegin, cellEnd, cellCount);
computeExternalForces();
initPressure();
int iter = 0;
while (iter < iterations) {
predictVelocityAndPosition();
predictDensityAndPressure();
computePressureForce();
iter += 1;
}
computeNewVelocityAndPosition();
}
physicalTime += timestep;
}
void Fluid::computeExternalForces() {
computeExternalForcesImpl << <numBlocks, numThreads >> > (particles, particleCount,fluidConfig.gravity);
}
void Fluid::initPressure() {
initPressureImpl << <numBlocks, numThreads >> > (particles, particleCount);
}
void Fluid::predictVelocityAndPosition() {
predictVelocityAndPositionImpl << <numBlocks, numThreads >> >
(particles, particleCount, timestep / (float)substeps, false, particleSpacing, gridPhysicalSize);
}
void Fluid::predictDensityAndPressure() {
predictDensityAndPressureImpl << <numBlocks, numThreads >> >
(particles, particleCount, cellBegin, cellEnd, gridSize, kernelRadius, kernelRadius2, kernelRadius6, kernelRadius9, false, timestep / (float)substeps);
}
void Fluid::computePressureForce() {
computePressureForceImpl << <numBlocks, numThreads >> >
(particles, particleCount, cellBegin, cellEnd, gridSize, kernelRadius, kernelRadius2, kernelRadius6, kernelRadius9 );
}
void Fluid::computeNewVelocityAndPosition() {
predictVelocityAndPositionImpl << <numBlocks, numThreads >> >
(particles, particleCount, timestep / (float)substeps, true, particleSpacing, gridPhysicalSize);
}
glm::vec3 Fluid::getCenter() {
return glm::vec3(gridPhysicalSize.x / 2, gridPhysicalSize.y / 2,gridPhysicalSize.z / 2);
}
float Fluid::getContainerSize() {
return gridPhysicalSize.x;
}
Fluid::~Fluid() {
HANDLE_ERROR(cudaFree(particles));
HANDLE_ERROR(cudaFree(particleHashes));
HANDLE_ERROR(cudaFree(cellBegin));
HANDLE_ERROR(cudaFree(cellEnd));
HANDLE_ERROR(cudaFree(particleIndices));
HANDLE_ERROR(cudaFree(particlesCopy));
}
} |
d7972605b58aafea47d46ca122ce9833bb2e099d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__ void person_threshold(unsigned char * pix, int cols, int count){
int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned char p = (unsigned char)0;
float ff;
if(i < count - cols ){
int diffX = ((int)pix[i+ cols] - (int)pix[i + cols- 1]);
int diffY = ((int)pix[i + cols] - (int)pix[i]);
p = (unsigned char)((int)sqrtf((diffX * diffX) + (diffY * diffY)));
__syncthreads();
pix[i] = p;
}
__syncthreads();
if( i> 0 && count - cols-1){
p = (pix[i-1] + pix[i+1] + pix[i + cols] + pix[i+1 + cols])/12;
}
__syncthreads();
pix[i] +=p;
if( i > 0 && i < count - cols - 1){
p = (pix[i-1] + pix[i+1] + pix[i + cols] + pix[i+1 + cols])/12;
}
__syncthreads();
pix[i] +p;
if( i > 0 &&i < count - cols - 1){
p = (pix[i-1] + pix[i+1] + pix[i + cols] + pix[i+1 + cols])/12;
}
__syncthreads();
pix[i] +=p;
int radius = 1;
if(i > cols + 1 && i < count - cols - 1 && i % cols != 0){
p = (
abs(pix[i-1-cols] - pix[i])
+ abs(pix[i-cols] - pix[i])
+ abs(pix[i + 1 - cols] - pix[i])
+ abs(pix[i -1] - pix[i]) + abs( pix[i + 1] - pix[i])
+ abs(pix[i - 1 + cols] - pix[i])
+ abs(pix[i + cols] - pix[i])
+ abs(pix[i + 1 + cols] - pix[i])
)/8;
}
__syncthreads();
pix[i] = p;
}
void edgedetect(unsigned char * pic, int count, int cols){
const int numthreads = count;
const int blocks = numthreads/512;
const int block_width = 512;
unsigned char * data = NULL;
hipMalloc(&data, count * sizeof(unsigned char));
hipMemcpy(data, pic, count* sizeof(unsigned char), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( person_threshold), dim3(blocks), dim3(block_width) , 0, 0, data, cols, count);
hipMemcpy(pic, data, count * sizeof(unsigned char), hipMemcpyDeviceToHost);
hipFree(data);
}
| d7972605b58aafea47d46ca122ce9833bb2e099d.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
__global__ void person_threshold(unsigned char * pix, int cols, int count){
int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned char p = (unsigned char)0;
float ff;
if(i < count - cols ){
int diffX = ((int)pix[i+ cols] - (int)pix[i + cols- 1]);
int diffY = ((int)pix[i + cols] - (int)pix[i]);
p = (unsigned char)((int)sqrtf((diffX * diffX) + (diffY * diffY)));
__syncthreads();
pix[i] = p;
}
__syncthreads();
if( i> 0 && count - cols-1){
p = (pix[i-1] + pix[i+1] + pix[i + cols] + pix[i+1 + cols])/12;
}
__syncthreads();
pix[i] +=p;
if( i > 0 && i < count - cols - 1){
p = (pix[i-1] + pix[i+1] + pix[i + cols] + pix[i+1 + cols])/12;
}
__syncthreads();
pix[i] +p;
if( i > 0 &&i < count - cols - 1){
p = (pix[i-1] + pix[i+1] + pix[i + cols] + pix[i+1 + cols])/12;
}
__syncthreads();
pix[i] +=p;
int radius = 1;
if(i > cols + 1 && i < count - cols - 1 && i % cols != 0){
p = (
abs(pix[i-1-cols] - pix[i])
+ abs(pix[i-cols] - pix[i])
+ abs(pix[i + 1 - cols] - pix[i])
+ abs(pix[i -1] - pix[i]) + abs( pix[i + 1] - pix[i])
+ abs(pix[i - 1 + cols] - pix[i])
+ abs(pix[i + cols] - pix[i])
+ abs(pix[i + 1 + cols] - pix[i])
)/8;
}
__syncthreads();
pix[i] = p;
}
void edgedetect(unsigned char * pic, int count, int cols){
const int numthreads = count;
const int blocks = numthreads/512;
const int block_width = 512;
unsigned char * data = NULL;
cudaMalloc(&data, count * sizeof(unsigned char));
cudaMemcpy(data, pic, count* sizeof(unsigned char), cudaMemcpyHostToDevice);
person_threshold<<<blocks, block_width >>>(data, cols, count);
cudaMemcpy(pic, data, count * sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaFree(data);
}
|
898d15c7565df3fc379c3c4d1b9497b5a748598b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <numeric>
#include <cmath>
#include <limits>
#include <chrono>
#include <algorithm>
#include <iomanip>
#include <cstring>
#include<addTest.h>
#define block_size 4
#define block_size_x 8
#define block_size_y 1
#define BLOCK_SIZE 512
// GPU
void check_error(void)
{
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
exit(err);
}
}
// GPU
template<typename scalar_t, int vec_size>
struct alignas(sizeof(scalar_t) * 4) aligned_vector {
scalar_t val[4];
};
__global__ void gpuCopy(float*x, float* y, int nx, int ny, int block_size_t) {
extern __shared__ float staticShared[];
for(int i = threadIdx.x; i < ny; i += blockDim.x) {
#pragma unroll
for(int m =0; m < block_size; m++) {
int idx = threadIdx.x * block_size + m;
staticShared[idx] = x[i * nx + m + blockIdx.x * block_size];
}
__syncthreads();
#pragma unroll
for(int j = 0; j < block_size; j ++) {
int idx = threadIdx.x * block_size + j;
int out_id = (blockIdx.x * block_size + j) * ny + i;
y[out_id] = staticShared[idx];
}
}
}
__global__ void gpuCopy_thread(float*x, float* y, int nx, int ny) {
extern __shared__ float staticShared[];
int num = ny / block_size_y;
for(int in = 0; in < num ; in++) {
#pragma unroll
for(int m = 0; m < block_size_y; m++) {
#pragma unroll
for(int ix = 0; ix < block_size_x; ix++) {
int idx_x = nx * (in * block_size_y + m);
int idx_x_small = ix + (blockIdx.x * blockDim.x + threadIdx.x) * block_size_x;
int idx_src = idx_x + idx_x_small;
int idx_shared = m * blockDim.x * block_size_x + threadIdx.x * block_size_x + ix;
staticShared[idx_shared] = x[idx_src];
}
}
#pragma unroll
for(int ix = 0; ix < block_size_x; ix++) {
#pragma unroll
for(int m = 0; m < block_size_y; m ++) {
int idx_y = in * block_size_y + m;
int idx_higher = ((blockIdx.x * blockDim.x + threadIdx.x) * block_size_x + ix) * ny;
int idx_dst = idx_y + idx_higher;
int idx_shared = m * blockDim.x * block_size_x + threadIdx.x * block_size_x + ix;
y[idx_dst] = staticShared[idx_shared];
}
}
}
}
__global__ void gpuReduce_block_size(float*x, float* y, int nx, int ny) {
extern __shared__ float staticShared[];
for (int iy = 0; iy < ny; iy++) {
#pragma unroll
for(int ix = 0; ix < block_size_x; ix++) {
int idx_src = iy * nx + blockIdx.x * blockDim.x * block_size_x + threadIdx.x * block_size_x + ix;
staticShared[ix] += x[idx_src];
}
}
#pragma unroll
for (int ix = 0; ix < block_size_x; ix++) {
int idx_dst = blockIdx.x * blockDim.x * block_size_x + threadIdx.x * block_size_x + ix;
y[idx_dst] = staticShared[ix];
}
}
// register
__global__ void gpuReduce_vec_4(float*x, float* y, int nx, int ny) {
using Vec = aligned_vector<float, 4>;
Vec *src = reinterpret_cast<Vec *>(x);
Vec *dst = reinterpret_cast<Vec *>(y);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
Vec temp;
Vec tp;
if (idx + 3 > nx) return;
for(int t = idx; t < nx/4; t += gridDim.x * blockDim.x) {
temp.val[0] = 0.0f;
temp.val[1] = 0.0f;
temp.val[2] = 0.0f;
temp.val[3] = 0.0f;
for (int iy = 0; iy < ny; iy++) {
tp = src[t + (iy * nx)/4];
#pragma unroll
for (int ix = 0; ix < 4; ix++) {
temp.val[ix] += tp.val[ix];
}
}
dst[t] = temp;
}
}
__global__ void gpuReduce_share(float*x, float* y, int nx, int ny) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * BLOCK_SIZE;
extern __shared__ float staticShared[];
float temp = 0.0f;
if (idx > nx) return;
for (int iy = 0; iy < BLOCK_SIZE && idy + iy < ny; iy ++) {
int id = (idy + iy) * nx + idx;
staticShared[threadIdx.x] += x[id];
// float tp = x[id];
// temp += tp;
}
y[idx + blockIdx.y * nx] = staticShared[threadIdx.x];
}
__global__ void gpuReduce_base(float*x, float* y, int nx, int ny) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float temp = 0.0f;
if (idx > nx) return;
for (int iy = 0; iy < ny; iy ++) {
int id = iy * nx + idx;
float tp = x[id];
temp += tp;
}
y[idx + blockIdx.y * nx] = temp;
}
// reduce_y + reduce_global
__global__ void gpuReduce_y(float*x, float* y, int nx, int ny) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * BLOCK_SIZE;
extern __shared__ float staticShared[];
float temp = 0.0f;
if (idx > nx) return;
for (int iy = 0; iy < BLOCK_SIZE && idy + iy < ny; iy ++) {
int id = (idy + iy) * nx + idx;
float tp = x[id];
temp += tp;
}
y[idx + blockIdx.y * nx] = temp;
}
__global__ void gpuReduce_global(float*x, float* y, int nx, int ny) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * BLOCK_SIZE;
float temp = 0.0f;
if (idx > nx) return;
for (int iy = 0; iy < BLOCK_SIZE && idy + iy < ny; iy ++) {
int id = (idy + iy) * nx + idx;
float tp = x[id];
temp += tp;
}
y[idx + blockIdx.y * nx] = temp;
}
// CPU
void cpuCopy(float *x, float * y, int nx, int ny) {
for(int i = 0; i < nx; i ++) {
for (int j = 0; j < ny; j ++) {
int idx_dst = i * ny + j;
int idx_src = j * nx + i;
y[idx_dst] = x[idx_src];
}
}
}
void cpuReduce(float *x, float * y, int nx, int ny) {
for(int i = 0; i < nx; i ++) {
float sum = 0;
for (int j = 0; j < ny; j ++) {
int idx_src = j * nx + i;
sum += x[idx_src];
}
y[i] = sum;
}
}
int main(int block_size_t, char *argv[]) {
int dev = 0;
initDevice(dev);
// int nx = 75264;
// int ny = 2048;
// int ny= 512 , nx = 2048;
// int ny= 128 , nx = 1024;
int ny= 30522 , nx = 1024;
// int ny= 1024 , nx = 16 ;
int num = nx * ny;
float* x_h = (float *)malloc(num * sizeof(float));
float* y_h = (float *)malloc(num * sizeof(float));
float* gpu_h = (float *)malloc(num * sizeof(float));
initialData(x_h, num);
for(int i = 0; i < num ; i++) {
gpu_h[i] = 0;
y_h[i] = 0;
x_h[i] = 1 % 10;
}
float *x_d, *y_d;
CHECK(hipMalloc((float**)&x_d, num * sizeof(float)));
CHECK(hipMalloc((float**)&y_d, num * sizeof(float)));
CHECK(hipMemcpy(x_d, x_h, num * sizeof(float), hipMemcpyHostToDevice));
CHECK(hipMemcpy(y_d, y_h, num * sizeof(float), hipMemcpyHostToDevice));
int block = 32;
int grid = (nx + block_size -1) / block_size;
double start, end;
start = cpuSecond();
int block_num = 64;
int grid_2 = (nx / block_size_x + block_num - 1)/ block_num;
int block_4 = 32;
int grid_4 = (nx / 4 + block_4 - 1)/ block_4;
int block_1 = 32;
int grid_1 = (nx + block_1 - 1)/ block_1;
int grid_y = (ny + BLOCK_SIZE - 1)/BLOCK_SIZE;
dim3 grid3(grid_1, grid_y);
dim3 block3(block_1, 1);
std::chrono::high_resolution_clock::time_point t1, t2;
std::vector<std::vector<double>> timings(5);
hipLaunchKernelGGL(( gpuReduce_vec_4), dim3(grid_4), dim3(block_4), 0, 0, x_d, y_d, nx, ny);
hipDeviceSynchronize();
for (int i = 0; i < 1000; i++) {
hipLaunchKernelGGL(( gpuReduce_block_size), dim3(grid_2), dim3(block_num), sizeof(float) * block_size_x * block_num, 0, x_d, y_d, nx, ny);
hipDeviceSynchronize();
hipLaunchKernelGGL(( gpuReduce_vec_4), dim3(grid_4), dim3(block_4), 0, 0, x_d, y_d, nx, ny);
hipDeviceSynchronize();
hipLaunchKernelGGL(( gpuReduce_base), dim3(grid3), dim3(block3), 0, 0, x_d, y_d, nx, ny);
hipDeviceSynchronize();
t1 = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( gpuReduce_y), dim3(grid3), dim3(block3), sizeof(float) * block_1, 0, x_d, y_d, nx, ny);
check_error();
hipDeviceSynchronize();
check_error();
t2 = std::chrono::high_resolution_clock::now();
timings[0].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
hipLaunchKernelGGL(( gpuReduce_global), dim3(grid_1), dim3(block_1), 0, 0, y_d, y_d, nx, grid_y);
hipDeviceSynchronize();
}
hipDeviceSynchronize();
end = cpuSecond();
cpuReduce(x_h,y_h, nx, ny);
double average = std::accumulate(timings[0].begin()+1, timings[0].end(), 0.0) / (double)(1000);
//cpuCopy(x_h,y_h, nx, ny);
printf("Kernel Time is %f s %f ms\n", end - start, average * 1000);
CHECK(hipMemcpy(gpu_h, y_d, num * sizeof(float), hipMemcpyDeviceToHost));
// for(int i = 0; i < nx * grid_y; i++) {
// if(gpu_h[i] != 512) printf("error %d %f %d\n", i / nx, gpu_h[26624], i);
// }
checkResult(y_h, gpu_h, nx);
hipFree(x_d);
hipFree(y_d);
free(x_h);
free(y_h);
free(gpu_h);
return 0;
}
| 898d15c7565df3fc379c3c4d1b9497b5a748598b.cu | #include <iostream>
#include <vector>
#include <numeric>
#include <cmath>
#include <limits>
#include <chrono>
#include <algorithm>
#include <iomanip>
#include <cstring>
#include<addTest.h>
#define block_size 4
#define block_size_x 8
#define block_size_y 1
#define BLOCK_SIZE 512
// GPU
void check_error(void)
{
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
exit(err);
}
}
// GPU
template<typename scalar_t, int vec_size>
struct alignas(sizeof(scalar_t) * 4) aligned_vector {
scalar_t val[4];
};
__global__ void gpuCopy(float*x, float* y, int nx, int ny, int block_size_t) {
extern __shared__ float staticShared[];
for(int i = threadIdx.x; i < ny; i += blockDim.x) {
#pragma unroll
for(int m =0; m < block_size; m++) {
int idx = threadIdx.x * block_size + m;
staticShared[idx] = x[i * nx + m + blockIdx.x * block_size];
}
__syncthreads();
#pragma unroll
for(int j = 0; j < block_size; j ++) {
int idx = threadIdx.x * block_size + j;
int out_id = (blockIdx.x * block_size + j) * ny + i;
y[out_id] = staticShared[idx];
}
}
}
__global__ void gpuCopy_thread(float*x, float* y, int nx, int ny) {
extern __shared__ float staticShared[];
int num = ny / block_size_y;
for(int in = 0; in < num ; in++) {
#pragma unroll
for(int m = 0; m < block_size_y; m++) {
#pragma unroll
for(int ix = 0; ix < block_size_x; ix++) {
int idx_x = nx * (in * block_size_y + m);
int idx_x_small = ix + (blockIdx.x * blockDim.x + threadIdx.x) * block_size_x;
int idx_src = idx_x + idx_x_small;
int idx_shared = m * blockDim.x * block_size_x + threadIdx.x * block_size_x + ix;
staticShared[idx_shared] = x[idx_src];
}
}
#pragma unroll
for(int ix = 0; ix < block_size_x; ix++) {
#pragma unroll
for(int m = 0; m < block_size_y; m ++) {
int idx_y = in * block_size_y + m;
int idx_higher = ((blockIdx.x * blockDim.x + threadIdx.x) * block_size_x + ix) * ny;
int idx_dst = idx_y + idx_higher;
int idx_shared = m * blockDim.x * block_size_x + threadIdx.x * block_size_x + ix;
y[idx_dst] = staticShared[idx_shared];
}
}
}
}
__global__ void gpuReduce_block_size(float*x, float* y, int nx, int ny) {
extern __shared__ float staticShared[];
for (int iy = 0; iy < ny; iy++) {
#pragma unroll
for(int ix = 0; ix < block_size_x; ix++) {
int idx_src = iy * nx + blockIdx.x * blockDim.x * block_size_x + threadIdx.x * block_size_x + ix;
staticShared[ix] += x[idx_src];
}
}
#pragma unroll
for (int ix = 0; ix < block_size_x; ix++) {
int idx_dst = blockIdx.x * blockDim.x * block_size_x + threadIdx.x * block_size_x + ix;
y[idx_dst] = staticShared[ix];
}
}
// register
__global__ void gpuReduce_vec_4(float*x, float* y, int nx, int ny) {
using Vec = aligned_vector<float, 4>;
Vec *src = reinterpret_cast<Vec *>(x);
Vec *dst = reinterpret_cast<Vec *>(y);
int idx = blockIdx.x * blockDim.x + threadIdx.x;
Vec temp;
Vec tp;
if (idx + 3 > nx) return;
for(int t = idx; t < nx/4; t += gridDim.x * blockDim.x) {
temp.val[0] = 0.0f;
temp.val[1] = 0.0f;
temp.val[2] = 0.0f;
temp.val[3] = 0.0f;
for (int iy = 0; iy < ny; iy++) {
tp = src[t + (iy * nx)/4];
#pragma unroll
for (int ix = 0; ix < 4; ix++) {
temp.val[ix] += tp.val[ix];
}
}
dst[t] = temp;
}
}
__global__ void gpuReduce_share(float*x, float* y, int nx, int ny) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * BLOCK_SIZE;
extern __shared__ float staticShared[];
float temp = 0.0f;
if (idx > nx) return;
for (int iy = 0; iy < BLOCK_SIZE && idy + iy < ny; iy ++) {
int id = (idy + iy) * nx + idx;
staticShared[threadIdx.x] += x[id];
// float tp = x[id];
// temp += tp;
}
y[idx + blockIdx.y * nx] = staticShared[threadIdx.x];
}
__global__ void gpuReduce_base(float*x, float* y, int nx, int ny) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float temp = 0.0f;
if (idx > nx) return;
for (int iy = 0; iy < ny; iy ++) {
int id = iy * nx + idx;
float tp = x[id];
temp += tp;
}
y[idx + blockIdx.y * nx] = temp;
}
// reduce_y + reduce_global
__global__ void gpuReduce_y(float*x, float* y, int nx, int ny) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * BLOCK_SIZE;
extern __shared__ float staticShared[];
float temp = 0.0f;
if (idx > nx) return;
for (int iy = 0; iy < BLOCK_SIZE && idy + iy < ny; iy ++) {
int id = (idy + iy) * nx + idx;
float tp = x[id];
temp += tp;
}
y[idx + blockIdx.y * nx] = temp;
}
__global__ void gpuReduce_global(float*x, float* y, int nx, int ny) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * BLOCK_SIZE;
float temp = 0.0f;
if (idx > nx) return;
for (int iy = 0; iy < BLOCK_SIZE && idy + iy < ny; iy ++) {
int id = (idy + iy) * nx + idx;
float tp = x[id];
temp += tp;
}
y[idx + blockIdx.y * nx] = temp;
}
// CPU
void cpuCopy(float *x, float * y, int nx, int ny) {
for(int i = 0; i < nx; i ++) {
for (int j = 0; j < ny; j ++) {
int idx_dst = i * ny + j;
int idx_src = j * nx + i;
y[idx_dst] = x[idx_src];
}
}
}
void cpuReduce(float *x, float * y, int nx, int ny) {
for(int i = 0; i < nx; i ++) {
float sum = 0;
for (int j = 0; j < ny; j ++) {
int idx_src = j * nx + i;
sum += x[idx_src];
}
y[i] = sum;
}
}
int main(int block_size_t, char *argv[]) {
int dev = 0;
initDevice(dev);
// int nx = 75264;
// int ny = 2048;
// int ny= 512 , nx = 2048;
// int ny= 128 , nx = 1024;
int ny= 30522 , nx = 1024;
// int ny= 1024 , nx = 16 ;
int num = nx * ny;
float* x_h = (float *)malloc(num * sizeof(float));
float* y_h = (float *)malloc(num * sizeof(float));
float* gpu_h = (float *)malloc(num * sizeof(float));
initialData(x_h, num);
for(int i = 0; i < num ; i++) {
gpu_h[i] = 0;
y_h[i] = 0;
x_h[i] = 1 % 10;
}
float *x_d, *y_d;
CHECK(cudaMalloc((float**)&x_d, num * sizeof(float)));
CHECK(cudaMalloc((float**)&y_d, num * sizeof(float)));
CHECK(cudaMemcpy(x_d, x_h, num * sizeof(float), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(y_d, y_h, num * sizeof(float), cudaMemcpyHostToDevice));
int block = 32;
int grid = (nx + block_size -1) / block_size;
double start, end;
start = cpuSecond();
int block_num = 64;
int grid_2 = (nx / block_size_x + block_num - 1)/ block_num;
int block_4 = 32;
int grid_4 = (nx / 4 + block_4 - 1)/ block_4;
int block_1 = 32;
int grid_1 = (nx + block_1 - 1)/ block_1;
int grid_y = (ny + BLOCK_SIZE - 1)/BLOCK_SIZE;
dim3 grid3(grid_1, grid_y);
dim3 block3(block_1, 1);
std::chrono::high_resolution_clock::time_point t1, t2;
std::vector<std::vector<double>> timings(5);
gpuReduce_vec_4<<<grid_4, block_4>>>(x_d, y_d, nx, ny);
cudaDeviceSynchronize();
for (int i = 0; i < 1000; i++) {
gpuReduce_block_size<<<grid_2, block_num, sizeof(float) * block_size_x * block_num>>>(x_d, y_d, nx, ny);
cudaDeviceSynchronize();
gpuReduce_vec_4<<<grid_4, block_4>>>(x_d, y_d, nx, ny);
cudaDeviceSynchronize();
gpuReduce_base<<<grid3, block3>>>(x_d, y_d, nx, ny);
cudaDeviceSynchronize();
t1 = std::chrono::high_resolution_clock::now();
gpuReduce_y<<<grid3, block3, sizeof(float) * block_1>>>(x_d, y_d, nx, ny);
check_error();
cudaDeviceSynchronize();
check_error();
t2 = std::chrono::high_resolution_clock::now();
timings[0].push_back(std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count());
gpuReduce_global<<<grid_1, block_1>>>(y_d, y_d, nx, grid_y);
cudaDeviceSynchronize();
}
cudaDeviceSynchronize();
end = cpuSecond();
cpuReduce(x_h,y_h, nx, ny);
double average = std::accumulate(timings[0].begin()+1, timings[0].end(), 0.0) / (double)(1000);
//cpuCopy(x_h,y_h, nx, ny);
printf("Kernel Time is %f s %f ms\n", end - start, average * 1000);
CHECK(cudaMemcpy(gpu_h, y_d, num * sizeof(float), cudaMemcpyDeviceToHost));
// for(int i = 0; i < nx * grid_y; i++) {
// if(gpu_h[i] != 512) printf("error %d %f %d\n", i / nx, gpu_h[26624], i);
// }
checkResult(y_h, gpu_h, nx);
cudaFree(x_d);
cudaFree(y_d);
free(x_h);
free(y_h);
free(gpu_h);
return 0;
}
|
935d92f292a1f938c32ab31f2197494027efd00f.hip | // !!! This is a file automatically generated by hipify!!!
%%cu
#include <stdio.h>
#include<iostream>
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 1024
#define N 500000 //50000
using namespace std;
__global__ void sum( int *A, int stride)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i % (2*stride) == 0)
{
A[i] = A[i] + A[i+stride];
}
}
int main(void)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float time;
int threadsPerBlock;
int blocksPerGrid;
int n,stride;
threadsPerBlock = BLOCK_SIZE;
n = N;
blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
hipError_t err = hipSuccess;
int *A = (int *)malloc(n * sizeof(int));
int result=0;
for (int i = 0; i < n; ++i)
{
A[i] = i+1;
result = result + A[i];
}
int *dA = NULL;
err = hipMalloc((void **)&dA, n * sizeof(int));
err = hipMemcpy(dA, A, n * sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(start,0);
for(stride=1; stride < n; stride = stride * 2)
{
hipLaunchKernelGGL(( sum), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dA, stride);
}
hipEventRecord(stop,0);
hipEventSynchronize (stop);
hipEventElapsedTime (&time, start, stop);
hipEventDestroy (start);
hipEventDestroy (stop);
cout<<"Time taken: "<<time<<" milli seconds"<<endl;
err = hipMemcpy(A, dA, sizeof(int), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
cout<<A[0]<<endl;
cout<<result<<endl;
err = hipFree(dA);
free(A);
return 0;
} | 935d92f292a1f938c32ab31f2197494027efd00f.cu | %%cu
#include <stdio.h>
#include<iostream>
#include <cuda_runtime.h>
#define BLOCK_SIZE 1024
#define N 500000 //50000
using namespace std;
__global__ void sum( int *A, int stride)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i % (2*stride) == 0)
{
A[i] = A[i] + A[i+stride];
}
}
int main(void)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float time;
int threadsPerBlock;
int blocksPerGrid;
int n,stride;
threadsPerBlock = BLOCK_SIZE;
n = N;
blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
cudaError_t err = cudaSuccess;
int *A = (int *)malloc(n * sizeof(int));
int result=0;
for (int i = 0; i < n; ++i)
{
A[i] = i+1;
result = result + A[i];
}
int *dA = NULL;
err = cudaMalloc((void **)&dA, n * sizeof(int));
err = cudaMemcpy(dA, A, n * sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start,0);
for(stride=1; stride < n; stride = stride * 2)
{
sum<<<blocksPerGrid, threadsPerBlock>>>(dA, stride);
}
cudaEventRecord(stop,0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&time, start, stop);
cudaEventDestroy (start);
cudaEventDestroy (stop);
cout<<"Time taken: "<<time<<" milli seconds"<<endl;
err = cudaMemcpy(A, dA, sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cout<<A[0]<<endl;
cout<<result<<endl;
err = cudaFree(dA);
free(A);
return 0;
} |
e447441f063a1ce3717994258752b75a572c859a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
//#define CUBLAS_CHECK(opr) (opr);
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
//hope-change.
//CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));
#if 0
if (hipMemcpy(Y, X, N, hipMemcpyDefault) != hipSuccess){ // NOLINT(caffe/alt_fn)
hipMemcpy(Y, X, N, hipMemcpyDefault);
hipMemcpy(Y, X, N, hipMemcpyDefault);
hipMemcpy(Y, X, N, hipMemcpyDefault);
hipMemcpy(Y, X, N, hipMemcpyDefault);
hipMemcpy(Y, X, N, hipMemcpyDefault);
}
#endif
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
template <typename Dtype>
__global__ void bound_kernel(const int n, const Dtype* a, const Dtype min_val,
const Dtype max_val, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = min(max(a[index], min_val), max_val);
}
}
template <>
void caffe_gpu_bound<float>(const int N, const float* a, const float min_val,
const float max_val, float* y) {
bound_kernel<float> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >(
N, a, min_val, max_val, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_bound<double>(const int N, const double* a, const double min_val,
const double max_val, double* y) {
bound_kernel<double> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >(
N, a, min_val, max_val, y);
CUDA_POST_KERNEL_CHECK;
}
} // namespace caffe
| e447441f063a1ce3717994258752b75a572c859a.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
//#define CUBLAS_CHECK(opr) (opr);
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
//hope-change.
//CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault));
#if 0
if (cudaMemcpy(Y, X, N, cudaMemcpyDefault) != cudaSuccess){ // NOLINT(caffe/alt_fn)
cudaMemcpy(Y, X, N, cudaMemcpyDefault);
cudaMemcpy(Y, X, N, cudaMemcpyDefault);
cudaMemcpy(Y, X, N, cudaMemcpyDefault);
cudaMemcpy(Y, X, N, cudaMemcpyDefault);
cudaMemcpy(Y, X, N, cudaMemcpyDefault);
}
#endif
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
template <typename Dtype>
__global__ void bound_kernel(const int n, const Dtype* a, const Dtype min_val,
const Dtype max_val, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = min(max(a[index], min_val), max_val);
}
}
template <>
void caffe_gpu_bound<float>(const int N, const float* a, const float min_val,
const float max_val, float* y) {
bound_kernel<float> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >(
N, a, min_val, max_val, y);
CUDA_POST_KERNEL_CHECK;
}
template <>
void caffe_gpu_bound<double>(const int N, const double* a, const double min_val,
const double max_val, double* y) {
bound_kernel<double> << <CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS >> >(
N, a, min_val, max_val, y);
CUDA_POST_KERNEL_CHECK;
}
} // namespace caffe
|
8690ead8a3ecfe745d1f64c6773819abe1375044.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "helper_cuda.h"
inline void swap(int *array, unsigned int i, unsigned int j)
{
int tmp = array[i];
array[i] = array[j];
array[j] = tmp;
}
void brick_sort(int *array, unsigned int size)
{
bool sorted = false;
while (!sorted) {
sorted = true;
// odd sort
for (unsigned int i = 1; i < size - 1; i += 2) {
if (array[i] > array[i + 1]) {
swap(array, i, i + 1);
sorted = false;
}
}
// even sort
for (unsigned int i = 0; i < size - 1; i += 2) {
if (array[i] > array[i + 1]) {
swap(array, i, i + 1);
sorted = false;
}
}
}
}
__global__ void brick_sort_kernel(int *h_in, bool is_even, unsigned int size)
{
// global thread ID
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
int tmp;
// even phase
if (is_even && gid * 2 + 1 < size && h_in[gid * 2] > h_in[gid * 2 + 1]) {
tmp = h_in[gid * 2];
h_in[gid * 2] = h_in[gid * 2 + 1];
h_in[gid * 2 + 1] = tmp;
// odd phase
} else if (!is_even && gid * 2 + 2 < size && h_in[gid * 2 + 1] > h_in[gid * 2 + 2]) {
tmp = h_in[gid * 2 + 1];
h_in[gid * 2 + 1] = h_in[gid * 2 + 2];
h_in[gid * 2 + 2] = tmp;
}
}
// brick sort on GPU
void gpu_brick_sort(int *h_in, unsigned int size)
{
int *d_in;
unsigned int blocks, threads_per_block;
// no need to sort
if (size == 1) {
return;
}
// allocate GPU memory
checkCudaErrors(hipMalloc((void**)&d_in, size * sizeof(int)));
// copy input from host memory to GPU memory
hipMemcpy(d_in, h_in, size * sizeof(int), hipMemcpyHostToDevice);
// lauch kernels to do computation
// launch size / 2 threads in total
// run size - 1 rounds
threads_per_block = 1024;
blocks = (size / 2 + threads_per_block - 1) / threads_per_block;
if (blocks == 1) {
threads_per_block = min(size, threads_per_block);
}
for (int i = 0; i < size - 1; i++) {
hipLaunchKernelGGL(( brick_sort_kernel), dim3(blocks), dim3(threads_per_block), 0, 0, d_in, i % 2, size);
}
// copy output from GPU memory to host memory
hipMemcpy(h_in, d_in, size * sizeof(int), hipMemcpyDeviceToHost);
// free GPU memory
hipFree(d_in);
}
int main()
{
const int array_size = 1000;
int array[array_size];
// sort result computed by GPU
int h_in[array_size];
bool result;
// initialize random number generator
srand(time(NULL));
// generate input
printf("Input:\n");
for (int i = 0; i < array_size; i++) {
array[i] = rand() % array_size;
h_in[i] = array[i];
printf("%d ", array[i]);
}
printf("\n");
// brick sort on GPU
gpu_brick_sort(h_in, array_size);
printf("GPU Output:\n");
for (int i = 0; i < array_size; i++) {
printf("%d ", h_in[i]);
}
printf("\n");
// brick sort on CPU
result = true;
brick_sort(array, array_size);
printf("Expected Output:\n");
for (int i = 0; i < array_size; i++) {
printf("%d ", array[i]);
if (array[i] != h_in[i]) {
result = false;
}
}
printf("\n");
if (result) {
printf("Correct result\n");
} else {
printf("Wrong result\n");
}
return 0;
} | 8690ead8a3ecfe745d1f64c6773819abe1375044.cu | #include <stdio.h>
#include "helper_cuda.h"
inline void swap(int *array, unsigned int i, unsigned int j)
{
int tmp = array[i];
array[i] = array[j];
array[j] = tmp;
}
void brick_sort(int *array, unsigned int size)
{
bool sorted = false;
while (!sorted) {
sorted = true;
// odd sort
for (unsigned int i = 1; i < size - 1; i += 2) {
if (array[i] > array[i + 1]) {
swap(array, i, i + 1);
sorted = false;
}
}
// even sort
for (unsigned int i = 0; i < size - 1; i += 2) {
if (array[i] > array[i + 1]) {
swap(array, i, i + 1);
sorted = false;
}
}
}
}
__global__ void brick_sort_kernel(int *h_in, bool is_even, unsigned int size)
{
// global thread ID
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
int tmp;
// even phase
if (is_even && gid * 2 + 1 < size && h_in[gid * 2] > h_in[gid * 2 + 1]) {
tmp = h_in[gid * 2];
h_in[gid * 2] = h_in[gid * 2 + 1];
h_in[gid * 2 + 1] = tmp;
// odd phase
} else if (!is_even && gid * 2 + 2 < size && h_in[gid * 2 + 1] > h_in[gid * 2 + 2]) {
tmp = h_in[gid * 2 + 1];
h_in[gid * 2 + 1] = h_in[gid * 2 + 2];
h_in[gid * 2 + 2] = tmp;
}
}
// brick sort on GPU
void gpu_brick_sort(int *h_in, unsigned int size)
{
int *d_in;
unsigned int blocks, threads_per_block;
// no need to sort
if (size == 1) {
return;
}
// allocate GPU memory
checkCudaErrors(cudaMalloc((void**)&d_in, size * sizeof(int)));
// copy input from host memory to GPU memory
cudaMemcpy(d_in, h_in, size * sizeof(int), cudaMemcpyHostToDevice);
// lauch kernels to do computation
// launch size / 2 threads in total
// run size - 1 rounds
threads_per_block = 1024;
blocks = (size / 2 + threads_per_block - 1) / threads_per_block;
if (blocks == 1) {
threads_per_block = min(size, threads_per_block);
}
for (int i = 0; i < size - 1; i++) {
brick_sort_kernel<<<blocks, threads_per_block>>>(d_in, i % 2, size);
}
// copy output from GPU memory to host memory
cudaMemcpy(h_in, d_in, size * sizeof(int), cudaMemcpyDeviceToHost);
// free GPU memory
cudaFree(d_in);
}
int main()
{
const int array_size = 1000;
int array[array_size];
// sort result computed by GPU
int h_in[array_size];
bool result;
// initialize random number generator
srand(time(NULL));
// generate input
printf("Input:\n");
for (int i = 0; i < array_size; i++) {
array[i] = rand() % array_size;
h_in[i] = array[i];
printf("%d ", array[i]);
}
printf("\n");
// brick sort on GPU
gpu_brick_sort(h_in, array_size);
printf("GPU Output:\n");
for (int i = 0; i < array_size; i++) {
printf("%d ", h_in[i]);
}
printf("\n");
// brick sort on CPU
result = true;
brick_sort(array, array_size);
printf("Expected Output:\n");
for (int i = 0; i < array_size; i++) {
printf("%d ", array[i]);
if (array[i] != h_in[i]) {
result = false;
}
}
printf("\n");
if (result) {
printf("Correct result\n");
} else {
printf("Wrong result\n");
}
return 0;
} |
5eac31f8628c56f2f858d8e495f058600a0cabcc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
/******************************added by hwzhu start****************************/
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__device__ int translate_idx_inv(
int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w*scale_factor+off_x;
z = z*scale_factor+off_y;
d2 *= scale_factor;
d3 *= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,
int no_elements, int scale_factor, int d1, int d2, int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
template <typename Dtype>
__global__ void downscale(Dtype *gradInput_data, const Dtype *gradOutput_data,
int no_elements, int scale_factor, int d1, int d2,
int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
for (int i = 0; i < scale_factor; i++) {
for (int j = 0; j < scale_factor; j++) {
int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
gradInput_data[ii] += gradOutput_data[ipidx];
}
}
}
/******************************added by hwzhu end ****************************/
template <typename Dtype>
__global__ void UpsampleForward(const int nthreads, int in_w, int in_h,
int out_w, int out_h, const Dtype* bottom_data,
const Dtype* bottom_mask, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int offset = index / (in_w * in_h) * out_w * out_h;
int upsample_idx = static_cast<int>(bottom_mask[index]);
top_data[offset + upsample_idx] = bottom_data[index];
}
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
/******************************added by hwzhu start********************************/
if (only_one_bottom_blob_flag){
int d1, d2, d3;
d1 = top[0]->shape(1);
d2 = top[0]->shape(2);
d3 = top[0]->shape(3);
int no_elements = top[0]->count();
upscale<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(no_elements)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom[0]->gpu_data(),
top[0]->mutable_gpu_data(), no_elements, scale_, d1, d2, d3);
return;
}
/******************************added by hwzhu end ********************************/
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_mask = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_set(top[0]->count(), Dtype(0), top_data);
int bottom_count = bottom[0]->count();
hipLaunchKernelGGL(( UpsampleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_count, bottom[0]->width(), bottom[0]->height(),
top[0]->width(), top[0]->height(), bottom_data, bottom_mask, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void UpsampleBackward(const int nthreads, int in_w, int in_h,
int out_w, int out_h, const Dtype* top_diff,
const Dtype* bottom_mask, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int offset = index / (in_w * in_h) * out_w * out_h;
int upsample_idx = static_cast<int>(bottom_mask[index]);
bottom_diff[index] = top_diff[offset + upsample_idx];
}
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
/***************************************added by hwzhu start*****************************************/
if (only_one_bottom_blob_flag){
if (propagate_down[0]){
int d1, d2, d3;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
d1 = bottom[0]->shape(1);
d2 = bottom[0]->shape(2);
d3 = bottom[0]->shape(3);
int no_elements = bottom[0]->count();
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
downscale<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(no_elements)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_diff, top[0]->gpu_diff(), no_elements, scale_, d1, d2, d3);
}
return;
}
/***************************************added by hwzhu end *****************************************/
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_mask = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff);
hipLaunchKernelGGL(( UpsampleBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_count, bottom[0]->width(), bottom[0]->height(),
top[0]->width(), top[0]->height(), top_diff, bottom_mask, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer);
} // namespace caffe
| 5eac31f8628c56f2f858d8e495f058600a0cabcc.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
/******************************added by hwzhu start****************************/
__device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w/scale_factor;
z = z/scale_factor;
d2 /= scale_factor;
d3 /= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
__device__ int translate_idx_inv(
int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) {
int x, y, z, w;
w = ii % d3;
ii = ii/d3;
z = ii % d2;
ii = ii/d2;
y = ii % d1;
ii = ii/d1;
x = ii;
w = w*scale_factor+off_x;
z = z*scale_factor+off_y;
d2 *= scale_factor;
d3 *= scale_factor;
return (((x*d1+y)*d2)+z)*d3+w;
}
template <typename Dtype>
__global__ void upscale(const Dtype *input, Dtype *output,
int no_elements, int scale_factor, int d1, int d2, int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, scale_factor);
output[ii]=input[ipidx];
}
template <typename Dtype>
__global__ void downscale(Dtype *gradInput_data, const Dtype *gradOutput_data,
int no_elements, int scale_factor, int d1, int d2,
int d3) {
int ii = threadIdx.x + blockDim.x * blockIdx.x;
if (ii >= no_elements) return;
for (int i = 0; i < scale_factor; i++) {
for (int j = 0; j < scale_factor; j++) {
int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j);
gradInput_data[ii] += gradOutput_data[ipidx];
}
}
}
/******************************added by hwzhu end ****************************/
template <typename Dtype>
__global__ void UpsampleForward(const int nthreads, int in_w, int in_h,
int out_w, int out_h, const Dtype* bottom_data,
const Dtype* bottom_mask, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int offset = index / (in_w * in_h) * out_w * out_h;
int upsample_idx = static_cast<int>(bottom_mask[index]);
top_data[offset + upsample_idx] = bottom_data[index];
}
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
/******************************added by hwzhu start********************************/
if (only_one_bottom_blob_flag){
int d1, d2, d3;
d1 = top[0]->shape(1);
d2 = top[0]->shape(2);
d3 = top[0]->shape(3);
int no_elements = top[0]->count();
upscale<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(no_elements), CAFFE_CUDA_NUM_THREADS>>>(
bottom[0]->gpu_data(),
top[0]->mutable_gpu_data(), no_elements, scale_, d1, d2, d3);
return;
}
/******************************added by hwzhu end ********************************/
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_mask = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_set(top[0]->count(), Dtype(0), top_data);
int bottom_count = bottom[0]->count();
UpsampleForward<Dtype><<<CAFFE_GET_BLOCKS(bottom_count), CAFFE_CUDA_NUM_THREADS>>>(
bottom_count, bottom[0]->width(), bottom[0]->height(),
top[0]->width(), top[0]->height(), bottom_data, bottom_mask, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void UpsampleBackward(const int nthreads, int in_w, int in_h,
int out_w, int out_h, const Dtype* top_diff,
const Dtype* bottom_mask, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int offset = index / (in_w * in_h) * out_w * out_h;
int upsample_idx = static_cast<int>(bottom_mask[index]);
bottom_diff[index] = top_diff[offset + upsample_idx];
}
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
/***************************************added by hwzhu start*****************************************/
if (only_one_bottom_blob_flag){
if (propagate_down[0]){
int d1, d2, d3;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
d1 = bottom[0]->shape(1);
d2 = bottom[0]->shape(2);
d3 = bottom[0]->shape(3);
int no_elements = bottom[0]->count();
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff);
downscale<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(no_elements), CAFFE_CUDA_NUM_THREADS>>>(
bottom_diff, top[0]->gpu_diff(), no_elements, scale_, d1, d2, d3);
}
return;
}
/***************************************added by hwzhu end *****************************************/
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_mask = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff);
UpsampleBackward<Dtype><<<CAFFE_GET_BLOCKS(bottom_count), CAFFE_CUDA_NUM_THREADS>>>(
bottom_count, bottom[0]->width(), bottom[0]->height(),
top[0]->width(), top[0]->height(), top_diff, bottom_mask, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer);
} // namespace caffe
|
ed211039a0e2f1144525c8c42b001a700f5c1f57.hip | // !!! This is a file automatically generated by hipify!!!
/**
* hessianIO.cu
* Created on: May 21, 2012
* Author: Zeyi Wen
* Copyright @DBGroup University of Melbourne
**/
#include "deviceHessian.h"
#include <helper_cuda.h>
#include <sys/time.h>
#include "../gpu_global_utility.h"
#include "../constant.h"
#include "rocblas.h"
#include "../storageManager.h"
using std::endl;
// Accumulators for Hessian I/O profiling (total time / number of operations).
// NOTE(review): not updated anywhere in this file — presumably maintained by
// the I/O routines elsewhere in the project; confirm before relying on them.
long lIO_timer = 0;
long lIO_counter = 0;
// Out-of-class definition of the static kernel calculator. It must be assigned
// before any DeviceHessian method that dereferences it is invoked.
CKernelCalculater *DeviceHessian::m_pKernelCalculater = NULL;
/*
 * @brief: fill pfHessianDiag with the Hessian diagonal of the working instances.
 *         For RBF kernels the kernel calculator produces the diagonal directly;
 *         otherwise entries are copied out of the precomputed m_pfHessianDiag,
 *         concatenating the (up to) two configured row ranges.
 * @param: strFileName: file name forwarded to the kernel calculator (RBF path only)
 * @param: nNumofInstance: number of diagonal entries expected by the caller
 * @param: pfHessianDiag: output buffer of at least nNumofInstance entries
 * @return: always true
 */
bool DeviceHessian::GetHessianDiag(const string &strFileName, const int &nNumofInstance, float_point *pfHessianDiag)
{
	assert(nNumofInstance > 0);

	// RBF kernels: delegate entirely to the kernel calculator.
	if(m_pKernelCalculater->GetType() == RBFKERNEL)
	{
		m_pKernelCalculater->GetHessianDiag(strFileName, nNumofInstance, pfHessianDiag);
		return true;
	}

	// First row range: copied to the beginning of the output buffer.
	if(m_nRowStartPos1 != -1)
	{
		assert(m_nRowStartPos1 >= 0 && m_nRowEndPos1 > 0);
		for(int nRow = m_nRowStartPos1; nRow <= m_nRowEndPos1; nRow++)
		{
			assert(nRow < m_nTotalNumofInstance);
			pfHessianDiag[nRow - m_nRowStartPos1] = m_pfHessianDiag[nRow];
		}
	}

	// Second row range: placed immediately after the first one (if any).
	if(m_nRowStartPos2 != -1)
	{
		const int nOffset = (m_nRowEndPos1 != -1) ? (m_nRowEndPos1 + 1) : 0;
		assert(m_nRowStartPos2 >= 0 && m_nRowEndPos2 > 0);
		for(int nRow = m_nRowStartPos2; nRow <= m_nRowEndPos2; nRow++)
		{
			const int nDest = nRow - m_nRowStartPos2 + nOffset;
			assert(nDest < nNumofInstance && nDest >= 0);
			pfHessianDiag[nDest] = m_pfHessianDiag[nRow];
		}
	}
	return true;
}
/*
* @brief: get the size for each batch write
* @return: the number of Hessian rows for each write
*/
int DeviceHessian::GetNumofBatchWriteRows()
{
int nReturn = 0;
if(m_nTotalNumofInstance > 0)
{
//initialize cache
StorageManager *manager = StorageManager::getManager();
long long nMaxNumofFloatPoint = manager->GetFreeGPUMem();
nReturn = (nMaxNumofFloatPoint / (m_nTotalNumofInstance * sizeof(float_point)));
}
if(nReturn > m_nTotalNumofInstance)
{
nReturn = m_nTotalNumofInstance;
}
return nReturn;
}
/*
 * @brief: compute one sub-matrix of the Hessian on the GPU.
 * @param: pfDevTotalSamples: device pointer to samples spanning the sub-matrix rows (|)
 * @param: pfDevTransSamples: device pointer to transposed samples spanning the columns (-)
 * @param: pfDevSelfDot: device pointer to the self dot product of every instance
 * @param: pfDevNumofHessianRows: device output buffer; zeroed here and then filled
 *         with nSubMatrixRow x nSubMatrixCol kernel values
 * @param: nStartRow, nStartCol: position of this sub-matrix inside the full Hessian
 * @return: always true; a kernel-calculator failure trips the assert below instead
 */
bool DeviceHessian::ComputeSubHessianMatrix(float_point *pfDevTotalSamples, float_point *pfDevTransSamples,float_point *pfDevSelfDot,
					float_point *pfDevNumofHessianRows, int nSubMatrixCol, int nSubMatrixRow,
					int nStartRow, int nStartCol)
{
	bool bReturn = true;

	// warn (but continue) when the dimensionality exceeds the tuned threshold
	if(m_nNumofDim > NORMAL_NUMOF_DIM)
	{
		cerr << "the number of dimension is very large" << endl;
	}

	// clear the output buffer before launching the kernel; the size is computed
	// in long long so row*col cannot overflow for large sub-matrices
	long long nHessianRowsSpace = nSubMatrixRow * (long long)nSubMatrixCol;
	long long nHessianRowSpaceInByte = sizeof(float_point) * nHessianRowsSpace;
	checkCudaErrors(hipMemset(pfDevNumofHessianRows, 0, nHessianRowSpaceInByte));

	timeval t1, t2;
	float_point elapsedTime;
	gettimeofday(&t1, NULL);
//	cout << "computing " << nSubMatrixRow << " sub Hessian rows which have " << nSubMatrixCol << " column each" << endl;

	// fail fast on any sticky error left behind by earlier GPU work
	if(hipGetLastError() != hipSuccess)
	{
		cerr << "cuda error before ComputeHessianRows" << endl;
		exit(0);
	}

	//pfDevTotalSamples is for row width (|); pfDevTransSamples is for col width (-)
	bool bComputeRows = m_pKernelCalculater->ComputeHessianRows(pfDevTotalSamples, pfDevTransSamples, pfDevSelfDot, pfDevNumofHessianRows,
			nSubMatrixCol, m_nNumofDim, nSubMatrixRow, nStartRow, nStartCol);

	// the launch is asynchronous; synchronize so the wall-clock timing below is
	// meaningful (elapsedTime is only reported via the commented-out print)
	hipDeviceSynchronize();
	gettimeofday(&t2, NULL);
	elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0;
	elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0;
//	cout << "computing kernel time " << elapsedTime << " ms.\n";

	assert(bComputeRows == true);
	return bReturn;
}
/**
 * @brief: compute the whole Hessian matrix in a single GPU pass.
 *         Allocates device buffers for the samples, transposed samples, self dot
 *         products and the full n x n result, copies the inputs up, delegates to
 *         ComputeSubHessianMatrix over the entire matrix, then copies the result
 *         into m_pfHessianRowsInHostMem and releases the device buffers.
 * @param: pfTotalSamples: host samples, row-major (instance-major)
 * @param: pfTransSamples: host samples, transposed (dimension-major)
 * @param: pfSelfDot: host self dot product per instance
 */
void DeviceHessian::ComputeHessianAtOnce(float_point *pfTotalSamples, float_point *pfTransSamples, float_point *pfSelfDot)
{
	// device-side buffers
	float_point *pfDevTransSamples;
	float_point *pfDevNumofHessianRows;
	float_point *pfDevTotalSamples;
	float_point *pfDevSelfDot;
	// NOTE(review): `long` is 32-bit on LLP64 platforms (e.g. Windows), so
	// lResult = instances^2 could overflow there — confirm target platforms.
	long lSpace = (long)m_nNumofDim * m_nTotalNumofInstance;
	long lResult = (long)m_nTotalNumofInstance * m_nTotalNumofInstance;
	checkCudaErrors(hipMalloc((void**)&pfDevTransSamples, sizeof(float_point) * lSpace));
	checkCudaErrors(hipMalloc((void**)&pfDevTotalSamples, sizeof(float_point) * lSpace));
	checkCudaErrors(hipMalloc((void**)&pfDevNumofHessianRows, sizeof(float_point) * lResult));
	checkCudaErrors(hipMalloc((void**)&pfDevSelfDot, sizeof(float_point) * m_nTotalNumofInstance));
	// upload inputs
	checkCudaErrors(hipMemcpy(pfDevSelfDot, pfSelfDot, sizeof(float_point) * m_nTotalNumofInstance, hipMemcpyHostToDevice));
	checkCudaErrors(hipMemcpy(pfDevTotalSamples, pfTotalSamples,
							sizeof(float_point) * lSpace, hipMemcpyHostToDevice));
	checkCudaErrors(hipMemcpy(pfDevTransSamples, pfTransSamples,
							sizeof(float_point) * lSpace, hipMemcpyHostToDevice));
	// the "sub matrix" here is the full matrix: n columns x n rows from (0, 0)
	ComputeSubHessianMatrix(pfDevTotalSamples, pfDevTransSamples, pfDevSelfDot,
							pfDevNumofHessianRows, m_nTotalNumofInstance, m_nTotalNumofInstance, 0, 0);
	// bring the whole Hessian back to host memory
	checkCudaErrors(hipMemcpy(m_pfHessianRowsInHostMem, pfDevNumofHessianRows,
							sizeof(float_point) * lResult, hipMemcpyDeviceToHost));
	checkCudaErrors(hipFree(pfDevTotalSamples));
	checkCudaErrors(hipFree(pfDevTransSamples));
	checkCudaErrors(hipFree(pfDevNumofHessianRows));
	checkCudaErrors(hipFree(pfDevSelfDot));
}
/*
 * @brief: compute the Hessian matrix tile by tile and write it to file.
 *         The matrix is split into nNumofPartForACol x nNumofPartForARow tiles
 *         sized to fit in GPU memory; each tile is computed on the device and
 *         persisted via SaveRows. If the whole matrix fits at once, the fast
 *         single-pass path (ComputeHessianAtOnce) is taken instead.
 * @param: strHessianMatrixFileName: output file for the Hessian matrix (opened "wb" here)
 * @param: strDiagHessianFileName: output file for the Hessian diagonal
 *         (NOTE(review): not written in this function — confirm where it is used)
 * @param: v_v_DocVector: document vectors of all the training samples
 * @return: true on success (failures call exit(0) instead of returning false)
 */
bool DeviceHessian::PrecomputeHessian(const string &strHessianMatrixFileName,
									  const string &strDiagHessianFileName,
									  vector<vector<float_point> > &v_v_DocVector)
{
	bool bReturn = true;

	m_nTotalNumofInstance = v_v_DocVector.size();
	m_nNumofDim = (v_v_DocVector.front()).size();
	m_nNumofHessianRowsToWrite = GetNumofBatchWriteRows();
	assert(m_nNumofHessianRowsToWrite != 0);

	// flatten the training samples into a row-major (instance-major) linear array
	long long nSpaceForSamples = (long long)m_nTotalNumofInstance * m_nNumofDim;
	float_point *pfTotalSamples = new float_point[nSpaceForSamples];
	memset(pfTotalSamples, 0, sizeof(float_point) * nSpaceForSamples);
	for(int i = 0; i < m_nTotalNumofInstance; i++)
	{
		for(int j = 0; j < m_nNumofDim; j++)
		{
			long long nIndex = (long long)i * m_nNumofDim + j;
			pfTotalSamples[nIndex] = v_v_DocVector[i][j];
			//pfTransSamples[j * m_nTotalNumofInstance + i] = v_v_DocVector[i][j];
		}
	}
//	v_v_DocVector.clear();

	// build the transposed (dimension-major) copy of the samples
	float_point *pfTransSamples = new float_point[nSpaceForSamples];
	memset(pfTransSamples, 0, sizeof(float_point) * nSpaceForSamples);
	for(int i = 0; i < m_nTotalNumofInstance; i++)
	{
		for(int j = 0; j < m_nNumofDim; j++)
		{
			long long nIndex = (long long)j * m_nTotalNumofInstance + i;
			long long nIndex2 = (long long)i * m_nNumofDim + j;
			pfTransSamples[nIndex] = pfTotalSamples[nIndex2];
		}
	}

	// self dot product <x_i, x_i> of each instance
	float_point *pfSelfDot = new float_point[m_nTotalNumofInstance];
	for(int i = 0; i < m_nTotalNumofInstance; i++)
	{
		float_point fTemp = 0;;
		for(int j = 0; j < m_nNumofDim; j++)
		{
			long long nIndex = (long long)i * m_nNumofDim + j;
			//fTemp += (v_v_DocVector[i][j] * v_v_DocVector[i][j]);
			fTemp += (pfTotalSamples[nIndex] * pfTotalSamples[nIndex]);
		}
		pfSelfDot[i] = fTemp;
	}

	// ask the storage manager how the Hessian must be partitioned to fit on the GPU
	StorageManager *manager = StorageManager::getManager();
	int nNumofPartForARow = manager->PartOfRow(m_nTotalNumofInstance,m_nNumofDim);
	int nNumofPartForACol = manager->PartOfCol(nNumofPartForARow, m_nTotalNumofInstance, m_nNumofDim);
	cout << nNumofPartForARow << " parts of row; " << nNumofPartForACol << " parts of col.";
	cout.flush();

	//If the kernel matrix has been computed (for debugging your code), you can use 1/5 to 5/5 to save some computation
	//1/5: open the matrix file for writing (truncates any previous content)
	pHessianFile = fopen(strHessianMatrixFileName.c_str(), "wb");
	if(pHessianFile == NULL)
	{
		cout << "open " << strHessianMatrixFileName << " failed" << endl;
		exit(0);
	}
	/**/

	/*********** process the whole matrix at once *****************/
	if(nNumofPartForARow == nNumofPartForACol && nNumofPartForACol == 1)
	{
		ComputeHessianAtOnce(pfTotalSamples, pfTransSamples, pfSelfDot);
		delete[] pfTotalSamples;
		delete[] pfTransSamples;
		delete[] pfSelfDot;
		fclose(pHessianFile);
		return true;
	}

	// per-tile column widths: all tiles use the average width except the last,
	// which takes the remainder
	int *pLenofEachSubRow = new int[nNumofPartForARow];
	int nAveLenofSubRow = Ceil(m_nTotalNumofInstance, nNumofPartForARow);
	for(int i = 0; i < nNumofPartForARow; i++)
	{
		if(i + 1 != nNumofPartForARow)
			pLenofEachSubRow[i] = nAveLenofSubRow;
		else
			pLenofEachSubRow[i] = m_nTotalNumofInstance - nAveLenofSubRow * i;
	}
	// per-tile row heights, same remainder scheme as above
	int *pLenofEachSubCol = new int[nNumofPartForACol];
	int nAveLenofSubCol = Ceil(m_nTotalNumofInstance, nNumofPartForACol);
	for(int i = 0; i < nNumofPartForACol; i++)
	{
		if(i + 1 != nNumofPartForACol)
			pLenofEachSubCol[i] = nAveLenofSubCol;
		else
			pLenofEachSubCol[i] = m_nTotalNumofInstance - nAveLenofSubCol * i;
	}

	/*********************start to compute the sub matrices******************/
	// host-side staging buffers, sized for the largest possible tile
	long long lMaxSubMatrixSize = (long long)nAveLenofSubCol * nAveLenofSubRow;
	long long nMaxTransSamplesInCol = (long long)m_nNumofDim * nAveLenofSubRow;
	long long nMaxSamplesInRow = (long long)m_nNumofDim * nAveLenofSubCol;
	float_point *pfSubMatrix = new float_point[lMaxSubMatrixSize];
	//float_point *pfSubMatrixRowMajor = new float_point[lMaxSubMatrixSize];
	float_point *pfTransSamplesForAColInSubMatrix;
	pfTransSamplesForAColInSubMatrix = new float_point[nMaxTransSamplesInCol];
	float_point *pfSamplesForARowInSubMatrix;
//	pfSamplesForARowInSubMatrix = new float_point[nMaxSamplesInRow];

	// device-side buffers, allocated once and reused for every tile
	float_point *pfDevTransSamples;
	float_point *pfDevNumofHessianRows;
	float_point *pfDevTotalSamples;
	float_point *pfDevSelfDot;
	checkCudaErrors(hipMalloc((void**)&pfDevTransSamples, sizeof(float_point) * nMaxTransSamplesInCol));
	checkCudaErrors(hipMalloc((void**)&pfDevTotalSamples, sizeof(float_point) * nMaxSamplesInRow));
	checkCudaErrors(hipMalloc((void**)&pfDevNumofHessianRows, sizeof(float_point) * lMaxSubMatrixSize));
	checkCudaErrors(hipMalloc((void**)&pfDevSelfDot, sizeof(float_point) * m_nTotalNumofInstance));
	checkCudaErrors(hipMemcpy(pfDevSelfDot, pfSelfDot, sizeof(float_point) * m_nTotalNumofInstance, hipMemcpyHostToDevice));

	// compute every tile of the Hessian
	for(int iPartofCol = 0; iPartofCol < nNumofPartForACol; iPartofCol++)
	{
		// height of the tiles in this band (last band keeps the remainder)
		int nSubMatrixRow;
		if(iPartofCol == nNumofPartForACol - 1)
			nSubMatrixRow = pLenofEachSubCol[iPartofCol];
		else
			nSubMatrixRow = nAveLenofSubCol;
		for(int jPartofRow = 0; jPartofRow < nNumofPartForARow; jPartofRow++)
		{
			int nSubMatrixCol;
			// width of this tile (last tile in the band keeps the remainder)
			if(jPartofRow == nNumofPartForARow - 1)
				nSubMatrixCol = pLenofEachSubRow[jPartofRow];
			else
				nSubMatrixCol = nAveLenofSubRow;
//			cout << "row= " << nSubMatrixRow << " col= " << nSubMatrixCol << endl;
//			cout << ".";
//			cout.flush();
			// clear the staging buffer for this tile
			long long nHessianSubMatrixSpace = nSubMatrixRow * nSubMatrixCol;
			memset(pfSubMatrix, 0, sizeof(float_point) * nHessianSubMatrixSpace);
			//get the copies of sample data for computing sub matrix
			//sample for sub matrix rows
			/*for(int d = 0; d < m_nNumofDimensions; d++)
			{
				memcpy(pfSamplesForARowInSubMatrix + d * nSubMatrixRow,
					   pfTotalSamples + iPartofCol * nAveLenofSubCol + d * m_nTotalNumofSamples,
					   sizeof(float_point) * nSubMatrixRow);
			}
			pfTransSamplesForAColInSubMatrix = pfTransSamples + jPartofRow * nAveLenofSubRow * m_nNumofDimensions;*/
			// row samples: contiguous slice of the instance-major array (no copy needed)
			pfSamplesForARowInSubMatrix = pfTotalSamples + (long long)iPartofCol * nAveLenofSubCol * m_nNumofDim;
			// column samples: gather one contiguous run per dimension from the
			// dimension-major array into the staging buffer
			for(int d = 0; d < m_nNumofDim; d++)
			{
				//for(int k = 0; k < nSubMatrixCol; k++)
				//{
					//pfTransSamplesForAColInSubMatrix[k + d * nSubMatrixCol] =
							//pfTransSamples[k + jPartofRow * nAveLenofSubRow + d * m_nTotalNumofSamples];
				long long nSampleIndex = (long long)jPartofRow * nAveLenofSubRow + (long long)d * m_nTotalNumofInstance;
				long long nIndexForSub = (long long)d * nSubMatrixCol;
				memcpy(pfTransSamplesForAColInSubMatrix + nIndexForSub,
					   pfTransSamples + nSampleIndex, sizeof(float_point) * nSubMatrixCol);
				//}
			}
			// upload the tile's row and column samples
			long long nSpaceForSamplesInRow = (long long)m_nNumofDim * nSubMatrixRow;
			long long nSpaceForSamplesInCol = (long long)m_nNumofDim * nSubMatrixCol;
			checkCudaErrors(hipMemcpy(pfDevTotalSamples, pfSamplesForARowInSubMatrix,
									  sizeof(float_point) * nSpaceForSamplesInRow, hipMemcpyHostToDevice));
			checkCudaErrors(hipMemcpy(pfDevTransSamples, pfTransSamplesForAColInSubMatrix,
									  sizeof(float_point) * nSpaceForSamplesInCol, hipMemcpyHostToDevice));
//
			//compute the value of the sub matrix
			int nStartRow = iPartofCol * nAveLenofSubCol;
			int nStartCol = jPartofRow * nAveLenofSubRow;
			ComputeSubHessianMatrix(pfDevTotalSamples, pfDevTransSamples, pfDevSelfDot,
									pfDevNumofHessianRows, nSubMatrixCol, nSubMatrixRow,
									nStartRow, nStartCol);

			// download the computed tile
			int nHessianRowsSpace = nSubMatrixRow * nSubMatrixCol;
			checkCudaErrors(hipMemcpy(pfSubMatrix, pfDevNumofHessianRows,
									  sizeof(float_point) * nHessianRowsSpace, hipMemcpyDeviceToHost));

			// persist the tile together with its position/extent metadata
			SubMatrix subMatrix;
			subMatrix.nColIndex = jPartofRow * nAveLenofSubRow;
			subMatrix.nColSize = nSubMatrixCol;
			subMatrix.nRowIndex = iPartofCol * nAveLenofSubCol;
			subMatrix.nRowSize = nSubMatrixRow;
			SaveRows(pfSubMatrix, subMatrix);
		}
		cout << ".";
		cout.flush();
	}

	delete[] pfSubMatrix;
	delete[] pfTransSamplesForAColInSubMatrix;
	delete[] pLenofEachSubRow;
	delete[] pLenofEachSubCol;
	delete[] pfTotalSamples;
	delete[] pfTransSamples;
	delete[] pfSelfDot;

	//release memory on GPU
	checkCudaErrors(hipFree(pfDevTotalSamples));
	checkCudaErrors(hipFree(pfDevTransSamples));
	checkCudaErrors(hipFree(pfDevNumofHessianRows));
	checkCudaErrors(hipFree(pfDevSelfDot));

	//4/5: close the matrix file
	fclose(pHessianFile);
	//5/5 is in smoSolver.h

	return bReturn;
}
| ed211039a0e2f1144525c8c42b001a700f5c1f57.cu | /**
* hessianIO.cu
* Created on: May 21, 2012
* Author: Zeyi Wen
* Copyright @DBGroup University of Melbourne
**/
#include "deviceHessian.h"
#include <helper_cuda.h>
#include <sys/time.h>
#include "../gpu_global_utility.h"
#include "../constant.h"
#include "cublas.h"
#include "../storageManager.h"
using std::endl;
// Hessian I/O profiling counters (accumulated time / operation count).
// NOTE(review): neither is modified in this translation unit — presumably
// updated by the I/O code elsewhere; verify before use.
long lIO_timer = 0;
long lIO_counter = 0;
// Definition of DeviceHessian's static kernel-calculator pointer; it must be
// set before any method that uses m_pKernelCalculater runs.
CKernelCalculater *DeviceHessian::m_pKernelCalculater = NULL;
/*
 * @brief: obtain the Hessian diagonal for the current working instances.
 *         RBF kernels delegate to the kernel calculator; every other kernel
 *         copies entries from the precomputed m_pfHessianDiag array, packing
 *         the two configured row ranges back to back in the output buffer.
 * @param: strFileName: file name handed to the kernel calculator (RBF only)
 * @param: nNumofInstance: number of entries the caller expects
 * @param: pfHessianDiag: destination buffer (>= nNumofInstance entries)
 * @return: always true
 */
bool DeviceHessian::GetHessianDiag(const string &strFileName, const int &nNumofInstance, float_point *pfHessianDiag)
{
	assert(nNumofInstance > 0);

	// RBF path: the calculator fills the diagonal itself.
	if(m_pKernelCalculater->GetType() == RBFKERNEL)
	{
		m_pKernelCalculater->GetHessianDiag(strFileName, nNumofInstance, pfHessianDiag);
		return true;
	}

	// Range 1 goes at the front of the output.
	if(m_nRowStartPos1 != -1)
	{
		assert(m_nRowStartPos1 >= 0 && m_nRowEndPos1 > 0);
		for(int nSrc = m_nRowStartPos1; nSrc <= m_nRowEndPos1; nSrc++)
		{
			assert(nSrc < m_nTotalNumofInstance);
			pfHessianDiag[nSrc - m_nRowStartPos1] = m_pfHessianDiag[nSrc];
		}
	}

	// Range 2 follows directly after range 1 (offset 0 when range 1 is absent).
	if(m_nRowStartPos2 != -1)
	{
		const int nBase = (m_nRowEndPos1 != -1) ? (m_nRowEndPos1 + 1) : 0;
		assert(m_nRowStartPos2 >= 0 && m_nRowEndPos2 > 0);
		for(int nSrc = m_nRowStartPos2; nSrc <= m_nRowEndPos2; nSrc++)
		{
			const int nDst = nSrc - m_nRowStartPos2 + nBase;
			assert(nDst < nNumofInstance && nDst >= 0);
			pfHessianDiag[nDst] = m_pfHessianDiag[nSrc];
		}
	}
	return true;
}
/*
* @brief: get the size for each batch write
* @return: the number of Hessian rows for each write
*/
int DeviceHessian::GetNumofBatchWriteRows()
{
int nReturn = 0;
if(m_nTotalNumofInstance > 0)
{
//initialize cache
StorageManager *manager = StorageManager::getManager();
long long nMaxNumofFloatPoint = manager->GetFreeGPUMem();
nReturn = (nMaxNumofFloatPoint / (m_nTotalNumofInstance * sizeof(float_point)));
}
if(nReturn > m_nTotalNumofInstance)
{
nReturn = m_nTotalNumofInstance;
}
return nReturn;
}
/*
 * @brief: compute one sub-matrix of the Hessian on the GPU.
 * @param: pfDevTotalSamples: device pointer to samples covering the sub-matrix rows (|)
 * @param: pfDevTransSamples: device pointer to transposed samples covering the columns (-)
 * @param: pfDevSelfDot: device pointer to each instance's self dot product
 * @param: pfDevNumofHessianRows: device output buffer; cleared here and filled
 *         with nSubMatrixRow x nSubMatrixCol kernel values
 * @param: nStartRow, nStartCol: offset of this sub-matrix within the full Hessian
 * @return: always true; kernel-calculator failure trips the assert instead
 */
bool DeviceHessian::ComputeSubHessianMatrix(float_point *pfDevTotalSamples, float_point *pfDevTransSamples,float_point *pfDevSelfDot,
					float_point *pfDevNumofHessianRows, int nSubMatrixCol, int nSubMatrixRow,
					int nStartRow, int nStartCol)
{
	bool bReturn = true;

	// warn (but keep going) for unusually high dimensionality
	if(m_nNumofDim > NORMAL_NUMOF_DIM)
	{
		cerr << "the number of dimension is very large" << endl;
	}

	// zero the output buffer before the launch; long long arithmetic prevents
	// row*col overflow for large sub-matrices
	long long nHessianRowsSpace = nSubMatrixRow * (long long)nSubMatrixCol;
	long long nHessianRowSpaceInByte = sizeof(float_point) * nHessianRowsSpace;
	checkCudaErrors(cudaMemset(pfDevNumofHessianRows, 0, nHessianRowSpaceInByte));

	timeval t1, t2;
	float_point elapsedTime;
	gettimeofday(&t1, NULL);
//	cout << "computing " << nSubMatrixRow << " sub Hessian rows which have " << nSubMatrixCol << " column each" << endl;

	// bail out if a sticky CUDA error is already pending from earlier work
	if(cudaGetLastError() != cudaSuccess)
	{
		cerr << "cuda error before ComputeHessianRows" << endl;
		exit(0);
	}

	//pfDevTotalSamples is for row width (|); pfDevTransSamples is for col width (-)
	bool bComputeRows = m_pKernelCalculater->ComputeHessianRows(pfDevTotalSamples, pfDevTransSamples, pfDevSelfDot, pfDevNumofHessianRows,
			nSubMatrixCol, m_nNumofDim, nSubMatrixRow, nStartRow, nStartCol);

	// kernel launches are asynchronous; synchronize so the timing is valid
	// (elapsedTime is only reported through the commented-out print below)
	cudaDeviceSynchronize();
	gettimeofday(&t2, NULL);
	elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0;
	elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0;
//	cout << "computing kernel time " << elapsedTime << " ms.\n";

	assert(bComputeRows == true);
	return bReturn;
}
/**
 * @brief: compute the entire Hessian matrix in a single GPU pass.
 *         Allocates device buffers, uploads the samples / transposed samples /
 *         self dot products, calls ComputeSubHessianMatrix over the full n x n
 *         range, copies the result into m_pfHessianRowsInHostMem, and frees
 *         the device buffers.
 * @param: pfTotalSamples: host samples, instance-major layout
 * @param: pfTransSamples: host samples, dimension-major (transposed) layout
 * @param: pfSelfDot: host per-instance self dot products
 */
void DeviceHessian::ComputeHessianAtOnce(float_point *pfTotalSamples, float_point *pfTransSamples, float_point *pfSelfDot)
{
	// device-side buffers
	float_point *pfDevTransSamples;
	float_point *pfDevNumofHessianRows;
	float_point *pfDevTotalSamples;
	float_point *pfDevSelfDot;
	// NOTE(review): `long` is only 32 bits on LLP64 platforms, so
	// lResult = instances^2 could overflow there — confirm target platforms.
	long lSpace = (long)m_nNumofDim * m_nTotalNumofInstance;
	long lResult = (long)m_nTotalNumofInstance * m_nTotalNumofInstance;
	checkCudaErrors(cudaMalloc((void**)&pfDevTransSamples, sizeof(float_point) * lSpace));
	checkCudaErrors(cudaMalloc((void**)&pfDevTotalSamples, sizeof(float_point) * lSpace));
	checkCudaErrors(cudaMalloc((void**)&pfDevNumofHessianRows, sizeof(float_point) * lResult));
	checkCudaErrors(cudaMalloc((void**)&pfDevSelfDot, sizeof(float_point) * m_nTotalNumofInstance));
	// upload the inputs
	checkCudaErrors(cudaMemcpy(pfDevSelfDot, pfSelfDot, sizeof(float_point) * m_nTotalNumofInstance, cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(pfDevTotalSamples, pfTotalSamples,
							sizeof(float_point) * lSpace, cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(pfDevTransSamples, pfTransSamples,
							sizeof(float_point) * lSpace, cudaMemcpyHostToDevice));
	// the "sub matrix" is the whole matrix: n columns x n rows starting at (0, 0)
	ComputeSubHessianMatrix(pfDevTotalSamples, pfDevTransSamples, pfDevSelfDot,
							pfDevNumofHessianRows, m_nTotalNumofInstance, m_nTotalNumofInstance, 0, 0);
	// copy the full Hessian back into host memory
	checkCudaErrors(cudaMemcpy(m_pfHessianRowsInHostMem, pfDevNumofHessianRows,
							sizeof(float_point) * lResult, cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaFree(pfDevTotalSamples));
	checkCudaErrors(cudaFree(pfDevTransSamples));
	checkCudaErrors(cudaFree(pfDevNumofHessianRows));
	checkCudaErrors(cudaFree(pfDevSelfDot));
}
/*
* @brief: compute Hessian matrix, write Hessian matrix and Hessian diagonal to two files respectively
* @param: strHessianMatrixFileName: file name of a file storing hessian matrix (which serves as an output of this function)
* @param: strDiagHessianFileName: file name of a file storing diagonal of hessian matrix
* @param: v_v_DocVector: document vectors of all the training samples
*/
bool DeviceHessian::PrecomputeHessian(const string &strHessianMatrixFileName,
const string &strDiagHessianFileName,
vector<vector<float_point> > &v_v_DocVector)
{
bool bReturn = true;
m_nTotalNumofInstance = v_v_DocVector.size();
m_nNumofDim = (v_v_DocVector.front()).size();
m_nNumofHessianRowsToWrite = GetNumofBatchWriteRows();
assert(m_nNumofHessianRowsToWrite != 0);
//linear array for training samples
long long nSpaceForSamples = (long long)m_nTotalNumofInstance * m_nNumofDim;
float_point *pfTotalSamples = new float_point[nSpaceForSamples];
memset(pfTotalSamples, 0, sizeof(float_point) * nSpaceForSamples);
//copy samples to a linear array
for(int i = 0; i < m_nTotalNumofInstance; i++)
{
//assign document vector to svm node
for(int j = 0; j < m_nNumofDim; j++)
{
long long nIndex = (long long)i * m_nNumofDim + j;
pfTotalSamples[nIndex] = v_v_DocVector[i][j];
//pfTransSamples[j * m_nTotalNumofInstance + i] = v_v_DocVector[i][j];
}
}
// v_v_DocVector.clear();
float_point *pfTransSamples = new float_point[nSpaceForSamples];
memset(pfTransSamples, 0, sizeof(float_point) * nSpaceForSamples);
//copy samples to a linear array
for(int i = 0; i < m_nTotalNumofInstance; i++)
{
//assign document vector to svm node
for(int j = 0; j < m_nNumofDim; j++)
{
long long nIndex = (long long)j * m_nTotalNumofInstance + i;
long long nIndex2 = (long long)i * m_nNumofDim + j;
pfTransSamples[nIndex] = pfTotalSamples[nIndex2];
}
}
//self dot product
float_point *pfSelfDot = new float_point[m_nTotalNumofInstance];
//copy samples to a linear array
for(int i = 0; i < m_nTotalNumofInstance; i++)
{
//assign document vector to svm node
float_point fTemp = 0;;
for(int j = 0; j < m_nNumofDim; j++)
{
long long nIndex = (long long)i * m_nNumofDim + j;
//fTemp += (v_v_DocVector[i][j] * v_v_DocVector[i][j]);
fTemp += (pfTotalSamples[nIndex] * pfTotalSamples[nIndex]);
}
pfSelfDot[i] = fTemp;
}
//compute the minimum number of sub matrices that are required to calculate the whole Hessian matrix
StorageManager *manager = StorageManager::getManager();
int nNumofPartForARow = manager->PartOfRow(m_nTotalNumofInstance,m_nNumofDim);
int nNumofPartForACol = manager->PartOfCol(nNumofPartForARow, m_nTotalNumofInstance, m_nNumofDim);
cout << nNumofPartForARow << " parts of row; " << nNumofPartForACol << " parts of col.";
cout.flush();
//If the kernel matrix has been computed (for debugging your code), you can use 1/5 to 5/5 to save some computation
//1/5
pHessianFile = fopen(strHessianMatrixFileName.c_str(), "wb");
if(pHessianFile == NULL)
{
cout << "open " << strHessianMatrixFileName << " failed" << endl;
exit(0);
}
/**/
/*********** process the whole matrix at once *****************/
if(nNumofPartForARow == nNumofPartForACol && nNumofPartForACol == 1)
{
ComputeHessianAtOnce(pfTotalSamples, pfTransSamples, pfSelfDot);
delete[] pfTotalSamples;
delete[] pfTransSamples;
delete[] pfSelfDot;
fclose(pHessianFile);
return true;
}
//open file to write. When the file is open, the content is empty
//length for sub row
int *pLenofEachSubRow = new int[nNumofPartForARow];
int nAveLenofSubRow = Ceil(m_nTotalNumofInstance, nNumofPartForARow);
for(int i = 0; i < nNumofPartForARow; i++)
{
if(i + 1 != nNumofPartForARow)
pLenofEachSubRow[i] = nAveLenofSubRow;
else
pLenofEachSubRow[i] = m_nTotalNumofInstance - nAveLenofSubRow * i;
}
//length for sub row
int *pLenofEachSubCol = new int[nNumofPartForACol];
int nAveLenofSubCol = Ceil(m_nTotalNumofInstance, nNumofPartForACol);
for(int i = 0; i < nNumofPartForACol; i++)
{
if(i + 1 != nNumofPartForACol)
pLenofEachSubCol[i] = nAveLenofSubCol;
else
pLenofEachSubCol[i] = m_nTotalNumofInstance - nAveLenofSubCol * i;
}
/*********************start to compute the sub matrices******************/
//variables on host side
long long lMaxSubMatrixSize = (long long)nAveLenofSubCol * nAveLenofSubRow;
long long nMaxTransSamplesInCol = (long long)m_nNumofDim * nAveLenofSubRow;
long long nMaxSamplesInRow = (long long)m_nNumofDim * nAveLenofSubCol;
float_point *pfSubMatrix = new float_point[lMaxSubMatrixSize];
//float_point *pfSubMatrixRowMajor = new float_point[lMaxSubMatrixSize];
float_point *pfTransSamplesForAColInSubMatrix;
pfTransSamplesForAColInSubMatrix = new float_point[nMaxTransSamplesInCol];
float_point *pfSamplesForARowInSubMatrix;
// pfSamplesForARowInSubMatrix = new float_point[nMaxSamplesInRow];
//compute a few rows of Hessian matrix
float_point *pfDevTransSamples;
float_point *pfDevNumofHessianRows;
float_point *pfDevTotalSamples;
float_point *pfDevSelfDot;
checkCudaErrors(cudaMalloc((void**)&pfDevTransSamples, sizeof(float_point) * nMaxTransSamplesInCol));
checkCudaErrors(cudaMalloc((void**)&pfDevTotalSamples, sizeof(float_point) * nMaxSamplesInRow));
checkCudaErrors(cudaMalloc((void**)&pfDevNumofHessianRows, sizeof(float_point) * lMaxSubMatrixSize));
checkCudaErrors(cudaMalloc((void**)&pfDevSelfDot, sizeof(float_point) * m_nTotalNumofInstance));
checkCudaErrors(cudaMemcpy(pfDevSelfDot, pfSelfDot, sizeof(float_point) * m_nTotalNumofInstance, cudaMemcpyHostToDevice));
//compuate sub matrix
for(int iPartofCol = 0; iPartofCol < nNumofPartForACol; iPartofCol++)
{
//get sub matrix row
int nSubMatrixRow;
if(iPartofCol == nNumofPartForACol - 1)
nSubMatrixRow = pLenofEachSubCol[iPartofCol];
else
nSubMatrixRow = nAveLenofSubCol;
for(int jPartofRow = 0; jPartofRow < nNumofPartForARow; jPartofRow++)
{
int nSubMatrixCol;
//get sub matrix column
if(jPartofRow == nNumofPartForARow - 1)
nSubMatrixCol = pLenofEachSubRow[jPartofRow];
else
nSubMatrixCol = nAveLenofSubRow;
// cout << "row= " << nSubMatrixRow << " col= " << nSubMatrixCol << endl;
// cout << ".";
// cout.flush();
//allocate memory for this sub matrix
long long nHessianSubMatrixSpace = nSubMatrixRow * nSubMatrixCol;
memset(pfSubMatrix, 0, sizeof(float_point) * nHessianSubMatrixSpace);
//get the copies of sample data for computing sub matrix
//sample for sub matrix rows
/*for(int d = 0; d < m_nNumofDimensions; d++)
{
memcpy(pfSamplesForARowInSubMatrix + d * nSubMatrixRow,
pfTotalSamples + iPartofCol * nAveLenofSubCol + d * m_nTotalNumofSamples,
sizeof(float_point) * nSubMatrixRow);
}
pfTransSamplesForAColInSubMatrix = pfTransSamples + jPartofRow * nAveLenofSubRow * m_nNumofDimensions;*/
pfSamplesForARowInSubMatrix = pfTotalSamples + (long long)iPartofCol * nAveLenofSubCol * m_nNumofDim;
for(int d = 0; d < m_nNumofDim; d++)
{
//for(int k = 0; k < nSubMatrixCol; k++)
//{
//pfTransSamplesForAColInSubMatrix[k + d * nSubMatrixCol] =
//pfTransSamples[k + jPartofRow * nAveLenofSubRow + d * m_nTotalNumofSamples];
long long nSampleIndex = (long long)jPartofRow * nAveLenofSubRow + (long long)d * m_nTotalNumofInstance;
long long nIndexForSub = (long long)d * nSubMatrixCol;
memcpy(pfTransSamplesForAColInSubMatrix + nIndexForSub,
pfTransSamples + nSampleIndex, sizeof(float_point) * nSubMatrixCol);
//}
}
long long nSpaceForSamplesInRow = (long long)m_nNumofDim * nSubMatrixRow;
long long nSpaceForSamplesInCol = (long long)m_nNumofDim * nSubMatrixCol;
checkCudaErrors(cudaMemcpy(pfDevTotalSamples, pfSamplesForARowInSubMatrix,
sizeof(float_point) * nSpaceForSamplesInRow, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(pfDevTransSamples, pfTransSamplesForAColInSubMatrix,
sizeof(float_point) * nSpaceForSamplesInCol, cudaMemcpyHostToDevice));
//
//compute the value of the sub matrix
int nStartRow = iPartofCol * nAveLenofSubCol;
int nStartCol = jPartofRow * nAveLenofSubRow;
ComputeSubHessianMatrix(pfDevTotalSamples, pfDevTransSamples, pfDevSelfDot,
pfDevNumofHessianRows, nSubMatrixCol, nSubMatrixRow,
nStartRow, nStartCol);
int nHessianRowsSpace = nSubMatrixRow * nSubMatrixCol;
checkCudaErrors(cudaMemcpy(pfSubMatrix, pfDevNumofHessianRows,
sizeof(float_point) * nHessianRowsSpace, cudaMemcpyDeviceToHost));
//store the sub matrix
//hessian sub matrix info
SubMatrix subMatrix;
subMatrix.nColIndex = jPartofRow * nAveLenofSubRow;
subMatrix.nColSize = nSubMatrixCol;
subMatrix.nRowIndex = iPartofCol * nAveLenofSubCol;
subMatrix.nRowSize = nSubMatrixRow;
SaveRows(pfSubMatrix, subMatrix);
}
cout << ".";
cout.flush();
}
delete[] pfSubMatrix;
delete[] pfTransSamplesForAColInSubMatrix;
delete[] pLenofEachSubRow;
delete[] pLenofEachSubCol;
delete[] pfTotalSamples;
delete[] pfTransSamples;
delete[] pfSelfDot;
//release memory on GPU
checkCudaErrors(cudaFree(pfDevTotalSamples));
checkCudaErrors(cudaFree(pfDevTransSamples));
checkCudaErrors(cudaFree(pfDevNumofHessianRows));
checkCudaErrors(cudaFree(pfDevSelfDot));
//4/5
fclose(pHessianFile);
//5/5 is in smoSolver.h
return bReturn;
}
|
9c3407cf9121fe6eef2f51b40b0f7eab1a3f73f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <float.h>
#include <stdio.h>
__inline__ __device__ void unravel(int ray_idx, float *pixel) {
pixel[0] = ray_idx / $height;
pixel[1] = ray_idx % $height;
}
/**
* Compute the dot product between a matrix of size 4x3 and a vector 3x1
* assuming that the vector is in homogenous coordinates meaning its size is
* 3x1. Store the result as a 3d point (assuming homogenous again).
*/
__inline__ __device__ void dot_m43v3(float *m, float *v, double *out) {
// Used to normalize the 2d point into homogenous coordinates
double normalizer;
normalizer = out[0] = out[1] = out[2] = 0;
out[0] += m[0*3 + 0] * v[0];
out[0] += m[0*3 + 1] * v[1];
out[0] += m[0*3 + 2] * 1.0;
out[1] += m[1*3 + 0] * v[0];
out[1] += m[1*3 + 1] * v[1];
out[1] += m[1*3 + 2] * 1.0;
out[2] += m[2*3 + 0] * v[0];
out[2] += m[2*3 + 1] * v[1];
out[2] += m[2*3 + 2] * 1.0;
normalizer += m[3*3 + 0] * v[0];
normalizer += m[3*3 + 1] * v[1];
normalizer += m[3*3 + 2] * 1.0;
out[0] /= normalizer;
out[1] /= normalizer;
out[2] /= normalizer;
}
/**
* Sample uniform points in a bounding box
*/
__inline__ __device__ void sample_in_bbox(
int ray_idx,
float * P_inv,
float * camera_center,
float * ray_start,
float * ray_end
) {
float pixel[2], dir[3];
// Get the pixel based on the ray index
unravel(ray_idx, pixel);
// Project the 2d pixel and get the corresponding ray
double ray[3];
dot_m43v3(P_inv, pixel, ray);
for (int i=0; i<3; i++) {
dir[i] = ray[i] - camera_center[i];
}
float t_near = -INFINITY;
float t_far = INFINITY;
float t1, t2, t_near_actual, t_far_actual;
t1 = ($bbox_min_x - camera_center[0]) / dir[0];
t2 = ($bbox_max_x - camera_center[0]) / dir[0];
t_near = max(min(t1, t2), t_near);
t_far = min(max(t1, t2), t_far);
t1 = ($bbox_min_y - camera_center[1]) / dir[1];
t2 = ($bbox_max_y - camera_center[1]) / dir[1];
t_near = max(min(t1, t2), t_near);
t_far = min(max(t1, t2), t_far);
t1 = ($bbox_min_z - camera_center[2]) / dir[2];
t2 = ($bbox_max_z - camera_center[2]) / dir[2];
t_near = max(min(t1, t2), t_near);
t_far = min(max(t1, t2), t_far);
// Swap t_near and t_far in case of negative values
float near_mask = abs(t_near) < abs(t_far);
t_near_actual = t_near * near_mask + t_far * (1 - near_mask);
t_far_actual = (1 - near_mask) * t_near + near_mask * t_far;
// Compute the ray_start and ray_end
for (int i=0; i<3; i++) {
ray_start[i] = camera_center[i] + t_near_actual*dir[i];
ray_end[i] = camera_center[i] + t_far_actual*dir[i];
}
}
__global__ void batch_sample_points_in_bbox(
int n_rays,
int * ray_idxs,
float * P_inv,
float * camera_center,
float * points
) {
// Compute the ray that this thread is going to be computing stuff for
int r = threadIdx.x + blockDim.x * blockIdx.x;
if (r >= n_rays)
return;
// Get the ray start and ray_end for the current ray
float ray_start[3], ray_end[3];
$sampling_scheme(
ray_idxs[r],
P_inv,
camera_center,
ray_start,
ray_end
);
int offset = r * $depth_planes * 4;
// Get the rest of the uniformly sampled points
for (int k=0; k<$depth_planes; k++) {
points[offset + 4*k] = ray_start[0] + k*(ray_end[0] - ray_start[0])/($depth_planes - 1);
points[offset + 4*k + 1] = ray_start[1] + k*(ray_end[1] - ray_start[1])/($depth_planes - 1);
points[offset + 4*k + 2] = ray_start[2] + k*(ray_end[2] - ray_start[2])/($depth_planes - 1);
points[offset + 4*k + 3] = 1.0;
}
}
| 9c3407cf9121fe6eef2f51b40b0f7eab1a3f73f9.cu | #include <float.h>
#include <stdio.h>
__inline__ __device__ void unravel(int ray_idx, float *pixel) {
pixel[0] = ray_idx / $height;
pixel[1] = ray_idx % $height;
}
/**
* Compute the dot product between a matrix of size 4x3 and a vector 3x1
* assuming that the vector is in homogenous coordinates meaning its size is
* 3x1. Store the result as a 3d point (assuming homogenous again).
*/
__inline__ __device__ void dot_m43v3(float *m, float *v, double *out) {
// Used to normalize the 2d point into homogenous coordinates
double normalizer;
normalizer = out[0] = out[1] = out[2] = 0;
out[0] += m[0*3 + 0] * v[0];
out[0] += m[0*3 + 1] * v[1];
out[0] += m[0*3 + 2] * 1.0;
out[1] += m[1*3 + 0] * v[0];
out[1] += m[1*3 + 1] * v[1];
out[1] += m[1*3 + 2] * 1.0;
out[2] += m[2*3 + 0] * v[0];
out[2] += m[2*3 + 1] * v[1];
out[2] += m[2*3 + 2] * 1.0;
normalizer += m[3*3 + 0] * v[0];
normalizer += m[3*3 + 1] * v[1];
normalizer += m[3*3 + 2] * 1.0;
out[0] /= normalizer;
out[1] /= normalizer;
out[2] /= normalizer;
}
/**
* Sample uniform points in a bounding box
*/
__inline__ __device__ void sample_in_bbox(
int ray_idx,
float * P_inv,
float * camera_center,
float * ray_start,
float * ray_end
) {
float pixel[2], dir[3];
// Get the pixel based on the ray index
unravel(ray_idx, pixel);
// Project the 2d pixel and get the corresponding ray
double ray[3];
dot_m43v3(P_inv, pixel, ray);
for (int i=0; i<3; i++) {
dir[i] = ray[i] - camera_center[i];
}
float t_near = -INFINITY;
float t_far = INFINITY;
float t1, t2, t_near_actual, t_far_actual;
t1 = ($bbox_min_x - camera_center[0]) / dir[0];
t2 = ($bbox_max_x - camera_center[0]) / dir[0];
t_near = max(min(t1, t2), t_near);
t_far = min(max(t1, t2), t_far);
t1 = ($bbox_min_y - camera_center[1]) / dir[1];
t2 = ($bbox_max_y - camera_center[1]) / dir[1];
t_near = max(min(t1, t2), t_near);
t_far = min(max(t1, t2), t_far);
t1 = ($bbox_min_z - camera_center[2]) / dir[2];
t2 = ($bbox_max_z - camera_center[2]) / dir[2];
t_near = max(min(t1, t2), t_near);
t_far = min(max(t1, t2), t_far);
// Swap t_near and t_far in case of negative values
float near_mask = abs(t_near) < abs(t_far);
t_near_actual = t_near * near_mask + t_far * (1 - near_mask);
t_far_actual = (1 - near_mask) * t_near + near_mask * t_far;
// Compute the ray_start and ray_end
for (int i=0; i<3; i++) {
ray_start[i] = camera_center[i] + t_near_actual*dir[i];
ray_end[i] = camera_center[i] + t_far_actual*dir[i];
}
}
__global__ void batch_sample_points_in_bbox(
int n_rays,
int * ray_idxs,
float * P_inv,
float * camera_center,
float * points
) {
// Compute the ray that this thread is going to be computing stuff for
int r = threadIdx.x + blockDim.x * blockIdx.x;
if (r >= n_rays)
return;
// Get the ray start and ray_end for the current ray
float ray_start[3], ray_end[3];
$sampling_scheme(
ray_idxs[r],
P_inv,
camera_center,
ray_start,
ray_end
);
int offset = r * $depth_planes * 4;
// Get the rest of the uniformly sampled points
for (int k=0; k<$depth_planes; k++) {
points[offset + 4*k] = ray_start[0] + k*(ray_end[0] - ray_start[0])/($depth_planes - 1);
points[offset + 4*k + 1] = ray_start[1] + k*(ray_end[1] - ray_start[1])/($depth_planes - 1);
points[offset + 4*k + 2] = ray_start[2] + k*(ray_end[2] - ray_start[2])/($depth_planes - 1);
points[offset + 4*k + 3] = 1.0;
}
}
|
dd1396ab0a96d3e1bf7e1f271b83fd78a75d1732.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorRandom.h"
#include "THHDeviceUtils.cuh"
#include "THHGeneral.h"
#include "THHTensorCopy.h"
#include "THHTensorMath.h"
#include "THHReduceApplyUtils.cuh"
#include "THHTensorRandom.cuh"
#include <thrust/functional.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_mtgp32_host.h>
#include <rocrand/rocrand_mtgp32_11213.h>
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
THCGenerator* THCRandom_getGenerator(THCState* state);
/* Sets up generator. Allocates but does not create the generator states. */
__host__ void initializeGenerator(THCState *state, THCGenerator* gen)
{
THCudaCheck(THCudaMalloc(state, (void**)&gen->gen_states, MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t)));
THCudaCheck(THCudaMalloc(state, (void**)&gen->kernel_params, sizeof(mtgp32_kernel_params_t)));
}
/* Creates a new generator state given the seed. */
__host__ void createGeneratorState(THCGenerator* gen, uint64_t seed)
{
if (hiprandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->kernel_params) != HIPRAND_STATUS_SUCCESS)
{
THError("Creating MTGP constants failed.");
}
if (hiprandMakeMTGP32KernelState(gen->gen_states, mtgp32dc_params_fast_11213,
gen->kernel_params, MAX_NUM_BLOCKS, seed) != HIPRAND_STATUS_SUCCESS)
{
THError("Creating MTGP kernel state failed.");
}
}
__host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state)
{
THCGenerator* gen = THCRandom_getGenerator(state);
// The RNG state comprises the MTPG32 states, the seed, and an offset used for Philox
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t offset_size = sizeof(gen->philox_seed_offset);
static const size_t total_size = states_size + seed_size + offset_size;
THByteTensor_resize1d(rng_state, total_size);
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(hipMemcpy(THByteTensor_data(rng_state), gen->gen_states,
states_size, hipMemcpyDeviceToHost));
memcpy(THByteTensor_data(rng_state) + states_size, &gen->initial_seed, seed_size);
memcpy(THByteTensor_data(rng_state) + states_size + seed_size, &gen->philox_seed_offset, offset_size);
}
__global__ void set_rngstate_kernel(hiprandStateMtgp32_t *state, mtgp32_kernel_params_t *kernel)
{
state[threadIdx.x].k = kernel;
}
__host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state)
{
THCGenerator* gen = THCRandom_getGenerator(state);
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t offset_size = sizeof(gen->philox_seed_offset);
static const size_t total_size = states_size + seed_size + offset_size;
bool no_philox_seed = false;
if (THByteTensor_nElement(rng_state) == total_size - offset_size) {
no_philox_seed = true;
}
else {
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
}
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(hipMemcpy(gen->gen_states, THByteTensor_data(rng_state),
states_size, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( set_rngstate_kernel), dim3(1), dim3(MAX_NUM_BLOCKS), 0, THCState_getCurrentStream(state),
gen->gen_states, gen->kernel_params);
memcpy(&gen->initial_seed, THByteTensor_data(rng_state) + states_size, seed_size);
if (!no_philox_seed) {
memcpy(&gen->philox_seed_offset, THByteTensor_data(rng_state) + states_size + seed_size, offset_size);
}
else {
gen->philox_seed_offset = 0;
}
}
// Goes from (0, 1] to [0, 1). Note 1-x is not sufficient since for some floats
// eps near 0, 1-eps will round to 1.
template <typename T>
__device__ inline T reverse_bounds(T value) {
if (THCNumerics<T>::eq(value, ScalarConvert<int, T>::to(1))) {
return ScalarConvert<int, T>::to(0);
}
return value;
}
#ifdef CUDA_HALF_TENSOR
__device__ inline half half_uniform_scale_and_shift(float x, double a, double b) {
half width = ScalarConvert<double, half>::to(b - a);
half start = ScalarConvert<double, half>::to(a);
half scaled = THCNumerics<half>::mul(reverse_bounds(ScalarConvert<float, half>::to(x)), width);
return THCNumerics<half>::add(scaled, start);
}
#endif
#define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
#define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(hiprandStateMtgp32_t *state, int size, T *result, ARG1, ARG2) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
template<typename T, typename U>
struct is_same { static const bool value = false; };
template<typename T>
struct is_same<T, T> { static const bool value = true; };
template<typename real, typename prob_type>
__global__ void generate_bernoulli_tensor(hiprandStateMtgp32_t *state, int size,
real *result, prob_type *probs)
{
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE;
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) {
if (is_same<prob_type, double>::value) {
double x = hiprand_uniform_double(&state[blockIdx.x]);
if (i < size)
result[i] = ScalarConvert<bool, real>::to(x <= probs[i]);
} else {
float x = hiprand_uniform(&state[blockIdx.x]);
if (i < size)
result[i] = ScalarConvert<bool, real>::to(x <= probs[i]);
}
}
}
// NOTE: hiprand_uniform is (0, 1] and we want [a, b)
GENERATE_KERNEL2(generate_uniform, float, float a, float b, float, hiprand_uniform, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, hiprand_uniform_double, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, hiprand_normal, (x * stdv) + mean)
GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, hiprand_normal_double, (x * stdv) + mean)
GENERATE_KERNEL1(generate_exponential, float, double lambda, float, hiprand_uniform, (float)(-1. / lambda * log(x)))
GENERATE_KERNEL1(generate_exponential, double, double lambda, double, hiprand_uniform_double, (double)(-1. / lambda * log(x)))
GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, hiprand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5))))
GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, hiprand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5))))
#ifdef CUDA_HALF_TENSOR
GENERATE_KERNEL2(generate_uniform, half, double a, double b, float, hiprand_uniform, (half_uniform_scale_and_shift(x, a, b)))
GENERATE_KERNEL2(generate_normal, half, double mean, double stdv, float, hiprand_normal, (ScalarConvert<float, half>::to((x * stdv) + mean)))
GENERATE_KERNEL1(generate_exponential, half, double lambda, float, hiprand_uniform, (ScalarConvert<float, half>::to((float)(-1. / lambda * log(x)))))
GENERATE_KERNEL2(generate_cauchy, half, double median, double sigma, float, hiprand_uniform, (ScalarConvert<float, half>::to((float)(median + sigma * tan(M_PI*(x-0.5))))))
#endif // CUDA_HALF_TENSOR
#include "generic/THCTensorRandom.cu"
#include "THHGenerateAllTypes.h"
#undef GENERATE_KERNEL1
#undef GENERATE_KERNEL2
| dd1396ab0a96d3e1bf7e1f271b83fd78a75d1732.cu | #include "THCTensorRandom.h"
#include "THCDeviceUtils.cuh"
#include "THCGeneral.h"
#include "THCTensorCopy.h"
#include "THCTensorMath.h"
#include "THCReduceApplyUtils.cuh"
#include "THCTensorRandom.cuh"
#include <thrust/functional.h>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_mtgp32_host.h>
#include <curand_mtgp32dc_p_11213.h>
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
THCGenerator* THCRandom_getGenerator(THCState* state);
/* Sets up generator. Allocates but does not create the generator states. */
__host__ void initializeGenerator(THCState *state, THCGenerator* gen)
{
THCudaCheck(THCudaMalloc(state, (void**)&gen->gen_states, MAX_NUM_BLOCKS * sizeof(curandStateMtgp32)));
THCudaCheck(THCudaMalloc(state, (void**)&gen->kernel_params, sizeof(mtgp32_kernel_params)));
}
/* Creates a new generator state given the seed. */
__host__ void createGeneratorState(THCGenerator* gen, uint64_t seed)
{
if (curandMakeMTGP32Constants(mtgp32dc_params_fast_11213, gen->kernel_params) != CURAND_STATUS_SUCCESS)
{
THError("Creating MTGP constants failed.");
}
if (curandMakeMTGP32KernelState(gen->gen_states, mtgp32dc_params_fast_11213,
gen->kernel_params, MAX_NUM_BLOCKS, seed) != CURAND_STATUS_SUCCESS)
{
THError("Creating MTGP kernel state failed.");
}
}
__host__ void THCRandom_getRNGState(THCState* state, THByteTensor *rng_state)
{
THCGenerator* gen = THCRandom_getGenerator(state);
// The RNG state comprises the MTPG32 states, the seed, and an offset used for Philox
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t offset_size = sizeof(gen->philox_seed_offset);
static const size_t total_size = states_size + seed_size + offset_size;
THByteTensor_resize1d(rng_state, total_size);
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(cudaMemcpy(THByteTensor_data(rng_state), gen->gen_states,
states_size, cudaMemcpyDeviceToHost));
memcpy(THByteTensor_data(rng_state) + states_size, &gen->initial_seed, seed_size);
memcpy(THByteTensor_data(rng_state) + states_size + seed_size, &gen->philox_seed_offset, offset_size);
}
__global__ void set_rngstate_kernel(curandStateMtgp32 *state, mtgp32_kernel_params *kernel)
{
state[threadIdx.x].k = kernel;
}
__host__ void THCRandom_setRNGState(THCState* state, THByteTensor *rng_state)
{
THCGenerator* gen = THCRandom_getGenerator(state);
static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32);
static const size_t seed_size = sizeof(gen->initial_seed);
static const size_t offset_size = sizeof(gen->philox_seed_offset);
static const size_t total_size = states_size + seed_size + offset_size;
bool no_philox_seed = false;
if (THByteTensor_nElement(rng_state) == total_size - offset_size) {
no_philox_seed = true;
}
else {
THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size");
}
THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous");
THCudaCheck(cudaMemcpy(gen->gen_states, THByteTensor_data(rng_state),
states_size, cudaMemcpyHostToDevice));
set_rngstate_kernel<<<1, MAX_NUM_BLOCKS, 0, THCState_getCurrentStream(state)>>>(
gen->gen_states, gen->kernel_params);
memcpy(&gen->initial_seed, THByteTensor_data(rng_state) + states_size, seed_size);
if (!no_philox_seed) {
memcpy(&gen->philox_seed_offset, THByteTensor_data(rng_state) + states_size + seed_size, offset_size);
}
else {
gen->philox_seed_offset = 0;
}
}
// Goes from (0, 1] to [0, 1). Note 1-x is not sufficient since for some floats
// eps near 0, 1-eps will round to 1.
template <typename T>
__device__ inline T reverse_bounds(T value) {
if (THCNumerics<T>::eq(value, ScalarConvert<int, T>::to(1))) {
return ScalarConvert<int, T>::to(0);
}
return value;
}
#ifdef CUDA_HALF_TENSOR
__device__ inline half half_uniform_scale_and_shift(float x, double a, double b) {
half width = ScalarConvert<double, half>::to(b - a);
half start = ScalarConvert<double, half>::to(a);
half scaled = THCNumerics<half>::mul(reverse_bounds(ScalarConvert<float, half>::to(x)), width);
return THCNumerics<half>::add(scaled, start);
}
#endif
#define GENERATE_KERNEL1(NAME, T, ARG1, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
#define GENERATE_KERNEL2(NAME, T, ARG1, ARG2, CURAND_T, CURAND_FUNC, TRANSFORM) \
__global__ void NAME(curandStateMtgp32 *state, int size, T *result, ARG1, ARG2) \
{ \
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x; \
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE; \
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) { \
CURAND_T x = CURAND_FUNC(&state[blockIdx.x]); \
if (i < size) { \
T y = TRANSFORM; \
result[i] = y; \
} \
} \
}
template<typename T, typename U>
struct is_same { static const bool value = false; };
template<typename T>
struct is_same<T, T> { static const bool value = true; };
template<typename real, typename prob_type>
__global__ void generate_bernoulli_tensor(curandStateMtgp32 *state, int size,
real *result, prob_type *probs)
{
int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int rounded_size = THCCeilDiv(size, BLOCK_SIZE) * BLOCK_SIZE;
for (int i = idx; i < rounded_size; i += BLOCK_SIZE * MAX_NUM_BLOCKS) {
if (is_same<prob_type, double>::value) {
double x = curand_uniform_double(&state[blockIdx.x]);
if (i < size)
result[i] = ScalarConvert<bool, real>::to(x <= probs[i]);
} else {
float x = curand_uniform(&state[blockIdx.x]);
if (i < size)
result[i] = ScalarConvert<bool, real>::to(x <= probs[i]);
}
}
}
// NOTE: curand_uniform is (0, 1] and we want [a, b)
GENERATE_KERNEL2(generate_uniform, float, float a, float b, float, curand_uniform, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_uniform, double, double a, double b, double, curand_uniform_double, reverse_bounds(x) * (b-a) + a)
GENERATE_KERNEL2(generate_normal, float, double mean, double stdv, float, curand_normal, (x * stdv) + mean)
GENERATE_KERNEL2(generate_normal, double, double mean, double stdv, double, curand_normal_double, (x * stdv) + mean)
GENERATE_KERNEL1(generate_exponential, float, double lambda, float, curand_uniform, (float)(-1. / lambda * log(x)))
GENERATE_KERNEL1(generate_exponential, double, double lambda, double, curand_uniform_double, (double)(-1. / lambda * log(x)))
GENERATE_KERNEL2(generate_cauchy, float, double median, double sigma, float, curand_uniform, (float)(median + sigma * tan(M_PI*(x-0.5))))
GENERATE_KERNEL2(generate_cauchy, double, double median, double sigma, double, curand_uniform_double, (double)(median + sigma * tan(M_PI*(x-0.5))))
#ifdef CUDA_HALF_TENSOR
GENERATE_KERNEL2(generate_uniform, half, double a, double b, float, curand_uniform, (half_uniform_scale_and_shift(x, a, b)))
GENERATE_KERNEL2(generate_normal, half, double mean, double stdv, float, curand_normal, (ScalarConvert<float, half>::to((x * stdv) + mean)))
GENERATE_KERNEL1(generate_exponential, half, double lambda, float, curand_uniform, (ScalarConvert<float, half>::to((float)(-1. / lambda * log(x)))))
GENERATE_KERNEL2(generate_cauchy, half, double median, double sigma, float, curand_uniform, (ScalarConvert<float, half>::to((float)(median + sigma * tan(M_PI*(x-0.5))))))
#endif // CUDA_HALF_TENSOR
#include "generic/THCTensorRandom.cu"
#include "THCGenerateAllTypes.h"
#undef GENERATE_KERNEL1
#undef GENERATE_KERNEL2
|
b03fbff55e1afb37550ffa1c255edeafcc86e66e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Accumulates an image-smoothness gradient into `delta`: for each element of
// the BCHW-packed image x (n = w*h*c*b, one thread per element), adds
// rate*(neighbour - centre) over a size x size window anchored at the pixel,
// skipping neighbours that fall outside the image bounds.
__global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta)
{
// Linearise the (possibly 2D) grid into one thread id and guard the tail.
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
// Decompose the flat id into (column j, row i, channel k, batch b),
// matching a BCHW layout with x fastest.
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
// -(size/2.f) truncates toward zero, same value as integer -(size/2) for
// positive size — the half-window offset that centres the window.
int w_offset = -(size/2.f);
int h_offset = -(size/2.f);
int out_index = j + w*(i + h*(k + c*b));
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i + l;
int cur_w = w_offset + j + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
// x[index] is only evaluated when valid is non-zero (ternary
// short-circuits), so the out-of-range index is never dereferenced.
delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0;
}
}
} | b03fbff55e1afb37550ffa1c255edeafcc86e66e.cu | #include "includes.h"
// Accumulates an image-smoothness gradient into `delta`: for each element of
// the BCHW-packed image x (n = w*h*c*b, one thread per element), adds
// rate*(neighbour - centre) over a size x size window anchored at the pixel,
// skipping neighbours that fall outside the image bounds.
__global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta)
{
// Linearise the (possibly 2D) grid into one thread id and guard the tail.
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
// Decompose the flat id into (column j, row i, channel k, batch b),
// matching a BCHW layout with x fastest.
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
// -(size/2.f) truncates toward zero, same value as integer -(size/2) for
// positive size — the half-window offset that centres the window.
int w_offset = -(size/2.f);
int h_offset = -(size/2.f);
int out_index = j + w*(i + h*(k + c*b));
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i + l;
int cur_w = w_offset + j + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
// x[index] is only evaluated when valid is non-zero (ternary
// short-circuits), so the out-of-range index is never dereferenced.
delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0;
}
}
} |
37c18a8852ee45e4758fcfe2021271f9f9a10e9a.hip | // !!! This is a file automatically generated by hipify!!!
/*
The implementation of this file is based on qkvToContext plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Modifications: scaling is moved from masked softmax to the gemm before that.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hip/hip_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "attention_impl.h"
#include "attention_softmax.h"
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Rounds `a` up to the nearest multiple of `b` (b must be non-zero).
static size_t AlignTo(size_t a, size_t b) {
const size_t whole_blocks = CeilDiv(a, b);
return whole_blocks * b;
}
// Bytes needed for one BxNxSxS* attention scratch buffer, rounded up to a
// 256-byte boundary so consecutively packed buffers stay aligned.
size_t GetAttentionScratchSize(size_t element_size, int batch_size, int num_heads, int sequence_length, int all_sequence_length) {
const size_t alignment = 256;
// Element count deliberately computed in int arithmetic, matching the
// caller-supplied types, then widened for the byte count.
const size_t element_count = batch_size * num_heads * sequence_length * all_sequence_length;
const size_t raw_bytes = element_count * element_size;
// Round up to the next multiple of `alignment`.
return (raw_bytes + alignment - 1) / alignment * alignment;
}
// Total workspace bytes for attention: one 3xBxSxNxH QKV buffer plus two
// aligned scratch buffers sized for the full (past + current) sequence.
size_t GetAttentionWorkspaceSize(
size_t element_size,
int batch_size,
int num_heads,
int head_size,
int sequence_length,
int past_sequence_length) {
const size_t qkv_bytes = 3 * batch_size * sequence_length * num_heads * head_size * element_size;
const int all_sequence_length = past_sequence_length + sequence_length;
const size_t scratch_bytes = GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, all_sequence_length);
return qkv_bytes + 2 * scratch_bytes;
}
// Computes the attention context from packed QKV input.
// Workspace layout (from GetAttentionWorkspaceSize): [scratch1 | scratch2 |
// scratch3], the first two sized BxNxSxS* and the third holding the 3xBxNxSxH
// transposed QKV. Pipeline: transpose QKV -> optionally append past K/V to
// present -> scaled Q*K^T -> masked softmax -> P*V -> transpose to BxSxNxH.
// Returns false as soon as any launch or BLAS call reports failure.
template <typename T>
bool QkvToContext(
const hipDeviceProp_t& prop, hipblasHandle_t& cublas, hipStream_t stream,
const int batch_size, const int sequence_length, const int num_heads, const int head_size, const size_t element_size,
const T* input, T* output, T* workspace,
const int* mask_index, const std::vector<int64_t>* mask_index_dims,
bool is_unidirectional, int past_sequence_length, const T* past, T* present) {
const int all_sequence_length = past_sequence_length + sequence_length;
const size_t bytes = GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, all_sequence_length);
// Partition the caller-provided workspace; `bytes` is per-scratch-buffer.
T* scratch1 = workspace;
T* scratch2 = scratch1 + (bytes / element_size);
T* scratch3 = scratch2 + (bytes / element_size);
const int max_threads_per_block(prop.maxThreadsPerBlock);
// input should be BxSx3xNxH => scratch3: 3xBxNxSxH
if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, input, scratch3)) {
return false;
}
// now scratch3 has Q, K, V: each has size BxNxSxH
const int batches = batch_size * num_heads;
const int size_per_batch = sequence_length * head_size;
const int total_size = batches * size_per_batch;
const T* q = scratch3;
const T* k = q + total_size;
const T* v = k + total_size;
hipblasSetStream(cublas, stream);
CublasMathModeSetter helper(prop, cublas, CUBLAS_TENSOR_OP_MATH);
// Concat past (2xBxNxS'xH) to present (2xBxNxS*xH):
// past_k (BxNxS'xH) + k (BxNxSxH) => present_k (BxNxS*xH)
// past_v (BxNxS'xH) + v (BxNxSxH) => present_v (BxNxS*xH)
const int present_size_per_batch = all_sequence_length * head_size;
if (nullptr != present) {
if (!LaunchConcatPastToPresent(stream, all_sequence_length, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, past, k, present)) {
return false;
}
// update pointers to present_k and present_v.
k = present;
v = present + batches * present_size_per_batch;
}
// Raw attention mask could be 2D (BxS) or 3D (BxSxS*)
bool use_raw_attention_mask = (nullptr != mask_index && nullptr != mask_index_dims && mask_index_dims->size() >= 2);
// compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS*
// Q: BxNxSxH, K (present_k): BxNxS*xH, Q*K': BxNxSxS*
const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
const int temp_matrix_size = sequence_length * all_sequence_length;
// For a raw attention mask, the 1/sqrt(H) scale is moved into the softmax computation.
T alpha = (T)(use_raw_attention_mask ? 1.0f : rsqrt_head_size);
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, all_sequence_length, sequence_length, head_size, alpha, k, head_size, present_size_per_batch,
q, head_size, size_per_batch, 0.f, scratch1, all_sequence_length, temp_matrix_size, batches))) {
return false;
}
// apply softmax and store result P to scratch2: BxNxSxS*
if (use_raw_attention_mask) { // 2d or 3d attention mask
if (!ComputeSoftmaxWithRawMask<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, scratch1, scratch2, is_unidirectional, rsqrt_head_size, static_cast<int>(mask_index_dims->size()))) {
return false;
}
} else if (nullptr != mask_index) { // 1d mask index
ORT_ENFORCE(nullptr != mask_index_dims && mask_index_dims->size() == 1);
// mask_index has 1D shape: either (batch_size) or (2*batch_size). Only the latter has start positions.
const int* mask_start = (mask_index_dims->at(0) > batch_size) ? mask_index + batch_size : nullptr;
if (!ComputeSoftmaxWithMask1D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, mask_start, scratch1, scratch2, is_unidirectional)) {
return false;
}
} else { // no mask
if (!ComputeSoftmax<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, scratch1, scratch2, is_unidirectional)) {
return false;
}
}
// compute P*V (as V*P), and store in scratch3: BxNxSxH
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, head_size, sequence_length, all_sequence_length, 1.f, v, head_size, present_size_per_batch,
scratch2, all_sequence_length, temp_matrix_size, 0.f, scratch3, head_size, size_per_batch, batches))) {
return false;
}
// scratch3 is BxNxSxH, transpose to output BxSxNxH
return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, scratch3, output);
}
// Type-dispatching entry point for attention: element_size selects the fp16
// (2 bytes) or fp32 path, then all arguments are forwarded unchanged to the
// templated QkvToContext implementation. Returns false on any failure.
bool LaunchAttentionKernel(
const hipDeviceProp_t& prop,
hipStream_t stream,
const void* input,
const int* mask_index,
const std::vector<int64_t>* mask_index_dims,
void* output,
const int batch_size,
const int sequence_length,
const int num_heads,
const int head_size,
void* workspace,
hipblasHandle_t& cublas,
const size_t element_size,
bool is_unidirectional,
int past_sequence_length,
const void* past,
void* present) {
// Half-precision path.
if (element_size == 2) {
return QkvToContext(prop, cublas, stream,
batch_size, sequence_length, num_heads, head_size, element_size,
static_cast<const half*>(input), static_cast<half*>(output), static_cast<half*>(workspace),
mask_index, mask_index_dims, is_unidirectional,
past_sequence_length, static_cast<const half*>(past), static_cast<half*>(present));
}
// Single-precision path.
return QkvToContext(prop, cublas, stream,
batch_size, sequence_length, num_heads, head_size, element_size,
static_cast<const float*>(input), static_cast<float*>(output), static_cast<float*>(workspace),
mask_index, mask_index_dims, is_unidirectional,
past_sequence_length, static_cast<const float*>(past), static_cast<float*>(present));
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 37c18a8852ee45e4758fcfe2021271f9f9a10e9a.cu | /*
The implementation of this file is based on qkvToContext plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Modifications: scaling is moved from masked softmax to the gemm before that.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cuda_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "attention_impl.h"
#include "attention_softmax.h"
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
// Rounds `a` up to the nearest multiple of `b` (b must be non-zero).
static size_t AlignTo(size_t a, size_t b) {
const size_t whole_blocks = CeilDiv(a, b);
return whole_blocks * b;
}
// Bytes needed for one BxNxSxS* attention scratch buffer, rounded up to a
// 256-byte boundary so consecutively packed buffers stay aligned.
size_t GetAttentionScratchSize(size_t element_size, int batch_size, int num_heads, int sequence_length, int all_sequence_length) {
const size_t alignment = 256;
// Element count deliberately computed in int arithmetic, matching the
// caller-supplied types, then widened for the byte count.
const size_t element_count = batch_size * num_heads * sequence_length * all_sequence_length;
const size_t raw_bytes = element_count * element_size;
// Round up to the next multiple of `alignment`.
return (raw_bytes + alignment - 1) / alignment * alignment;
}
// Total workspace bytes for attention: one 3xBxSxNxH QKV buffer plus two
// aligned scratch buffers sized for the full (past + current) sequence.
size_t GetAttentionWorkspaceSize(
size_t element_size,
int batch_size,
int num_heads,
int head_size,
int sequence_length,
int past_sequence_length) {
const size_t qkv_bytes = 3 * batch_size * sequence_length * num_heads * head_size * element_size;
const int all_sequence_length = past_sequence_length + sequence_length;
const size_t scratch_bytes = GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, all_sequence_length);
return qkv_bytes + 2 * scratch_bytes;
}
// Computes the attention context from packed QKV input.
// Workspace layout (from GetAttentionWorkspaceSize): [scratch1 | scratch2 |
// scratch3], the first two sized BxNxSxS* and the third holding the 3xBxNxSxH
// transposed QKV. Pipeline: transpose QKV -> optionally append past K/V to
// present -> scaled Q*K^T -> masked softmax -> P*V -> transpose to BxSxNxH.
// Returns false as soon as any launch or BLAS call reports failure.
template <typename T>
bool QkvToContext(
const cudaDeviceProp& prop, cublasHandle_t& cublas, cudaStream_t stream,
const int batch_size, const int sequence_length, const int num_heads, const int head_size, const size_t element_size,
const T* input, T* output, T* workspace,
const int* mask_index, const std::vector<int64_t>* mask_index_dims,
bool is_unidirectional, int past_sequence_length, const T* past, T* present) {
const int all_sequence_length = past_sequence_length + sequence_length;
const size_t bytes = GetAttentionScratchSize(element_size, batch_size, num_heads, sequence_length, all_sequence_length);
// Partition the caller-provided workspace; `bytes` is per-scratch-buffer.
T* scratch1 = workspace;
T* scratch2 = scratch1 + (bytes / element_size);
T* scratch3 = scratch2 + (bytes / element_size);
const int max_threads_per_block(prop.maxThreadsPerBlock);
// input should be BxSx3xNxH => scratch3: 3xBxNxSxH
if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, input, scratch3)) {
return false;
}
// now scratch3 has Q, K, V: each has size BxNxSxH
const int batches = batch_size * num_heads;
const int size_per_batch = sequence_length * head_size;
const int total_size = batches * size_per_batch;
const T* q = scratch3;
const T* k = q + total_size;
const T* v = k + total_size;
cublasSetStream(cublas, stream);
CublasMathModeSetter helper(prop, cublas, CUBLAS_TENSOR_OP_MATH);
// Concat past (2xBxNxS'xH) to present (2xBxNxS*xH):
// past_k (BxNxS'xH) + k (BxNxSxH) => present_k (BxNxS*xH)
// past_v (BxNxS'xH) + v (BxNxSxH) => present_v (BxNxS*xH)
const int present_size_per_batch = all_sequence_length * head_size;
if (nullptr != present) {
if (!LaunchConcatPastToPresent(stream, all_sequence_length, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, past, k, present)) {
return false;
}
// update pointers to present_k and present_v.
k = present;
v = present + batches * present_size_per_batch;
}
// Raw attention mask could be 2D (BxS) or 3D (BxSxS*)
bool use_raw_attention_mask = (nullptr != mask_index && nullptr != mask_index_dims && mask_index_dims->size() >= 2);
// compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS*
// Q: BxNxSxH, K (present_k): BxNxS*xH, Q*K': BxNxSxS*
const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
const int temp_matrix_size = sequence_length * all_sequence_length;
// For a raw attention mask, the 1/sqrt(H) scale is moved into the softmax computation.
T alpha = (T)(use_raw_attention_mask ? 1.0f : rsqrt_head_size);
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, CUBLAS_OP_T, CUBLAS_OP_N, all_sequence_length, sequence_length, head_size, alpha, k, head_size, present_size_per_batch,
q, head_size, size_per_batch, 0.f, scratch1, all_sequence_length, temp_matrix_size, batches))) {
return false;
}
// apply softmax and store result P to scratch2: BxNxSxS*
if (use_raw_attention_mask) { // 2d or 3d attention mask
if (!ComputeSoftmaxWithRawMask<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, scratch1, scratch2, is_unidirectional, rsqrt_head_size, static_cast<int>(mask_index_dims->size()))) {
return false;
}
} else if (nullptr != mask_index) { // 1d mask index
ORT_ENFORCE(nullptr != mask_index_dims && mask_index_dims->size() == 1);
// mask_index has 1D shape: either (batch_size) or (2*batch_size). Only the latter has start positions.
const int* mask_start = (mask_index_dims->at(0) > batch_size) ? mask_index + batch_size : nullptr;
if (!ComputeSoftmaxWithMask1D<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, mask_index, mask_start, scratch1, scratch2, is_unidirectional)) {
return false;
}
} else { // no mask
if (!ComputeSoftmax<T>(stream, all_sequence_length, sequence_length, batch_size, num_heads, scratch1, scratch2, is_unidirectional)) {
return false;
}
}
// compute P*V (as V*P), and store in scratch3: BxNxSxH
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, CUBLAS_OP_N, CUBLAS_OP_N, head_size, sequence_length, all_sequence_length, 1.f, v, head_size, present_size_per_batch,
scratch2, all_sequence_length, temp_matrix_size, 0.f, scratch3, head_size, size_per_batch, batches))) {
return false;
}
// scratch3 is BxNxSxH, transpose to output BxSxNxH
return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, max_threads_per_block, scratch3, output);
}
// Type-dispatching entry point for attention: element_size selects the fp16
// (2 bytes) or fp32 path, then all arguments are forwarded unchanged to the
// templated QkvToContext implementation. Returns false on any failure.
bool LaunchAttentionKernel(
const cudaDeviceProp& prop,
cudaStream_t stream,
const void* input,
const int* mask_index,
const std::vector<int64_t>* mask_index_dims,
void* output,
const int batch_size,
const int sequence_length,
const int num_heads,
const int head_size,
void* workspace,
cublasHandle_t& cublas,
const size_t element_size,
bool is_unidirectional,
int past_sequence_length,
const void* past,
void* present) {
// Half-precision path.
if (element_size == 2) {
return QkvToContext(prop, cublas, stream,
batch_size, sequence_length, num_heads, head_size, element_size,
static_cast<const half*>(input), static_cast<half*>(output), static_cast<half*>(workspace),
mask_index, mask_index_dims, is_unidirectional,
past_sequence_length, static_cast<const half*>(past), static_cast<half*>(present));
}
// Single-precision path.
return QkvToContext(prop, cublas, stream,
batch_size, sequence_length, num_heads, head_size, element_size,
static_cast<const float*>(input), static_cast<float*>(output), static_cast<float*>(workspace),
mask_index, mask_index_dims, is_unidirectional,
past_sequence_length, static_cast<const float*>(past), static_cast<float*>(present));
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
d8a74236c78cc952c7ef666676ec6c0abb26304d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
// Element-wise vector addition on the GPU: c[i] = a[i] + b[i] for i < n.
// One thread per element; the bounds guard protects the partial last block
// (previously there was no guard at all, so every launched thread wrote).
__global__ void add(int *a, int *b, int *c, int n)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
// Host driver: reads n, fills a[i]=i and b[i]=i+1, adds on the device and
// prints the result.
int main()
{
int a[1025], b[1025], c[1025], n;
printf("Enter n: ");
// Validate input: n must fit the fixed 1025-element host buffers.
// (An unchecked n used to allow a stack buffer overflow; a failed scanf
// left n uninitialized.)
if (scanf("%d", &n) != 1 || n < 1 || n > 1025) {
printf("n must be an integer between 1 and 1025\n");
return 1;
}
printf("Enter A:\n");
for (int i = 0; i < n; i++)
a[i] = i;
printf("Enter B:\n");
for (int i = 0; i < n; i++)
b[i] = i+1;
int *d_a, *d_b, *d_c;
int size = sizeof(int)*n;
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// Ceil-div launch config: the old <<<n,256>>> launched n*256 unguarded
// threads, writing far past the n-element device buffers.
int threads = 256;
int blocks = (n + threads - 1) / threads;
hipLaunchKernelGGL(add, dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c, n);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
printf("%d ", c[i]);
printf("\n");
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| d8a74236c78cc952c7ef666676ec6c0abb26304d.cu | #include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
// Element-wise vector addition on the GPU: c[i] = a[i] + b[i] for i < n.
// One thread per element; the bounds guard protects the partial last block
// (previously there was no guard at all, so every launched thread wrote).
__global__ void add(int *a, int *b, int *c, int n)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
// Host driver: reads n, fills a[i]=i and b[i]=i+1, adds on the device and
// prints the result.
int main()
{
int a[1025], b[1025], c[1025], n;
printf("Enter n: ");
// Validate input: n must fit the fixed 1025-element host buffers.
// (An unchecked n used to allow a stack buffer overflow; a failed scanf
// left n uninitialized.)
if (scanf("%d", &n) != 1 || n < 1 || n > 1025) {
printf("n must be an integer between 1 and 1025\n");
return 1;
}
printf("Enter A:\n");
for (int i = 0; i < n; i++)
a[i] = i;
printf("Enter B:\n");
for (int i = 0; i < n; i++)
b[i] = i+1;
int *d_a, *d_b, *d_c;
int size = sizeof(int)*n;
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Ceil-div launch config: the old <<<n,256>>> launched n*256 unguarded
// threads, writing far past the n-element device buffers.
int threads = 256;
int blocks = (n + threads - 1) / threads;
add<<<blocks, threads>>>(d_a, d_b, d_c, n);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++)
printf("%d ", c[i]);
printf("\n");
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
7ab8122507f321a2e513a9f22f9d5de941b56ef7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _CPP_CUDA_VISION_KERNEL_PRIMALDUAL_DEPTH_CU_
#define _CPP_CUDA_VISION_KERNEL_PRIMALDUAL_DEPTH_CU_
#include "../kernels/derivatives.h"
#include "cumath.h"
#include <cutil_inline.h>
texture<float, 2, hipReadModeElementType> TexImgCur;
const static hipChannelFormatDesc chandesc_float1 =
hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
// One dual ascent step of a primal-dual TV(-Huber) regulariser, one thread
// per pixel: p <- proj_{|.| <= lambda}( (p + tau*grad(u)) / (1 + huber_eps*tau) ),
// where grad(u) is a forward difference clamped to 0 at the right/bottom
// image border (Neumann boundary).
// NOTE(review): no x/y guard against imageSize — assumes the launch grid
// exactly tiles the image; confirm at the call site.
__global__ void updateDualReg(
/*float * d_ux, float * d_uy,*/
const float * __restrict__ d_u,
float * __restrict__ d_px, float * __restrict__ d_py,
// const float * __restrict__ d_edgeWeight,
const float sigma, float lambda,float huber_eps,
const int2 imageSize,const size_t stridef1){
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int indexf1 = getImageIdxStrided(x,y,stridef1);
// const float edgeWeight = d_edgeWeight[indexf1];
// lambda = lambda*edgeWeight;
const float u0 = d_u[indexf1];
const float tau =sigma;//1.0f / (4.0f * sigma + huber_eps);
// Current dual vector and the forward-difference gradient of u.
const float2 p_n = make_float2(d_px[indexf1],d_py[indexf1]);
const float2 du_n = make_float2((x == imageSize.x - 1) ? 0: d_u[getImageIdxStrided(x+1,y,stridef1)] - u0,
(y == imageSize.y - 1) ? 0 :d_u[getImageIdxStrided(x,y+1,stridef1)] - u0);
//gradient descent on the dual variable
//l1 on grad
float2 update = (p_n + du_n*tau)/(1+huber_eps*tau);
// Reprojection onto the lambda-ball (keeps |p| <= lambda).
const float len = fmaxf(1,length(update)/lambda);
//
//l2 on grad
//const float len = 1+sigma;
//float len = fmaxf(1,powf(length(make_float2(px + sigma*ux, py + sigma*uy)),30.0f));
d_px[indexf1] = update.x/len;
d_py[indexf1] = update.y/len;
}
// Dual ascent step for the linearised photometric data term, one thread per
// pixel. d_derivs_data holds float4(dI/dz, I_live, z0, I_ref) produced by
// cu_compute_dI_dz; the residual is (I_live - I_ref) + (u - z0)*dI/dz, and
// the updated scalar dual q is projected back onto [-lambda, lambda].
// NOTE(review): like the other kernels here, assumes the grid exactly
// covers the image (no bounds guard).
__global__ void updateDualData(
const float * __restrict__ d_u,
const float4 * __restrict__ d_derivs_data,
float * d_q, const float sigma, float lambda,
const size_t stridef1, const size_t stridef4
){
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int indexf1 = getImageIdxStrided(x,y,stridef1);
const int indexf4 = getImageIdxStrided(x,y,stridef4);
const float q = d_q[indexf1];
const float u = d_u[indexf1];
// Unpack the per-pixel linearisation of the data term.
const float4 data = d_derivs_data[indexf4];
const float dIdz = data.x;
const float Id = data.y;
const float z0 = data.z;
const float Ir = data.w;
//l1
float err = (Id-Ir) + (u - z0)*dIdz ;
float q_update = q + sigma*err;//q + lambda*sigma_p.*inner;
//float reprojection_p = fmaxf(1.0,abs(q));
//q = q/reprojection_p ;
// Reprojection onto [-lambda, lambda].
const float len = fmaxf(1,fabs(q_update)/(lambda));
//l2
//const float len = (1+sigma)/lambda;
//float len = fmaxf(1,powf(length(make_float2(px + sigma*ux, py + sigma*uy)),2.0f));
d_q[indexf1] = (q_update)/len;
}
// Primal descent step for a single data term, one thread per pixel:
// u <- u + tau*div(p) - tau*q*dI/dz, where div(p) is built from backward
// differences (dxm/dym) of the dual field (px, py).
// NOTE(review): assumes the launch grid exactly covers the image.
__global__ void updatePrimal2Denoise(
float * d_u,
const float4 * __restrict__ d_derivs_data,
float * __restrict__ d_px, float * __restrict__ d_py,
const float * __restrict__ d_q,const float tau,
const int2 imageSize,const size_t stridef1, const size_t stridef4){
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int indexf1 = getImageIdxStrided(x,y,stridef1);
const int indexf4 = getImageIdxStrided(x,y,stridef4);
// Only the .x component (dI/dz) of the packed derivatives is needed here.
const float4 data = d_derivs_data[indexf4];
const float dIdz = data.x;
const float div_p = dxm(d_px, x,y,imageSize,stridef1) + dym(d_py,x,y,imageSize,stridef1);
const float u = d_u[indexf1] + tau*div_p - tau*d_q[indexf1]*dIdz;
//d = d + sigma_d*(div_q - lambda*(p.*grad_wrt_d));;
d_u[indexf1] = u;
}
// Accumulates one data term's contribution q*dI/dz into d_datasum, so that
// several frames' data duals can be summed before a single primal update
// (see updateSummedPrimal2Denoise). One thread per pixel; read-modify-write
// is race-free because each thread owns exactly one indexf1.
__global__ void updateDataSum(
float * d_datasum,
const float4 * __restrict__ d_derivs_data,
const float * __restrict__ d_q,
const size_t stridef1, const size_t stridef4){
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int indexf1 = getImageIdxStrided(x,y,stridef1);
const int indexf4 = getImageIdxStrided(x,y,stridef4);
const float4 data = d_derivs_data[indexf4];
const float dIdz = data.x;
d_datasum[indexf1] = d_datasum[indexf1]+ d_q[indexf1]*dIdz;
}
// Primal descent step using a pre-accumulated data gradient (d_datasum,
// filled by updateDataSum over all frames) instead of a single q*dI/dz term:
// u <- u + tau*div(p) - tau*datasum. One thread per pixel.
__global__ void updateSummedPrimal2Denoise(
float * d_u,
float * d_datasum,
float * __restrict__ d_px, float * __restrict__ d_py,
const float tau,
const int2 imageSize,const size_t stridef1){
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int indexf1 = getImageIdxStrided(x,y,stridef1);
// Divergence of the dual field via backward differences.
const float div_p = dxm(d_px, x,y,imageSize,stridef1) + dym(d_py,x,y,imageSize,stridef1);
const float u = d_u[indexf1] + tau*div_p - tau*d_datasum[indexf1];
d_u[indexf1] = u;
}
// Per-pixel linearisation of the photometric data term for depth estimation.
// For each reference pixel: back-project at the clamped depth u0, transform
// into the live camera via (R, t), sample the live image (bound to
// TexImgCur), and store float4(dI/dz, I_live, z0, I_ref) for the
// primal-dual update kernels above.
// NOTE(review): x/y use unsigned short and there is no bounds guard —
// assumes image dimensions fit in 16 bits and the grid exactly covers the
// image; confirm at the call site.
__global__ void cu_compute_dI_dz(float4 * derivs,
cumat<3,3>R,
cumat<3,1>t,
const float2 d_pp, const float2 d_fl,
// const float2 depth_range,
const size_t stridef4,
float *d_ref_img,
float *d_u0,
const size_t stridef1
)
{
const unsigned short x = (blockIdx.x*blockDim.x + threadIdx.x);
const unsigned short y = (blockIdx.y*blockDim.y + threadIdx.y);
// Normalised ray direction through pixel (x, y): Kinv * [x, y, 1].
const float2 invfl = 1.0f/d_fl;
const float3 uvnorm = make_float3( (x-d_pp.x)*invfl.x,(y-d_pp.y)*invfl.y,1);//*depth_range.y;
cumat<3,1> uvnormMat = {uvnorm.x, uvnorm.y, uvnorm.z};
// Linearisation point: current depth estimate, clamped to [0, 1].
float zLinearised = d_u0[y*stridef1+x];
zLinearised = fmaxf(0.0f,fminf(1.0f,zLinearised));
// float3 p3d = uvnorm*zLinearised;
cumat<3,1> p3d_r = {uvnorm.x*zLinearised, uvnorm.y*zLinearised, zLinearised};
/// Are we really sure of this?
// Transform the back-projected point into the live camera frame.
cumat<3,1> p3d_dest = R*p3d_r + t;
float3 p3d_dest_vec = {p3d_dest(0,0), p3d_dest(1,0), p3d_dest(2,0)};
// Project into the live image (pinhole, then apply focal length/centre).
float2 p2D_live = {p3d_dest(0,0)/p3d_dest(2,0) , p3d_dest(1,0)/p3d_dest(2,0)};
p2D_live.x= p2D_live.x*d_fl.x + d_pp.x;
p2D_live.y= p2D_live.y*d_fl.y + d_pp.y;
// Sample reference intensity and live intensity (plus one-pixel offsets
// for the spatial image gradient) via the bound texture.
float Ir = d_ref_img[y*stridef1+x];
float Id = tex2D(TexImgCur, p2D_live.x+0.5f,p2D_live.y+0.5f);
float Idx = tex2D(TexImgCur, p2D_live.x+0.5f+1.0f,p2D_live.y+0.5f);
float Idy = tex2D(TexImgCur, p2D_live.x+0.5f,p2D_live.y+0.5f+1.0f);
//dI/dx spatial derivative in x and y
float2 dIdx = make_float2(Idx-Id, Idy-Id);
//dK.pi/dX
// float3 dpi_u = make_float3(d_fl.x/p3d_dest.z, 0, -(d_fl.x*p3d_dest.x)/(p3d_dest.z*p3d_dest.z));
// float3 dpi_v = make_float3(0, d_fl.y/p3d_dest.z, -(d_fl.y*p3d_dest.y)/(p3d_dest.z*p3d_dest.z));
//dX/dz d(R.kinv.[x,y,1]z + t)/dz = R.kinv.[x,y,1] .
//using z in linear depth
//float3 dXdz = multiplySO3(T_k_ref_,uvnorm);
//using z in inverse depth
// float3 dXdz = multiplySO3(T_k_ref_,uvnorm);//*(-1.0f/(zLinearised*zLinearised));
//chain rule
// float dIdz = dot(dIdx, make_float2( dot(dXdz,dpi_u), dot(dXdz,dpi_v) ) );
// derivs[y*stridef4 + x] = make_float4(dIdz,Id,zLinearised,Ir);
// 10E-6 is 1e-5: guards the projection-Jacobian divisions below.
p3d_dest_vec.z = p3d_dest_vec.z + 10E-6;
// float3 dpi_u = make_float3(1/p3d_dest_vec.z, 0,-(p3d_dest_vec.x)/(p3d_dest_vec.z*p3d_dest_vec.z));
// float3 dpi_v = make_float3(0, 1/p3d_dest_vec.z,-(p3d_dest_vec.y)/(p3d_dest_vec.z*p3d_dest_vec.z));
// Rows of the projection Jacobian d(K*pi(X))/dX at the warped point.
float3 dpi_u = make_float3(d_fl.x/p3d_dest_vec.z, 0,-(d_fl.x*p3d_dest_vec.x)/(p3d_dest_vec.z*p3d_dest_vec.z));
float3 dpi_v = make_float3(0, d_fl.y/p3d_dest_vec.z,-(d_fl.y*p3d_dest_vec.y)/(p3d_dest_vec.z*p3d_dest_vec.z));
// dX/dz = R * Kinv * [x, y, 1]; chain rule gives dI/dz below.
cumat<3,1> dXdz = R*uvnormMat;///(zLinearised*zLinearised);
float3 dXdz_vec = {dXdz(0,0),dXdz(1,0),dXdz(2,0)};
float dIdz = dot(dIdx, make_float2( dot(dXdz_vec,dpi_u), dot(dXdz_vec,dpi_v) ) );
derivs[y*stridef4+x] = make_float4(dIdz,Id,zLinearised,Ir);
}
// Binds cur_img (a pitched single-channel float image) to the 2D texture
// reference used by cu_compute_dI_dz, with clamped addressing, bilinear
// filtering, and unnormalized (pixel) coordinates.
// Fix: texture-reference attributes are read when the texture is bound, so
// they must be set BEFORE hipBindTexture2D. Previously they were set after
// the bind, so the bind took effect with default point filtering — breaking
// the sub-pixel bilinear sampling cu_compute_dI_dz relies on.
void BindDepthTexture(float* cur_img,
unsigned int width,
unsigned int height,
unsigned int imgStride)
{
TexImgCur.addressMode[0] = hipAddressModeClamp;
TexImgCur.addressMode[1] = hipAddressModeClamp;
TexImgCur.filterMode = hipFilterModeLinear;
TexImgCur.normalized = false; // access with unnormalized (pixel) coordinates
hipBindTexture2D(0,TexImgCur,cur_img,chandesc_float1,width, height,imgStride*sizeof(float));
}
//void TVL1Depth::init(){
// std::cout << "Init " <<std::endl;
// iu::setValue(0,dual_reg_x,dual_reg_x->roi());
// iu::setValue(0,dual_reg_y,dual_reg_x->roi());
// iu::setValue(0.5,primal_reg,primal_reg->roi());
//// iu::ImageCpu_32f_C1 randim(primal_reg->width(),primal_reg->height());
//// float * d = randim.data();
//// for(int i =0 ; i < randim.numel();i++){
//// d[i] = (float)drand48();
//// }
//// iu::copy(&randim,primal_reg);
// for(int k = 0 ; k < dual_data.size();k++){
// iu::setValue(0.5,dual_data.at(k),dual_data.at(k)->roi());
// }
// //iu::copy(this->data_init,this->primal_reg);
// //iu::copy(this->data_init,this->dual_data);
// // iu::filterGauss(data,this->ureg,data->roi(),1);
// // iu::filterGauss(data,this->udata,data->roi(),1);
//}
//void TVL1Depth::computeMultiDepth(std::vector<FrameData *> & dataTerms, int reference, bool derivCheck){
// //ScopedCuTimer time("denoise");
// int2 imageSize_ = make_int2(primal_reg->width(),primal_reg->height());
// dim3 blockdim(boost::math::gcd<unsigned>(imageSize_.x,32), boost::math::gcd<unsigned>(imageSize_.y,32), 1);
// dim3 griddim( imageSize_.x / blockdim.x, imageSize_.y / blockdim.y);
// for(int o = 0 ; o<outerIterations;o++){
// //compute using the current linearisation point
// //derivatives and image prediction.
// FrameData * frameRef = dataTerms[reference];
// for(int k = 0 ; k < dataTerms.size();k++ ){
// if(k!=reference){
// FrameData * frameData = dataTerms[k];
// mvs->compute_dI_dz(mvs->camera,
// frameRef->grey_st_32f,frameRef->h_T_wf,
// frameData->grey_st_32f,frameData->h_T_wf,
// primal_reg,data_derivs.at(k), derivCheck
// );
// }
// }
// for(int i = 0 ; i < innerIterations; i++)
// {
// //update dual variable reg
// updateDualReg<<<griddim,blockdim>>>(primal_reg->data(),dual_reg_x->data(),dual_reg_y->data(),nrg->data(),sigma_dual,1,epsilon,imageSize_,
// primal_reg->stride());
// //update each dual variable data
// for(int k = 0 ; k < dataTerms.size();k++ ){
// if(k!=reference){
// updateDualData<<<griddim,blockdim>>>(primal_reg->data(),data_derivs.at(k)->data(), dual_data.at(k)->data(), sigma_dual, 1/lambda, primal_reg->stride(),
// dual_data.at(k)->stride());
// }
// }
// //update primal variable
// // updatePrimal2Denoise<<<griddim,blockdim>>>(primal_reg->data(),data_derivs->data(),dual_reg_x->data(),dual_reg_y->data(),dual_data->data(),
// // sigma_data,imageSize_,primal_reg->stride(),data_derivs->stride());
// iu::setValue(0,datasum,datasum->roi());
// for(int k = 0 ; k < dataTerms.size();k++ ){
// if(k!=reference){
// updateDataSum<<<griddim,blockdim>>>(datasum->data(), data_derivs.at(k)->data(), dual_data.at(k)->data(),
// primal_reg->stride(), dual_data.at(k)->stride());
// }
// }
// float scaleData =1.0f/dataTerms.size();
// iu::mulC(datasum,scaleData,datasum,datasum->roi());
// updateSummedPrimal2Denoise<<<griddim,blockdim>>>(primal_reg->data(),datasum->data(),dual_reg_x->data(),dual_reg_y->data(), sigma_data,imageSize_,primal_reg->stride() );
// }
// }
//}
//void TVL1Depth::computeDepth(std::vector<FrameData *> & dataTerms, int reference, int dataterm){
//}
///*void TVL1Depth::computeDepth(std::vector<FrameData *> & dataTerms, int reference, int dataterm){
// //ScopedCuTimer time("denoise");
// int2 imageSize_ = make_int2(primal_reg->width(),primal_reg->height());
// dim3 blockdim(boost::math::gcd<unsigned>(imageSize_.x,32), boost::math::gcd<unsigned>(imageSize_.y,32), 1);
// dim3 griddim( imageSize_.x / blockdim.x, imageSize_.y / blockdim.y);
// for(int o = 0 ; o<outerIterations;o++){
// //compute using the current linearisation point
// //derivatives and image prediction.
// FrameData * frameRef = dataTerms[reference];
// FrameData * frameData = dataTerms[dataterm];
// mvs->compute_dI_dz(mvs->camera,
// frameRef->grey_st_32f,frameRef->h_T_wf,
// frameData->grey_st_32f,frameData->h_T_wf,
// primal_reg,data_derivs
// );
// for(int i = 0 ; i < innerIterations; i++)
// {
// //update dual variable reg
// updateDualReg<<<griddim,blockdim>>>(primal_reg->data(),dual_reg_x->data(),dual_reg_y->data(),nrg->data(),sigma_dual,1,epsilon,imageSize_,
// primal_reg->stride());
// //update dual variable data
// updateDualData<<<griddim,blockdim>>>(primal_reg->data(),data_derivs->data(), dual_data->data(), sigma_dual, 1/lambda, primal_reg->stride(),
// dual_data->stride());
// //update primal variable
// updatePrimal2Denoise<<<griddim,blockdim>>>(primal_reg->data(),data_derivs->data(),dual_reg_x->data(),dual_reg_y->data(),dual_data->data(),
// sigma_data,imageSize_,primal_reg->stride(),data_derivs->stride());
// }
// }
//}*/
//void TVL1Depth::updateEdge(iu::ImageGpu_32f_C1 * image){
// iu::filterEdge(image,
// this->nrg,
// this->nrg->roi(),
// this->alpha,this->beta,0.0001);
//}
//void TVL1Depth::updateEdge(iu::ImageGpu_32f_C4 * image){
// iu::filterEdge(image,
// this->nrg,
// this->nrg->roi(),
// this->alpha,this->beta,0.0001);
//}
//void TVL1Depth::updateImage(iu::ImageGpu_8u_C1 *image){
// iu::convert_8u32f_C1(image,image->roi(),this->data_init,this->data_init->roi());
// //iu::copy(image,this->data);
//}
//void TVL1Depth::updateImage(iu::ImageGpu_32f_C1 *image){
// // iu::convert_8u32f_C1(image,image->roi(),this->data,this->data->roi());
// iu::copy(image,this->data_init);
//}
#endif
| 7ab8122507f321a2e513a9f22f9d5de941b56ef7.cu | #ifndef _CPP_CUDA_VISION_KERNEL_PRIMALDUAL_DEPTH_CU_
#define _CPP_CUDA_VISION_KERNEL_PRIMALDUAL_DEPTH_CU_
#include "../kernels/derivatives.h"
#include "cumath.h"
#include <cutil_inline.h>
texture<float, 2, cudaReadModeElementType> TexImgCur;
const static cudaChannelFormatDesc chandesc_float1 =
cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
// One dual ascent step of a primal-dual TV(-Huber) regulariser, one thread
// per pixel: p <- proj_{|.| <= lambda}( (p + tau*grad(u)) / (1 + huber_eps*tau) ),
// where grad(u) is a forward difference clamped to 0 at the right/bottom
// image border (Neumann boundary).
// NOTE(review): no x/y guard against imageSize — assumes the launch grid
// exactly tiles the image; confirm at the call site.
__global__ void updateDualReg(
/*float * d_ux, float * d_uy,*/
const float * __restrict__ d_u,
float * __restrict__ d_px, float * __restrict__ d_py,
// const float * __restrict__ d_edgeWeight,
const float sigma, float lambda,float huber_eps,
const int2 imageSize,const size_t stridef1){
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int indexf1 = getImageIdxStrided(x,y,stridef1);
// const float edgeWeight = d_edgeWeight[indexf1];
// lambda = lambda*edgeWeight;
const float u0 = d_u[indexf1];
const float tau =sigma;//1.0f / (4.0f * sigma + huber_eps);
// Current dual vector and the forward-difference gradient of u.
const float2 p_n = make_float2(d_px[indexf1],d_py[indexf1]);
const float2 du_n = make_float2((x == imageSize.x - 1) ? 0: d_u[getImageIdxStrided(x+1,y,stridef1)] - u0,
(y == imageSize.y - 1) ? 0 :d_u[getImageIdxStrided(x,y+1,stridef1)] - u0);
//gradient descent on the dual variable
//l1 on grad
float2 update = (p_n + du_n*tau)/(1+huber_eps*tau);
// Reprojection onto the lambda-ball (keeps |p| <= lambda).
const float len = fmaxf(1,length(update)/lambda);
//
//l2 on grad
//const float len = 1+sigma;
//float len = fmaxf(1,powf(length(make_float2(px + sigma*ux, py + sigma*uy)),30.0f));
d_px[indexf1] = update.x/len;
d_py[indexf1] = update.y/len;
}
// Dual ascent step for one photometric data term. d_derivs_data packs, per
// pixel: .x = dI/dz, .y = warped live intensity Id, .z = linearisation depth
// z0, .w = reference intensity Ir (see cu_compute_dI_dz). The residual is
// the first-order expansion of the photometric error around z0; the dual q
// is stepped and reprojected onto [-lambda, lambda] (L1 data term).
// One thread per pixel; no bounds check — grid must cover the image exactly.
__global__ void updateDualData(
const float * __restrict__ d_u,
const float4 * __restrict__ d_derivs_data,
float * d_q, const float sigma, float lambda,
const size_t stridef1, const size_t stridef4
){
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int indexf1 = getImageIdxStrided(x,y,stridef1);
const int indexf4 = getImageIdxStrided(x,y,stridef4);
const float q = d_q[indexf1];
const float u = d_u[indexf1];
const float4 data = d_derivs_data[indexf4];
const float dIdz = data.x;
const float Id = data.y;
const float z0 = data.z;
const float Ir = data.w;
//l1
// linearised photometric residual at the current primal depth u
float err = (Id-Ir) + (u - z0)*dIdz ;
float q_update = q + sigma*err;//q + lambda*sigma_p.*inner;
//float reprojection_p = fmaxf(1.0,abs(q));
//q = q/reprojection_p ;
// reprojection onto [-lambda, lambda]
const float len = fmaxf(1,fabs(q_update)/(lambda));
//l2
//const float len = (1+sigma)/lambda;
//float len = fmaxf(1,powf(length(make_float2(px + sigma*ux, py + sigma*uy)),2.0f));
d_q[indexf1] = (q_update)/len;
}
// Primal descent step (single data term): u <- u + tau*div(p) - tau*q*dI/dz.
// dxm/dym (from derivatives.h) compute the backward-difference divergence of
// the dual field, adjoint to the forward differences used in updateDualReg.
// One thread per pixel; no bounds check — grid must cover the image exactly.
__global__ void updatePrimal2Denoise(
float * d_u,
const float4 * __restrict__ d_derivs_data,
float * __restrict__ d_px, float * __restrict__ d_py,
const float * __restrict__ d_q,const float tau,
const int2 imageSize,const size_t stridef1, const size_t stridef4){
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int indexf1 = getImageIdxStrided(x,y,stridef1);
const int indexf4 = getImageIdxStrided(x,y,stridef4);
const float4 data = d_derivs_data[indexf4];
const float dIdz = data.x;  // .x of the packed derivative vector is dI/dz
const float div_p = dxm(d_px, x,y,imageSize,stridef1) + dym(d_py,x,y,imageSize,stridef1);
const float u = d_u[indexf1] + tau*div_p - tau*d_q[indexf1]*dIdz;
//d = d + sigma_d*(div_q - lambda*(p.*grad_wrt_d));;
d_u[indexf1] = u;
}
// Accumulates one data term's contribution q * dI/dz into the per-pixel
// running sum d_datasum (called once per data term before the summed primal
// update). .x of the packed derivative vector holds dI/dz for the pixel.
// One thread per pixel; no bounds check — grid must cover the image exactly.
__global__ void updateDataSum(
float * d_datasum,
const float4 * __restrict__ d_derivs_data,
const float * __restrict__ d_q,
const size_t stridef1, const size_t stridef4){
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    const int idxF1 = getImageIdxStrided(col,row,stridef1);
    const int idxF4 = getImageIdxStrided(col,row,stridef4);
    // only the .x component (dI/dz) of the derivative record is needed here
    const float dIdz = d_derivs_data[idxF4].x;
    d_datasum[idxF1] += d_q[idxF1]*dIdz;
}
// Primal descent step for the multi-view case: identical to
// updatePrimal2Denoise but uses the pre-accumulated sum of q*dI/dz over all
// data terms (built by updateDataSum) instead of a single term.
// One thread per pixel; no bounds check — grid must cover the image exactly.
__global__ void updateSummedPrimal2Denoise(
float * d_u,
float * d_datasum,
float * __restrict__ d_px, float * __restrict__ d_py,
const float tau,
const int2 imageSize,const size_t stridef1){
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int indexf1 = getImageIdxStrided(x,y,stridef1);
// backward-difference divergence of the dual field (adjoint of fwd diffs)
const float div_p = dxm(d_px, x,y,imageSize,stridef1) + dym(d_py,x,y,imageSize,stridef1);
const float u = d_u[indexf1] + tau*div_p - tau*d_datasum[indexf1];
d_u[indexf1] = u;
}
// Per-pixel linearisation of the photometric error around the current depth
// estimate d_u0. Back-projects pixel (x,y) at the clamped depth, transforms
// it by (R,t) into the live camera, samples the live image (bound to
// TexImgCur) to get the warped intensity Id and finite-difference image
// gradients, then chain-rules dI/dz = dI/d(uv) * dpi/dX * dX/dz.
// Output per pixel: float4(dIdz, Id, zLinearised, Ir).
// NOTE(review): x,y are unsigned short — assumes image dims < 65536, and
// there is no bounds check, so the grid must cover the image exactly.
// NOTE(review): p2D_live divides by p3d_dest(2,0) BEFORE the 10E-6 epsilon
// is added to p3d_dest_vec.z — presumably z is assumed strictly positive
// here; confirm against the caller.
__global__ void cu_compute_dI_dz(float4 * derivs,
cumat<3,3>R,
cumat<3,1>t,
const float2 d_pp, const float2 d_fl,
// const float2 depth_range,
const size_t stridef4,
float *d_ref_img,
float *d_u0,
const size_t stridef1
)
{
const unsigned short x = (blockIdx.x*blockDim.x + threadIdx.x);
const unsigned short y = (blockIdx.y*blockDim.y + threadIdx.y);
const float2 invfl = 1.0f/d_fl;
// normalised ray through the pixel (K^-1 * [x,y,1])
const float3 uvnorm = make_float3( (x-d_pp.x)*invfl.x,(y-d_pp.y)*invfl.y,1);//*depth_range.y;
cumat<3,1> uvnormMat = {uvnorm.x, uvnorm.y, uvnorm.z};
float zLinearised = d_u0[y*stridef1+x];
// clamp the linearisation depth into [0,1]
zLinearised = fmaxf(0.0f,fminf(1.0f,zLinearised));
// float3 p3d = uvnorm*zLinearised;
cumat<3,1> p3d_r = {uvnorm.x*zLinearised, uvnorm.y*zLinearised, zLinearised};
/// Are we really sure of this?
// 3D point transformed into the live (destination) camera frame
cumat<3,1> p3d_dest = R*p3d_r + t;
float3 p3d_dest_vec = {p3d_dest(0,0), p3d_dest(1,0), p3d_dest(2,0)};
// pinhole projection into the live image
float2 p2D_live = {p3d_dest(0,0)/p3d_dest(2,0) , p3d_dest(1,0)/p3d_dest(2,0)};
p2D_live.x= p2D_live.x*d_fl.x + d_pp.x;
p2D_live.y= p2D_live.y*d_fl.y + d_pp.y;
float Ir = d_ref_img[y*stridef1+x];
// bilinear texture fetches (+0.5 for texel-centre addressing)
float Id = tex2D(TexImgCur, p2D_live.x+0.5f,p2D_live.y+0.5f);
float Idx = tex2D(TexImgCur, p2D_live.x+0.5f+1.0f,p2D_live.y+0.5f);
float Idy = tex2D(TexImgCur, p2D_live.x+0.5f,p2D_live.y+0.5f+1.0f);
//dI/dx spatial derivative in x and y (forward differences of the samples)
float2 dIdx = make_float2(Idx-Id, Idy-Id);
//dK.pi/dX
// float3 dpi_u = make_float3(d_fl.x/p3d_dest.z, 0, -(d_fl.x*p3d_dest.x)/(p3d_dest.z*p3d_dest.z));
// float3 dpi_v = make_float3(0, d_fl.y/p3d_dest.z, -(d_fl.y*p3d_dest.y)/(p3d_dest.z*p3d_dest.z));
//dX/dz d(R.kinv.[x,y,1]z + t)/dz = R.kinv.[x,y,1] .
//using z in linear depth
//float3 dXdz = multiplySO3(T_k_ref_,uvnorm);
//using z in inverse depth
// float3 dXdz = multiplySO3(T_k_ref_,uvnorm);//*(-1.0f/(zLinearised*zLinearised));
//chain rule
// float dIdz = dot(dIdx, make_float2( dot(dXdz,dpi_u), dot(dXdz,dpi_v) ) );
// derivs[y*stridef4 + x] = make_float4(dIdz,Id,zLinearised,Ir);
// epsilon guards the divisions in the projection Jacobian below
p3d_dest_vec.z = p3d_dest_vec.z + 10E-6;
// float3 dpi_u = make_float3(1/p3d_dest_vec.z, 0,-(p3d_dest_vec.x)/(p3d_dest_vec.z*p3d_dest_vec.z));
// float3 dpi_v = make_float3(0, 1/p3d_dest_vec.z,-(p3d_dest_vec.y)/(p3d_dest_vec.z*p3d_dest_vec.z));
float3 dpi_u = make_float3(d_fl.x/p3d_dest_vec.z, 0,-(d_fl.x*p3d_dest_vec.x)/(p3d_dest_vec.z*p3d_dest_vec.z));
float3 dpi_v = make_float3(0, d_fl.y/p3d_dest_vec.z,-(d_fl.y*p3d_dest_vec.y)/(p3d_dest_vec.z*p3d_dest_vec.z));
cumat<3,1> dXdz = R*uvnormMat;///(zLinearised*zLinearised);
float3 dXdz_vec = {dXdz(0,0),dXdz(1,0),dXdz(2,0)};
float dIdz = dot(dIdx, make_float2( dot(dXdz_vec,dpi_u), dot(dXdz_vec,dpi_v) ) );
derivs[y*stridef4+x] = make_float4(dIdz,Id,zLinearised,Ir);
}
// Binds the current (live) image to the TexImgCur texture reference so
// cu_compute_dI_dz can sample it with hardware bilinear filtering.
// imgStride is in elements (floats); it is converted to bytes here.
// NOTE(review): the cudaBindTexture2D return code is not checked; texture
// reference attributes are set after binding, which works because they are
// read at kernel-launch time, not at bind time.
void BindDepthTexture(float* cur_img,
unsigned int width,
unsigned int height,
unsigned int imgStride)
{
cudaBindTexture2D(0,TexImgCur,cur_img,chandesc_float1,width, height,imgStride*sizeof(float));
TexImgCur.addressMode[0] = cudaAddressModeClamp;
TexImgCur.addressMode[1] = cudaAddressModeClamp;
TexImgCur.filterMode = cudaFilterModeLinear;
TexImgCur.normalized = false; // unnormalized (pixel) texture coordinates
}
//void TVL1Depth::init(){
// std::cout << "Init " <<std::endl;
// iu::setValue(0,dual_reg_x,dual_reg_x->roi());
// iu::setValue(0,dual_reg_y,dual_reg_x->roi());
// iu::setValue(0.5,primal_reg,primal_reg->roi());
//// iu::ImageCpu_32f_C1 randim(primal_reg->width(),primal_reg->height());
//// float * d = randim.data();
//// for(int i =0 ; i < randim.numel();i++){
//// d[i] = (float)drand48();
//// }
//// iu::copy(&randim,primal_reg);
// for(int k = 0 ; k < dual_data.size();k++){
// iu::setValue(0.5,dual_data.at(k),dual_data.at(k)->roi());
// }
// //iu::copy(this->data_init,this->primal_reg);
// //iu::copy(this->data_init,this->dual_data);
// // iu::filterGauss(data,this->ureg,data->roi(),1);
// // iu::filterGauss(data,this->udata,data->roi(),1);
//}
//void TVL1Depth::computeMultiDepth(std::vector<FrameData *> & dataTerms, int reference, bool derivCheck){
// //ScopedCuTimer time("denoise");
// int2 imageSize_ = make_int2(primal_reg->width(),primal_reg->height());
// dim3 blockdim(boost::math::gcd<unsigned>(imageSize_.x,32), boost::math::gcd<unsigned>(imageSize_.y,32), 1);
// dim3 griddim( imageSize_.x / blockdim.x, imageSize_.y / blockdim.y);
// for(int o = 0 ; o<outerIterations;o++){
// //compute using the current linearisation point
// //derivatives and image prediction.
// FrameData * frameRef = dataTerms[reference];
// for(int k = 0 ; k < dataTerms.size();k++ ){
// if(k!=reference){
// FrameData * frameData = dataTerms[k];
// mvs->compute_dI_dz(mvs->camera,
// frameRef->grey_st_32f,frameRef->h_T_wf,
// frameData->grey_st_32f,frameData->h_T_wf,
// primal_reg,data_derivs.at(k), derivCheck
// );
// }
// }
// for(int i = 0 ; i < innerIterations; i++)
// {
// //update dual variable reg
// updateDualReg<<<griddim,blockdim>>>(primal_reg->data(),dual_reg_x->data(),dual_reg_y->data(),nrg->data(),sigma_dual,1,epsilon,imageSize_,
// primal_reg->stride());
// //update each dual variable data
// for(int k = 0 ; k < dataTerms.size();k++ ){
// if(k!=reference){
// updateDualData<<<griddim,blockdim>>>(primal_reg->data(),data_derivs.at(k)->data(), dual_data.at(k)->data(), sigma_dual, 1/lambda, primal_reg->stride(),
// dual_data.at(k)->stride());
// }
// }
// //update primal variable
// // updatePrimal2Denoise<<<griddim,blockdim>>>(primal_reg->data(),data_derivs->data(),dual_reg_x->data(),dual_reg_y->data(),dual_data->data(),
// // sigma_data,imageSize_,primal_reg->stride(),data_derivs->stride());
// iu::setValue(0,datasum,datasum->roi());
// for(int k = 0 ; k < dataTerms.size();k++ ){
// if(k!=reference){
// updateDataSum<<<griddim,blockdim>>>(datasum->data(), data_derivs.at(k)->data(), dual_data.at(k)->data(),
// primal_reg->stride(), dual_data.at(k)->stride());
// }
// }
// float scaleData =1.0f/dataTerms.size();
// iu::mulC(datasum,scaleData,datasum,datasum->roi());
// updateSummedPrimal2Denoise<<<griddim,blockdim>>>(primal_reg->data(),datasum->data(),dual_reg_x->data(),dual_reg_y->data(), sigma_data,imageSize_,primal_reg->stride() );
// }
// }
//}
//void TVL1Depth::computeDepth(std::vector<FrameData *> & dataTerms, int reference, int dataterm){
//}
///*void TVL1Depth::computeDepth(std::vector<FrameData *> & dataTerms, int reference, int dataterm){
// //ScopedCuTimer time("denoise");
// int2 imageSize_ = make_int2(primal_reg->width(),primal_reg->height());
// dim3 blockdim(boost::math::gcd<unsigned>(imageSize_.x,32), boost::math::gcd<unsigned>(imageSize_.y,32), 1);
// dim3 griddim( imageSize_.x / blockdim.x, imageSize_.y / blockdim.y);
// for(int o = 0 ; o<outerIterations;o++){
// //compute using the current linearisation point
// //derivatives and image prediction.
// FrameData * frameRef = dataTerms[reference];
// FrameData * frameData = dataTerms[dataterm];
// mvs->compute_dI_dz(mvs->camera,
// frameRef->grey_st_32f,frameRef->h_T_wf,
// frameData->grey_st_32f,frameData->h_T_wf,
// primal_reg,data_derivs
// );
// for(int i = 0 ; i < innerIterations; i++)
// {
// //update dual variable reg
// updateDualReg<<<griddim,blockdim>>>(primal_reg->data(),dual_reg_x->data(),dual_reg_y->data(),nrg->data(),sigma_dual,1,epsilon,imageSize_,
// primal_reg->stride());
// //update dual variable data
// updateDualData<<<griddim,blockdim>>>(primal_reg->data(),data_derivs->data(), dual_data->data(), sigma_dual, 1/lambda, primal_reg->stride(),
// dual_data->stride());
// //update primal variable
// updatePrimal2Denoise<<<griddim,blockdim>>>(primal_reg->data(),data_derivs->data(),dual_reg_x->data(),dual_reg_y->data(),dual_data->data(),
// sigma_data,imageSize_,primal_reg->stride(),data_derivs->stride());
// }
// }
//}*/
//void TVL1Depth::updateEdge(iu::ImageGpu_32f_C1 * image){
// iu::filterEdge(image,
// this->nrg,
// this->nrg->roi(),
// this->alpha,this->beta,0.0001);
//}
//void TVL1Depth::updateEdge(iu::ImageGpu_32f_C4 * image){
// iu::filterEdge(image,
// this->nrg,
// this->nrg->roi(),
// this->alpha,this->beta,0.0001);
//}
//void TVL1Depth::updateImage(iu::ImageGpu_8u_C1 *image){
// iu::convert_8u32f_C1(image,image->roi(),this->data_init,this->data_init->roi());
// //iu::copy(image,this->data);
//}
//void TVL1Depth::updateImage(iu::ImageGpu_32f_C1 *image){
// // iu::convert_8u32f_C1(image,image->roi(),this->data,this->data->roi());
// iu::copy(image,this->data_init);
//}
#endif
|
e1aca262127bb5a26efae168cadb0cafd6992741.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/sequence_pooling.h"
namespace paddle {
namespace operators {
namespace math {
#define FLT_MAX __FLT_MAX__
// Max-pooling over variable-length sequences. One block per sequence
// (blockIdx.x); threads stride over the feature dimension (dim). starts is
// the LoD offset array: sequence s spans rows [starts[s], starts[s+1]) of
// the (total_rows x dim) input. For each (sequence, feature) the max value
// is written to output and the winning row index to index (used by the
// backward kernel). max_id stays -1 only for empty sequences.
template <typename T>
__global__ void KeMaxSequencePool(const T* input, const size_t* starts,
T* output, int* index, int64_t num_seq,
int64_t dim) {
int dim_idx = threadIdx.x;
int seq_id = blockIdx.x;
if (seq_id >= num_seq) return;
size_t start = starts[seq_id];
size_t end = starts[seq_id + 1];
// stride loop: handles dim larger than the block size
for (int64_t i = dim_idx; i < dim; i += blockDim.x) {
T max_val = static_cast<T>(-FLT_MAX);
int max_id = -1;
for (size_t step_id = start; step_id < end; step_id++) {
if (max_val < input[step_id * dim + i]) {
max_val = input[step_id * dim + i];
max_id = step_id;
}
}
output[seq_id * dim + i] = max_val;
index[seq_id * dim + i] = max_id;
}
}
// GPU specialisation of the max-sequence-pool forward functor. Validates
// that input/output/index shapes agree (all trailing dims equal, index
// shape == output shape), then launches KeMaxSequencePool with one block of
// 256 threads per sequence on the context's stream.
template <typename T>
class MaxSeqPoolFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::LoDTensor& input, framework::Tensor* output,
framework::Tensor* index) {
auto in_dims = input.dims();
auto out_dims = output->dims();
auto idx_dims = index->dims();
PADDLE_ENFORCE_GT(in_dims.size(), static_cast<int64_t>(1));
PADDLE_ENFORCE_GT(out_dims.size(), 1);
for (int64_t i = 1; i < in_dims.size(); ++i) {
PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]);
}
PADDLE_ENFORCE_EQ(idx_dims, out_dims);
// level-0 LoD gives the row offsets of each sequence
auto starts = input.lod()[0];
const T* in_data = input.data<T>();
T* out_data = output->data<T>();
int* max_index = index->data<int>();
int64_t num_seq = out_dims[0];
int64_t dim = output->numel() / num_seq;
dim3 threads(256, 1);
dim3 grid(num_seq, 1);
auto stream =
reinterpret_cast<const platform::CUDADeviceContext&>(context).stream();
hipLaunchKernelGGL(( KeMaxSequencePool<T>), dim3(grid), dim3(threads), 0, stream,
in_data, starts.data(), out_data, max_index, num_seq, dim);
}
};
// Backward of the max sequence pool: each pooled-output gradient is
// scattered to the single input row that produced the max in the forward
// pass (recorded in max_index). in_grad is assumed pre-zeroed by the caller.
// One thread per (sequence, feature) element of out_grad.
template <typename T>
__global__ void KeMaxSequencePoolGrad(const T* out_grad, const int* max_index,
                                      T* in_grad, int64_t num_seq,
                                      int64_t dim) {
  const int flat = threadIdx.x + blockIdx.x * blockDim.x;
  if (flat < num_seq * dim) {
    const int feature = flat % dim;       // column within the feature dim
    const int winner = max_index[flat];   // winning timestep from forward
    in_grad[winner * dim + feature] = out_grad[flat];
  }
}
// GPU specialisation of the max-sequence-pool backward functor. Validates
// shapes, zero-fills the input gradient, then launches KeMaxSequencePoolGrad
// (128 threads per block, 1D grid covering num_seq*dim elements) on the
// context's stream to scatter out_grad back to the argmax positions.
template <typename T>
class MaxSeqPoolGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& out_grad,
const framework::Tensor& index,
framework::LoDTensor* in_grad) {
auto og_dims = out_grad.dims();
auto idx_dims = index.dims();
auto ig_dims = in_grad->dims();
PADDLE_ENFORCE_GT(og_dims.size(), static_cast<int64_t>(1));
PADDLE_ENFORCE_GT(ig_dims.size(), static_cast<int64_t>(1));
for (int64_t i = 1; i < og_dims.size(); ++i) {
PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]);
}
PADDLE_ENFORCE_EQ(idx_dims, og_dims);
const T* og_data = out_grad.data<T>();
const int* max_index = index.data<int>();
T* ig_data = in_grad->data<T>();
// the scatter kernel only writes argmax positions, so zero the rest first
SetConstant<platform::GPUPlace, T> set_zero;
set_zero(context, in_grad, static_cast<T>(0.0));
int64_t num_seq = og_dims[0];
int64_t dim = out_grad.numel() / num_seq;
unsigned int blocks = (num_seq * dim + 128 - 1) / 128;  // ceil-div
dim3 threads(128, 1);
dim3 grid(blocks, 1);
auto stream =
reinterpret_cast<const platform::CUDADeviceContext&>(context).stream();
hipLaunchKernelGGL(( KeMaxSequencePoolGrad<T>), dim3(grid), dim3(threads), 0, stream,
og_data, max_index, ig_data, num_seq, dim);
}
};
template class MaxSeqPoolFunctor<platform::GPUPlace, float>;
template class MaxSeqPoolFunctor<platform::GPUPlace, double>;
template class MaxSeqPoolGradFunctor<platform::GPUPlace, float>;
template class MaxSeqPoolGradFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| e1aca262127bb5a26efae168cadb0cafd6992741.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/math_function.h"
#include "paddle/operators/math/sequence_pooling.h"
namespace paddle {
namespace operators {
namespace math {
#define FLT_MAX __FLT_MAX__
// Max-pooling over variable-length sequences. One block per sequence
// (blockIdx.x); threads stride over the feature dimension (dim). starts is
// the LoD offset array: sequence s spans rows [starts[s], starts[s+1]) of
// the (total_rows x dim) input. For each (sequence, feature) the max value
// is written to output and the winning row index to index (used by the
// backward kernel). max_id stays -1 only for empty sequences.
template <typename T>
__global__ void KeMaxSequencePool(const T* input, const size_t* starts,
T* output, int* index, int64_t num_seq,
int64_t dim) {
int dim_idx = threadIdx.x;
int seq_id = blockIdx.x;
if (seq_id >= num_seq) return;
size_t start = starts[seq_id];
size_t end = starts[seq_id + 1];
// stride loop: handles dim larger than the block size
for (int64_t i = dim_idx; i < dim; i += blockDim.x) {
T max_val = static_cast<T>(-FLT_MAX);
int max_id = -1;
for (size_t step_id = start; step_id < end; step_id++) {
if (max_val < input[step_id * dim + i]) {
max_val = input[step_id * dim + i];
max_id = step_id;
}
}
output[seq_id * dim + i] = max_val;
index[seq_id * dim + i] = max_id;
}
}
// GPU specialisation of the max-sequence-pool forward functor. Validates
// that input/output/index shapes agree (all trailing dims equal, index
// shape == output shape), then launches KeMaxSequencePool with one block of
// 256 threads per sequence on the context's stream.
template <typename T>
class MaxSeqPoolFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::LoDTensor& input, framework::Tensor* output,
framework::Tensor* index) {
auto in_dims = input.dims();
auto out_dims = output->dims();
auto idx_dims = index->dims();
PADDLE_ENFORCE_GT(in_dims.size(), static_cast<int64_t>(1));
PADDLE_ENFORCE_GT(out_dims.size(), 1);
for (int64_t i = 1; i < in_dims.size(); ++i) {
PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i]);
}
PADDLE_ENFORCE_EQ(idx_dims, out_dims);
// level-0 LoD gives the row offsets of each sequence
auto starts = input.lod()[0];
const T* in_data = input.data<T>();
T* out_data = output->data<T>();
int* max_index = index->data<int>();
int64_t num_seq = out_dims[0];
int64_t dim = output->numel() / num_seq;
dim3 threads(256, 1);
dim3 grid(num_seq, 1);
auto stream =
reinterpret_cast<const platform::CUDADeviceContext&>(context).stream();
KeMaxSequencePool<T><<<grid, threads, 0, stream>>>(
in_data, starts.data(), out_data, max_index, num_seq, dim);
}
};
// Backward of the max sequence pool: each pooled-output gradient element is
// scattered to the input row that produced the max in the forward pass
// (recorded in max_index). in_grad is assumed pre-zeroed by the caller.
// One thread per (sequence, feature) element of out_grad.
template <typename T>
__global__ void KeMaxSequencePoolGrad(const T* out_grad, const int* max_index,
T* in_grad, int64_t num_seq,
int64_t dim) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int col_idx = idx % dim;  // feature column of this element
if (idx < num_seq * dim) {
int step_id = max_index[idx];  // winning timestep from forward
in_grad[step_id * dim + col_idx] = out_grad[idx];
}
}
// GPU specialisation of the max-sequence-pool backward functor. Validates
// shapes, zero-fills the input gradient, then launches KeMaxSequencePoolGrad
// (128 threads per block, 1D grid covering num_seq*dim elements) on the
// context's stream to scatter out_grad back to the argmax positions.
template <typename T>
class MaxSeqPoolGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& out_grad,
const framework::Tensor& index,
framework::LoDTensor* in_grad) {
auto og_dims = out_grad.dims();
auto idx_dims = index.dims();
auto ig_dims = in_grad->dims();
PADDLE_ENFORCE_GT(og_dims.size(), static_cast<int64_t>(1));
PADDLE_ENFORCE_GT(ig_dims.size(), static_cast<int64_t>(1));
for (int64_t i = 1; i < og_dims.size(); ++i) {
PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i]);
}
PADDLE_ENFORCE_EQ(idx_dims, og_dims);
const T* og_data = out_grad.data<T>();
const int* max_index = index.data<int>();
T* ig_data = in_grad->data<T>();
// the scatter kernel only writes argmax positions, so zero the rest first
SetConstant<platform::GPUPlace, T> set_zero;
set_zero(context, in_grad, static_cast<T>(0.0));
int64_t num_seq = og_dims[0];
int64_t dim = out_grad.numel() / num_seq;
unsigned int blocks = (num_seq * dim + 128 - 1) / 128;  // ceil-div
dim3 threads(128, 1);
dim3 grid(blocks, 1);
auto stream =
reinterpret_cast<const platform::CUDADeviceContext&>(context).stream();
KeMaxSequencePoolGrad<T><<<grid, threads, 0, stream>>>(
og_data, max_index, ig_data, num_seq, dim);
}
};
template class MaxSeqPoolFunctor<platform::GPUPlace, float>;
template class MaxSeqPoolFunctor<platform::GPUPlace, double>;
template class MaxSeqPoolGradFunctor<platform::GPUPlace, float>;
template class MaxSeqPoolGradFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
0a1ef37c963693319ba3a06294a144c36192986d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <sys/time.h>
#include <omp.h>
#define TILE_DIM 32
using namespace std;
/* Multiple GPUs
* - distribute GPUs to OpenMP threads
* - distribute data (computation) to GPUs
* - (export CUDA_VISIBLE_DEVICES=1,2)
*/
// Tiled matrix multiply C = A*B using TILE_DIM x TILE_DIM shared-memory
// tiles. Preconditions: blockDim == (TILE_DIM, TILE_DIM) and all matrix
// dimensions are multiples of TILE_DIM — there is no bounds check, so a
// non-divisible size would read/write out of range.
__global__ void matmul_shared(double *a, double* b, double *c, int aw, int bw) {
__shared__ double aTile[TILE_DIM][TILE_DIM], bTile[TILE_DIM][TILE_DIM];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0.0;
// march the tiles along the shared (inner) dimension aw
for (int ia=0; ia<aw; ia+=TILE_DIM) {
aTile[threadIdx.y][threadIdx.x] = a[row*aw + ia + threadIdx.x];
bTile[threadIdx.y][threadIdx.x] = b[(ia+threadIdx.y)*bw+col];
__syncthreads();  // tiles fully loaded before anyone reads them
for (int i = 0; i < TILE_DIM; i++) {
sum += aTile[threadIdx.y][i]* bTile[i][threadIdx.x];
}
__syncthreads();  // all reads done before the tiles are overwritten
}
c[row*bw+col] = sum;
}
// Teaching example: distribute nMultiply independent matrix multiplications
// across the available GPUs, one OpenMP CPU thread per GPU.
// NOTE(review): nMultiplyThread hard-codes a 7/3 split — assumes exactly two
// visible devices; no CUDA/HIP error checking; host (a,b,c) and device
// buffers are never freed (process-exit cleanup only); sTime is unused.
int main() {
//---------------------------------------------------------------
// number of gpus
int num_gpus = 0;
// (*) get number of devices using hipGetDeviceCount
hipGetDeviceCount(&num_gpus);
cout << "number of host CPUs : " << omp_get_num_procs() << endl;
cout << "number of CUDA devices: " << num_gpus << endl;
// set number of threads: one OpenMP thread per GPU
omp_set_num_threads(num_gpus);
//---------------------------------------------------------------
// number of matrix multiplication operations
int nMultiply = 10;
// work distribution to devices (two devices assumed)
// (*) find optimum distribution
int nMultiplyThread[] = {7,3};
time_t sTime = time(NULL);
timeval tt1[num_gpus], tt2[num_gpus];
int ms[num_gpus];
double fms[num_gpus];
int ah=2560;
int aw=2560;
int bh=aw;
int bw=2560;
// host arrays: nMultiply matrices of each operand, stored back to back
double *a = (double*)malloc(nMultiply * ah*aw*sizeof(double));
double *b = (double*)malloc(nMultiply * bh*bw*sizeof(double));
double *c = (double*)malloc(nMultiply * ah*bw*sizeof(double));
for (int k=0;k<nMultiply;k++) {
for (int i=0;i<ah;i++)
for (int j=0;j<aw;j++)
a[i*ah+j+ k*ah*aw] = (double)(i+j)*(k+1);  // NOTE(review): i*ah works only because ah==aw here
for (int i=0;i<bh;i++)
for (int j=0;j<bw;j++)
b[i*bh+j + k*bh*bw] = (double)(i-j)*(k+1);
}
#pragma omp parallel
{
// get thread id
int cpu_thread_id = omp_get_thread_num();
int gpu_id = -1;
// (*) set GPU device for this CPU thread
hipSetDevice(cpu_thread_id % num_gpus);
hipGetDevice(&gpu_id);
cout << "Thread : " << cpu_thread_id << " uses GPU # " << gpu_id << endl;
// number of multiplies for this CPU thread
int nMultiply_thread = nMultiplyThread[cpu_thread_id];
// device arrays, sized for this thread's share only
double *a_dev;
hipMalloc((void**) &a_dev, nMultiply_thread * ah*aw * sizeof(double));
double *b_dev;
hipMalloc((void**) &b_dev, nMultiply_thread * bh*bw * sizeof(double));
double *c_dev;
hipMalloc((void**) &c_dev, nMultiply_thread * ah*bw * sizeof(double));
// copy to device
// each thread calculates its host data offset (sum of earlier shares)
int previous = 0;
for (int pt=0; pt<cpu_thread_id; pt++)
previous += nMultiplyThread[pt];
int offsetA = previous * ah*aw;
int offsetB = previous * bh*bw;
int offsetC = previous * ah*bw;
hipDeviceSynchronize();
gettimeofday( &tt1[cpu_thread_id], NULL );
// (*) fill host pointers
hipMemcpy(a_dev, a + offsetA, nMultiply_thread * ah*aw * sizeof(double) , hipMemcpyHostToDevice);
hipMemcpy(b_dev, b + offsetB, nMultiply_thread * bh*bw * sizeof(double) , hipMemcpyHostToDevice);
// kernel run: one launch per matrix product in this thread's share
dim3 nBlocks(bw/TILE_DIM, ah/TILE_DIM, 1);
dim3 nThreads(TILE_DIM, TILE_DIM, 1);
for (int n=0; n<nMultiply_thread; n++) {
hipLaunchKernelGGL(( matmul_shared) , dim3(nBlocks), dim3(nThreads) , 0, 0, a_dev + n*ah*aw, b_dev + n*bh*bw,
c_dev + n*ah*bw, aw, bw);
}
// copy from device
// (*) fill host pointer
hipMemcpy(c + offsetC, c_dev, nMultiply_thread * ah*bw * sizeof(double) , hipMemcpyDeviceToHost);
hipDeviceSynchronize();
gettimeofday( &tt2[cpu_thread_id], NULL );
}
// timing: per-thread wall clock in seconds
for (int i=0; i<num_gpus; i++) {
ms[i] = (tt2[i].tv_sec - tt1[i].tv_sec);
ms[i] = ms[i] * 1000000 + (tt2[i].tv_usec - tt1[i].tv_usec);
fms[i] = ((double)ms[i])/1000000.0;
cout << "Thread : " << i << " computed " << nMultiplyThread[i]
<< " matrix multiplications : Comp time = " << fms[i] << endl;
}
cout << "value check = " << c[145] << endl;
}
| 0a1ef37c963693319ba3a06294a144c36192986d.cu | #include <iostream>
#include <sys/time.h>
#include <omp.h>
#define TILE_DIM 32
using namespace std;
/* Multiple GPUs
* - distribute GPUs to OpenMP threads
* - distribute data (computation) to GPUs
* - (export CUDA_VISIBLE_DEVICES=1,2)
*/
// Tiled matrix multiply C = A*B using TILE_DIM x TILE_DIM shared-memory
// tiles. Preconditions: blockDim == (TILE_DIM, TILE_DIM) and all matrix
// dimensions are multiples of TILE_DIM — there is no bounds check, so a
// non-divisible size would read/write out of range.
__global__ void matmul_shared(double *a, double* b, double *c, int aw, int bw) {
__shared__ double aTile[TILE_DIM][TILE_DIM], bTile[TILE_DIM][TILE_DIM];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0.0;
// march the tiles along the shared (inner) dimension aw
for (int ia=0; ia<aw; ia+=TILE_DIM) {
aTile[threadIdx.y][threadIdx.x] = a[row*aw + ia + threadIdx.x];
bTile[threadIdx.y][threadIdx.x] = b[(ia+threadIdx.y)*bw+col];
__syncthreads();  // tiles fully loaded before anyone reads them
for (int i = 0; i < TILE_DIM; i++) {
sum += aTile[threadIdx.y][i]* bTile[i][threadIdx.x];
}
__syncthreads();  // all reads done before the tiles are overwritten
}
c[row*bw+col] = sum;
}
// Teaching example: distribute nMultiply independent matrix multiplications
// across the available GPUs, one OpenMP CPU thread per GPU.
// NOTE(review): nMultiplyThread hard-codes a 7/3 split — assumes exactly two
// visible devices; no CUDA error checking; host (a,b,c) and device buffers
// are never freed; sTime is unused; cudaThreadSynchronize is deprecated
// (cudaDeviceSynchronize is the modern equivalent).
int main() {
//---------------------------------------------------------------
// number of gpus
int num_gpus = 0;
// (*) get number of devices using cudaGetDeviceCount
cudaGetDeviceCount(&num_gpus);
cout << "number of host CPUs : " << omp_get_num_procs() << endl;
cout << "number of CUDA devices: " << num_gpus << endl;
// set number of threads: one OpenMP thread per GPU
omp_set_num_threads(num_gpus);
//---------------------------------------------------------------
// number of matrix multiplication operations
int nMultiply = 10;
// work distribution to devices (two devices assumed)
// (*) find optimum distribution
int nMultiplyThread[] = {7,3};
time_t sTime = time(NULL);
timeval tt1[num_gpus], tt2[num_gpus];
int ms[num_gpus];
double fms[num_gpus];
int ah=2560;
int aw=2560;
int bh=aw;
int bw=2560;
// host arrays: nMultiply matrices of each operand, stored back to back
double *a = (double*)malloc(nMultiply * ah*aw*sizeof(double));
double *b = (double*)malloc(nMultiply * bh*bw*sizeof(double));
double *c = (double*)malloc(nMultiply * ah*bw*sizeof(double));
for (int k=0;k<nMultiply;k++) {
for (int i=0;i<ah;i++)
for (int j=0;j<aw;j++)
a[i*ah+j+ k*ah*aw] = (double)(i+j)*(k+1);  // NOTE(review): i*ah works only because ah==aw here
for (int i=0;i<bh;i++)
for (int j=0;j<bw;j++)
b[i*bh+j + k*bh*bw] = (double)(i-j)*(k+1);
}
#pragma omp parallel
{
// get thread id
int cpu_thread_id = omp_get_thread_num();
int gpu_id = -1;
// (*) set GPU device for this CPU thread
cudaSetDevice(cpu_thread_id % num_gpus);
cudaGetDevice(&gpu_id);
cout << "Thread : " << cpu_thread_id << " uses GPU # " << gpu_id << endl;
// number of multiplies for this CPU thread
int nMultiply_thread = nMultiplyThread[cpu_thread_id];
// device arrays, sized for this thread's share only
double *a_dev;
cudaMalloc((void**) &a_dev, nMultiply_thread * ah*aw * sizeof(double));
double *b_dev;
cudaMalloc((void**) &b_dev, nMultiply_thread * bh*bw * sizeof(double));
double *c_dev;
cudaMalloc((void**) &c_dev, nMultiply_thread * ah*bw * sizeof(double));
// copy to device
// each thread calculates its host data offset (sum of earlier shares)
int previous = 0;
for (int pt=0; pt<cpu_thread_id; pt++)
previous += nMultiplyThread[pt];
int offsetA = previous * ah*aw;
int offsetB = previous * bh*bw;
int offsetC = previous * ah*bw;
cudaThreadSynchronize();
gettimeofday( &tt1[cpu_thread_id], NULL );
// (*) fill host pointers
cudaMemcpy(a_dev, a + offsetA, nMultiply_thread * ah*aw * sizeof(double) , cudaMemcpyHostToDevice);
cudaMemcpy(b_dev, b + offsetB, nMultiply_thread * bh*bw * sizeof(double) , cudaMemcpyHostToDevice);
// kernel run: one launch per matrix product in this thread's share
dim3 nBlocks(bw/TILE_DIM, ah/TILE_DIM, 1);
dim3 nThreads(TILE_DIM, TILE_DIM, 1);
for (int n=0; n<nMultiply_thread; n++) {
matmul_shared <<< nBlocks, nThreads >>> (a_dev + n*ah*aw, b_dev + n*bh*bw,
c_dev + n*ah*bw, aw, bw);
}
// copy from device
// (*) fill host pointer
cudaMemcpy(c + offsetC, c_dev, nMultiply_thread * ah*bw * sizeof(double) , cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
gettimeofday( &tt2[cpu_thread_id], NULL );
}
// timing: per-thread wall clock in seconds
for (int i=0; i<num_gpus; i++) {
ms[i] = (tt2[i].tv_sec - tt1[i].tv_sec);
ms[i] = ms[i] * 1000000 + (tt2[i].tv_usec - tt1[i].tv_usec);
fms[i] = ((double)ms[i])/1000000.0;
cout << "Thread : " << i << " computed " << nMultiplyThread[i]
<< " matrix multiplications : Comp time = " << fms[i] << endl;
}
cout << "value check = " << c[145] << endl;
}
|
44afe76a5c682cb71b7ab8fe8a4b9b3a1ddb113a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdafx.h"
#include "ConcatLayerCudaEngine.h"
#include "core/cuda_platform.h"
using namespace np::engine;
using namespace np::engine::layers;
using namespace np::core::cuda;
// CUDA-backed concat layer engine; all state lives in the ConcatLayerEngine
// base, so construction just forwards the parameters.
ConcatLayerCudaEngine::ConcatLayerCudaEngine(const NetworkParameter& net_param, const network::HiddenLayer& layer)
: ConcatLayerEngine(net_param, layer)
{
}
// No CUDA resources are owned directly by this subclass.
ConcatLayerCudaEngine::~ConcatLayerCudaEngine()
{
}
// Copies one input's slice into/out of the concatenated buffer along the
// concat axis. Index math: each of num_concats outer items contributes
// input_concat_axis (input) / output_concat_axis (output) slots of
// concat_size elements; offset_concat_axis positions this input's slice.
// forward==true gathers input->output (Forward); forward==false scatters
// output->input (Backward gradient routing). N = elements of the input slice.
__global__ void Concat(const neuro_u32 N, const neuro_float* in_data,
const bool forward, const int num_concats, const int concat_size,
const int output_concat_axis, const int input_concat_axis,
const int offset_concat_axis, neuro_float* out_data)
{
CUDA_KERNEL_LOOP(index, N)
{
const int total_concat_size = concat_size * input_concat_axis;
const int concat_num = index / total_concat_size;      // outer item
const int concat_index = index % total_concat_size;    // offset within item
const int output_index = concat_index + (concat_num * output_concat_axis + offset_concat_axis) * concat_size;
if (forward)
{
out_data[output_index] = in_data[index];
}
else {
out_data[index] = in_data[output_index];
}
}
}
// Forward pass: gathers every input engine's output into output_buffer,
// laid out contiguously along the concat axis. Returns false (after a debug
// log) if any input's data is unavailable or a kernel launch fails.
// bTrain and batch_size are unused here; the geometry (m_num_concats,
// m_concat_input_size, m_output_concat_axis_size) is precomputed elsewhere.
bool ConcatLayerCudaEngine::Forward(bool bTrain, neuro_u32 batch_size, const _NEURO_TENSOR_DATA& output_buffer)
{
if (m_input_vector.size() == 0)
return true;
neuron_value* output_ptr = output_buffer.data.buffer;
int offset_concat_axis = 0;
for (neuro_u32 input_i = 0, input_n = m_input_vector.size(); input_i < input_n; input_i++)
{
_NEURO_TENSOR_DATA input_data;
if (!GetInputData(m_input_vector[input_i], input_data))
{
DEBUG_OUTPUT(L"no input data");
return false;
}
const neuron_value* input = input_data.data.buffer;
// extent of this input along the concat axis (1 if no axis configured)
const tensor::TensorShape& in_ts = m_input_vector[input_i].engine->GetOutTensorShape();
const int input_concat_axis = m_concat_axis < 0 ? 1 : in_ts[m_concat_axis];
const int input_concat_size = input_concat_axis * m_concat_input_size;
const int nthreads = input_concat_size * m_num_concats;
Concat << <CudaPlatform::GetCudaBlockCount(nthreads), CudaPlatform::threadsPerBlock >> >(
nthreads, input, true, m_num_concats, m_concat_input_size,
m_output_concat_axis_size, input_concat_axis, offset_concat_axis, output_ptr);
if (!CudaPlatform::CudaErrorCheck(hipPeekAtLastError()))
{
DEBUG_OUTPUT(L"failed Concat");
return false;
}
// next input starts after this one's slice
offset_concat_axis += input_concat_axis;
}
return true;
}
// Backward pass: scatters each input's slice of this layer's error buffer
// (forward==false in the Concat kernel) back into that input engine's error
// buffer. batch_size is unused; geometry fields are precomputed elsewhere.
// Fix: the concat-axis offset is now advanced for input-type layers too.
// Previously the early `continue` skipped the offset increment, so every
// input after an input-type layer read a misaligned slice relative to
// Forward (which accumulates the offset over *all* inputs); the inner
// `!= input` guard was dead code left over from before the `continue`.
bool ConcatLayerCudaEngine::Backward(neuro_u32 batch_size)
{
	neuron_value* error_ptr = m_error_buffer.data.buffer;

	int offset_concat_axis = 0;
	for (neuro_u32 input_i = 0, input_n = m_input_vector.size(); input_i < input_n; input_i++)
	{
		HiddenLayerEngine* input_engine = (HiddenLayerEngine*)m_input_vector[input_i].engine;

		// extent of this input along the concat axis (1 if no axis configured)
		const tensor::TensorShape& in_ts = m_input_vector[input_i].engine->GetOutTensorShape();
		const int input_concat_axis = m_concat_axis < 0 ? 1 : in_ts[m_concat_axis];

		// Input-type layers receive no gradient, but their extent still
		// occupies a slice of the error buffer: skip only the copy.
		if (input_engine->GetLayerType() != network::_layer_type::input)
		{
			const _NEURO_TENSOR_DATA& input_error = input_engine->GetErrorBuffer();
			const int input_concat_size = input_concat_axis * m_concat_input_size;
			const int nthreads = input_concat_size * m_num_concats;

			neuron_value* in_err_ptr = input_error.data.buffer;
			Concat << <CudaPlatform::GetCudaBlockCount(nthreads), CudaPlatform::threadsPerBlock >> >(
				nthreads, error_ptr, false, m_num_concats, m_concat_input_size,
				m_output_concat_axis_size, input_concat_axis, offset_concat_axis, in_err_ptr);
			if (!CudaPlatform::CudaErrorCheck(hipPeekAtLastError()))
			{
				DEBUG_OUTPUT(L"failed Concat");
				return false;
			}
		}
		// advance past this input's slice regardless of layer type,
		// mirroring the offset accumulation in Forward
		offset_concat_axis += input_concat_axis;
	}
	return true;
}
| 44afe76a5c682cb71b7ab8fe8a4b9b3a1ddb113a.cu | #include "stdafx.h"
#include "ConcatLayerCudaEngine.h"
#include "core/cuda_platform.h"
using namespace np::engine;
using namespace np::engine::layers;
using namespace np::core::cuda;
ConcatLayerCudaEngine::ConcatLayerCudaEngine(const NetworkParameter& net_param, const network::HiddenLayer& layer)
: ConcatLayerEngine(net_param, layer)
{
}
ConcatLayerCudaEngine::~ConcatLayerCudaEngine()
{
}
__global__ void Concat(const neuro_u32 N, const neuro_float* in_data,
const bool forward, const int num_concats, const int concat_size,
const int output_concat_axis, const int input_concat_axis,
const int offset_concat_axis, neuro_float* out_data)
{
CUDA_KERNEL_LOOP(index, N)
{
const int total_concat_size = concat_size * input_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int output_index = concat_index + (concat_num * output_concat_axis + offset_concat_axis) * concat_size;
if (forward)
{
out_data[output_index] = in_data[index];
}
else {
out_data[index] = in_data[output_index];
}
}
}
bool ConcatLayerCudaEngine::Forward(bool bTrain, neuro_u32 batch_size, const _NEURO_TENSOR_DATA& output_buffer)
{
if (m_input_vector.size() == 0)
return true;
neuron_value* output_ptr = output_buffer.data.buffer;
int offset_concat_axis = 0;
for (neuro_u32 input_i = 0, input_n = m_input_vector.size(); input_i < input_n; input_i++)
{
_NEURO_TENSOR_DATA input_data;
if (!GetInputData(m_input_vector[input_i], input_data))
{
DEBUG_OUTPUT(L"no input data");
return false;
}
const neuron_value* input = input_data.data.buffer;
const tensor::TensorShape& in_ts = m_input_vector[input_i].engine->GetOutTensorShape();
const int input_concat_axis = m_concat_axis < 0 ? 1 : in_ts[m_concat_axis];
const int input_concat_size = input_concat_axis * m_concat_input_size;
const int nthreads = input_concat_size * m_num_concats;
Concat << <CudaPlatform::GetCudaBlockCount(nthreads), CudaPlatform::threadsPerBlock >> >(
nthreads, input, true, m_num_concats, m_concat_input_size,
m_output_concat_axis_size, input_concat_axis, offset_concat_axis, output_ptr);
if (!CudaPlatform::CudaErrorCheck(cudaPeekAtLastError()))
{
DEBUG_OUTPUT(L"failed Concat");
return false;
}
offset_concat_axis += input_concat_axis;
}
return true;
}
bool ConcatLayerCudaEngine::Backward(neuro_u32 batch_size)
{
neuron_value* error_ptr = m_error_buffer.data.buffer;
int offset_concat_axis = 0;
for (neuro_u32 input_i = 0, input_n = m_input_vector.size(); input_i < input_n; input_i++)
{
HiddenLayerEngine* input_engine = (HiddenLayerEngine*)m_input_vector[input_i].engine;
if (input_engine->GetLayerType() == network::_layer_type::input)
continue;
const _NEURO_TENSOR_DATA& input_error = input_engine->GetErrorBuffer();
const tensor::TensorShape& in_ts = m_input_vector[input_i].engine->GetOutTensorShape();
const int input_concat_axis = m_concat_axis < 0 ? 1 : in_ts[m_concat_axis];
const int input_concat_size = input_concat_axis * m_concat_input_size;
const int nthreads = input_concat_size * m_num_concats;
if (m_input_vector[input_i].engine->GetLayerType()!=network::_layer_type::input)
{
neuron_value* in_err_ptr = input_error.data.buffer;
Concat << <CudaPlatform::GetCudaBlockCount(nthreads), CudaPlatform::threadsPerBlock >> >(
nthreads, error_ptr, false, m_num_concats, m_concat_input_size,
m_output_concat_axis_size, input_concat_axis, offset_concat_axis, in_err_ptr);
if (!CudaPlatform::CudaErrorCheck(cudaPeekAtLastError()))
{
DEBUG_OUTPUT(L"failed Concat");
return false;
}
}
offset_concat_axis += input_concat_axis;
}
return true;
}
|
5ee4dbd63df968c66dda46c8279fd407a5a34cc9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
GPU Kernels for the mesh to particles functions
@author: Stefan Hegglin, Adrian Oeftiger
*/
extern "C" {
__global__ void mesh_to_particles_2d(int nparticles,
double* particles_quantity, double *mesh_quantity,
const int stridex,
double *wij, double *wi1j, double *wij1, double *wi1j1,
int *i, int *j)
{
int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
int ix = i[pidx];
int jx = j[pidx];
if (pidx < nparticles) {
particles_quantity[pidx] = ( wij[pidx] * mesh_quantity[jx + ix*stridex ]
+ wij1[pidx] * mesh_quantity[jx+1 + ix*stridex ]
+ wi1j[pidx] * mesh_quantity[jx+ + (ix+1)*stridex]
+ wi1j1[pidx] * mesh_quantity[jx+1 + (ix+1)*stridex]);
}
}
__global__ void field_to_particles_2d(int nparticles,
double* forcex, double* forcey, double* fieldx, double* fieldy,
const int stride, double *wij, double *wi1j, double *wij1, double *wi1j1, int *i, int *j)
{
int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
int jx = j[pidx];
int ix = i[pidx];
if (pidx < nparticles) {
forcex[pidx] = ( wij[pidx] * fieldx[jx + ix*stride]
+ wij1[pidx] * fieldx[jx+1 + ix*stride]
+ wi1j[pidx] * fieldx[jx + (ix+1)*stride]
+ wi1j1[pidx] *fieldx[jx+1 + (ix+1)*stride]);
forcey[pidx] = ( wij[pidx] * fieldy[jx + ix*stride]
+ wij1[pidx] * fieldy[jx+1 + ix*stride]
+ wi1j[pidx] * fieldy[jx + (ix+1)*stride]
+ wi1j1[pidx] *fieldy[jx+1 + (ix+1)*stride]);
}
}
__global__ void field_to_particles_3d(int nparticles,
double* forcex, double* forcey, double* forcez,
double* fieldx, double* fieldy, double* fieldz,
const int stridex, const int stridey,
double *wijk, double *wi1jk, double *wij1k, double *wi1j1k,
double *wijk1, double *wi1jk1, double* wij1k1, double* wi1j1k1,
int *i, int *j, int* k)
{
int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
int ix = i[pidx];
int jx = j[pidx];
int kx = k[pidx];
if (pidx < nparticles) {
forcex[pidx] = ( wijk[pidx] * fieldx[jx + ix*stridex + kx*stridex*stridey]
+ wij1k[pidx] * fieldx[jx+1 + ix*stridex + kx*stridex*stridey]
+ wi1jk[pidx] * fieldx[jx+ + (ix+1)*stridex + kx*stridex*stridey]
+ wi1j1k[pidx] * fieldx[jx+1 + (ix+1)*stridex + kx*stridex*stridey]
+ wijk1[pidx] * fieldx[jx + ix*stridex + (kx+1)*stridex*stridey]
+ wij1k1[pidx] * fieldx[jx+1 + ix*stridex + (kx+1)*stridex*stridey]
+ wi1jk1[pidx] * fieldx[jx+ + (ix+1)*stridex + (kx+1)*stridex*stridey]
+ wi1j1k1[pidx]* fieldx[jx+1 + (ix+1)*stridex + (kx+1)*stridex*stridey]);
forcey[pidx] = ( wijk[pidx] * fieldy[jx + ix*stridex + kx*stridex*stridey]
+ wij1k[pidx] * fieldy[jx+1 + ix*stridex + kx*stridex*stridey]
+ wi1jk[pidx] * fieldy[jx+ + (ix+1)*stridex + kx*stridex*stridey]
+ wi1j1k[pidx] * fieldy[jx+1 + (ix+1)*stridex + kx*stridex*stridey]
+ wijk1[pidx] * fieldy[jx + ix*stridex + (kx+1)*stridex*stridey]
+ wij1k1[pidx] * fieldy[jx+1 + ix*stridex + (kx+1)*stridex*stridey]
+ wi1jk1[pidx] * fieldy[jx+ + (ix+1)*stridex + (kx+1)*stridex*stridey]
+ wi1j1k1[pidx]* fieldy[jx+1 + (ix+1)*stridex + (kx+1)*stridex*stridey]);
forcez[pidx] = ( wijk[pidx] * fieldz[jx + ix*stridex + kx*stridex*stridey]
+ wij1k[pidx] * fieldz[jx+1 + ix*stridex + kx*stridex*stridey]
+ wi1jk[pidx] * fieldz[jx+ + (ix+1)*stridex + kx*stridex*stridey]
+ wi1j1k[pidx] * fieldz[jx+1 + (ix+1)*stridex + kx*stridex*stridey]
+ wijk1[pidx] * fieldz[jx + ix*stridex + (kx+1)*stridex*stridey]
+ wij1k1[pidx] * fieldz[jx+1 + ix*stridex + (kx+1)*stridex*stridey]
+ wi1jk1[pidx] * fieldz[jx+ + (ix+1)*stridex + (kx+1)*stridex*stridey]
+ wi1j1k1[pidx]* fieldz[jx+1 + (ix+1)*stridex + (kx+1)*stridex*stridey]);
}
}
__global__ void mesh_to_particles_3d(int nparticles,
double* particles_quantity, double *mesh_quantity,
const int stridex, const int stridey,
double *wijk, double *wi1jk, double *wij1k, double *wi1j1k,
double *wijk1, double *wi1jk1, double* wij1k1, double* wi1j1k1,
int *i, int *j, int* k)
{
int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
int ix = i[pidx];
int jx = j[pidx];
int kx = k[pidx];
if (pidx < nparticles) {
particles_quantity[pidx] = ( wijk[pidx] * mesh_quantity[jx + ix*stridex + kx*stridex*stridey]
+ wij1k[pidx] * mesh_quantity[jx+1 + ix*stridex + kx*stridex*stridey]
+ wi1jk[pidx] * mesh_quantity[jx+ + (ix+1)*stridex + kx*stridex*stridey]
+ wi1j1k[pidx] * mesh_quantity[jx+1 + (ix+1)*stridex + kx*stridex*stridey]
+ wijk1[pidx] * mesh_quantity[jx + ix*stridex + (kx+1)*stridex*stridey]
+ wij1k1[pidx] * mesh_quantity[jx+1 + ix*stridex + (kx+1)*stridex*stridey]
+ wi1jk1[pidx] * mesh_quantity[jx+ + (ix+1)*stridex + (kx+1)*stridex*stridey]
+ wi1j1k1[pidx]* mesh_quantity[jx+1 + (ix+1)*stridex + (kx+1)*stridex*stridey]);
}
}
} /* end extern C */
| 5ee4dbd63df968c66dda46c8279fd407a5a34cc9.cu | /*
GPU Kernels for the mesh to particles functions
@author: Stefan Hegglin, Adrian Oeftiger
*/
extern "C" {
__global__ void mesh_to_particles_2d(int nparticles,
double* particles_quantity, double *mesh_quantity,
const int stridex,
double *wij, double *wi1j, double *wij1, double *wi1j1,
int *i, int *j)
{
int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
int ix = i[pidx];
int jx = j[pidx];
if (pidx < nparticles) {
particles_quantity[pidx] = ( wij[pidx] * mesh_quantity[jx + ix*stridex ]
+ wij1[pidx] * mesh_quantity[jx+1 + ix*stridex ]
+ wi1j[pidx] * mesh_quantity[jx+ + (ix+1)*stridex]
+ wi1j1[pidx] * mesh_quantity[jx+1 + (ix+1)*stridex]);
}
}
__global__ void field_to_particles_2d(int nparticles,
double* forcex, double* forcey, double* fieldx, double* fieldy,
const int stride, double *wij, double *wi1j, double *wij1, double *wi1j1, int *i, int *j)
{
int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
int jx = j[pidx];
int ix = i[pidx];
if (pidx < nparticles) {
forcex[pidx] = ( wij[pidx] * fieldx[jx + ix*stride]
+ wij1[pidx] * fieldx[jx+1 + ix*stride]
+ wi1j[pidx] * fieldx[jx + (ix+1)*stride]
+ wi1j1[pidx] *fieldx[jx+1 + (ix+1)*stride]);
forcey[pidx] = ( wij[pidx] * fieldy[jx + ix*stride]
+ wij1[pidx] * fieldy[jx+1 + ix*stride]
+ wi1j[pidx] * fieldy[jx + (ix+1)*stride]
+ wi1j1[pidx] *fieldy[jx+1 + (ix+1)*stride]);
}
}
__global__ void field_to_particles_3d(int nparticles,
double* forcex, double* forcey, double* forcez,
double* fieldx, double* fieldy, double* fieldz,
const int stridex, const int stridey,
double *wijk, double *wi1jk, double *wij1k, double *wi1j1k,
double *wijk1, double *wi1jk1, double* wij1k1, double* wi1j1k1,
int *i, int *j, int* k)
{
int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
int ix = i[pidx];
int jx = j[pidx];
int kx = k[pidx];
if (pidx < nparticles) {
forcex[pidx] = ( wijk[pidx] * fieldx[jx + ix*stridex + kx*stridex*stridey]
+ wij1k[pidx] * fieldx[jx+1 + ix*stridex + kx*stridex*stridey]
+ wi1jk[pidx] * fieldx[jx+ + (ix+1)*stridex + kx*stridex*stridey]
+ wi1j1k[pidx] * fieldx[jx+1 + (ix+1)*stridex + kx*stridex*stridey]
+ wijk1[pidx] * fieldx[jx + ix*stridex + (kx+1)*stridex*stridey]
+ wij1k1[pidx] * fieldx[jx+1 + ix*stridex + (kx+1)*stridex*stridey]
+ wi1jk1[pidx] * fieldx[jx+ + (ix+1)*stridex + (kx+1)*stridex*stridey]
+ wi1j1k1[pidx]* fieldx[jx+1 + (ix+1)*stridex + (kx+1)*stridex*stridey]);
forcey[pidx] = ( wijk[pidx] * fieldy[jx + ix*stridex + kx*stridex*stridey]
+ wij1k[pidx] * fieldy[jx+1 + ix*stridex + kx*stridex*stridey]
+ wi1jk[pidx] * fieldy[jx+ + (ix+1)*stridex + kx*stridex*stridey]
+ wi1j1k[pidx] * fieldy[jx+1 + (ix+1)*stridex + kx*stridex*stridey]
+ wijk1[pidx] * fieldy[jx + ix*stridex + (kx+1)*stridex*stridey]
+ wij1k1[pidx] * fieldy[jx+1 + ix*stridex + (kx+1)*stridex*stridey]
+ wi1jk1[pidx] * fieldy[jx+ + (ix+1)*stridex + (kx+1)*stridex*stridey]
+ wi1j1k1[pidx]* fieldy[jx+1 + (ix+1)*stridex + (kx+1)*stridex*stridey]);
forcez[pidx] = ( wijk[pidx] * fieldz[jx + ix*stridex + kx*stridex*stridey]
+ wij1k[pidx] * fieldz[jx+1 + ix*stridex + kx*stridex*stridey]
+ wi1jk[pidx] * fieldz[jx+ + (ix+1)*stridex + kx*stridex*stridey]
+ wi1j1k[pidx] * fieldz[jx+1 + (ix+1)*stridex + kx*stridex*stridey]
+ wijk1[pidx] * fieldz[jx + ix*stridex + (kx+1)*stridex*stridey]
+ wij1k1[pidx] * fieldz[jx+1 + ix*stridex + (kx+1)*stridex*stridey]
+ wi1jk1[pidx] * fieldz[jx+ + (ix+1)*stridex + (kx+1)*stridex*stridey]
+ wi1j1k1[pidx]* fieldz[jx+1 + (ix+1)*stridex + (kx+1)*stridex*stridey]);
}
}
__global__ void mesh_to_particles_3d(int nparticles,
double* particles_quantity, double *mesh_quantity,
const int stridex, const int stridey,
double *wijk, double *wi1jk, double *wij1k, double *wi1j1k,
double *wijk1, double *wi1jk1, double* wij1k1, double* wi1j1k1,
int *i, int *j, int* k)
{
int pidx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y* blockDim.x + threadIdx.x;
int ix = i[pidx];
int jx = j[pidx];
int kx = k[pidx];
if (pidx < nparticles) {
particles_quantity[pidx] = ( wijk[pidx] * mesh_quantity[jx + ix*stridex + kx*stridex*stridey]
+ wij1k[pidx] * mesh_quantity[jx+1 + ix*stridex + kx*stridex*stridey]
+ wi1jk[pidx] * mesh_quantity[jx+ + (ix+1)*stridex + kx*stridex*stridey]
+ wi1j1k[pidx] * mesh_quantity[jx+1 + (ix+1)*stridex + kx*stridex*stridey]
+ wijk1[pidx] * mesh_quantity[jx + ix*stridex + (kx+1)*stridex*stridey]
+ wij1k1[pidx] * mesh_quantity[jx+1 + ix*stridex + (kx+1)*stridex*stridey]
+ wi1jk1[pidx] * mesh_quantity[jx+ + (ix+1)*stridex + (kx+1)*stridex*stridey]
+ wi1j1k1[pidx]* mesh_quantity[jx+1 + (ix+1)*stridex + (kx+1)*stridex*stridey]);
}
}
} /* end extern C */
|
b6f9b80f2e1163ee7680daf7b591fa30ab29aa7b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_2d_layer_updater_cuda_kepler.h"
#include <hip/hip_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "cuda_texture.h"
#include "../convolution_layer.h"
texture<float, hipTextureType1D, hipReadModeElementType> output_tex_ref;
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
struct __align__(4) xy_config
{
xy_config(int y, int x)
{
this->xy_pair = (((unsigned int)y) << 16) | (unsigned int)x;
}
unsigned int xy_pair;
};
struct __align__(4) feature_map_config
{
feature_map_config(int input_feature_map_id, int output_feature_map_id)
{
this->feature_map_pair = (((unsigned int)input_feature_map_id) << 16) | (unsigned int)output_feature_map_id;
}
unsigned int feature_map_pair;
};
struct __align__(4) output_y_weight_y_config
{
output_y_weight_y_config(int output_y, int weight_y)
{
this->output_y_window_y_pair = (((unsigned int)output_y) << 16) | (unsigned int)weight_y;
}
unsigned int output_y_window_y_pair;
};
struct __align__(4) output_y_weight_y_weight_x_config
{
output_y_weight_y_weight_x_config(int output_y, int weight_y, int weight_x)
{
this->output_y_window_y_window_x_pair = (((unsigned int)output_y) << 16) | (((unsigned int)weight_y) << 8) | ((unsigned int)weight_x);
}
unsigned int output_y_window_y_window_x_pair;
};
template<int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_2d_tex_upd_kernel_kepler(
float * __restrict output,
hipTextureObject_t input_tex,
hipTextureObject_t weights_tex,
const float * __restrict biases,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_width,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int base_input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_output_feature_map = window_width * window_height * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_height) + y) * input_width + x + texture_offset;
int weights_offset = ((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_width * window_height;
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
}
input_elem_id += input_width - window_width;
}
input_elem_id += input_width * (input_height - window_height);
}
float * base_output = output + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_2d_tex_exact_upd_kernel_kepler(
float * __restrict output,
hipTextureObject_t input_tex,
hipTextureObject_t weights_tex,
const float * __restrict biases,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int base_input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_output_feature_map = WINDOW_WIDTH * window_height * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_height) + y) * input_width + x + texture_offset;
int weights_offset = ((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * WINDOW_WIDTH * window_height;
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
}
input_elem_id += input_width - WINDOW_WIDTH;
}
input_elem_id += input_width * (input_height - window_height);
}
float * base_output = output + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_height * output_width;
if (single_input_feature_map_group)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
__global__ void convolution_2d_update_biases_upd_kernel_kepler(
float * __restrict biases,
const float * __restrict output_errors,
const float * __restrict training_speed,
int output_feature_map_count,
int output_elem_count_per_feature_map,
int min_iteration_count)
{
int thread_id = threadIdx.x;
int output_feature_map_id = blockIdx.y;
int entry_id = blockIdx.z;
int threadblock_size = blockDim.x;
float sum = 0.0F;
const float * current_error = output_errors + (entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map;
int current_output_neuron_id = thread_id;
for(int i = 0; i < min_iteration_count; ++i)
{
sum += current_error[current_output_neuron_id];
current_output_neuron_id += threadblock_size;
}
if (current_output_neuron_id < output_elem_count_per_feature_map)
sum += current_error[current_output_neuron_id];
int lane_id = thread_id & 31;
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
sum += __shfl_down(sum, tx);
}
if (lane_id == 0)
{
int offset = entry_id * output_feature_map_count + output_feature_map_id;
float current_training_speed_val = training_speed[offset];
atomicAdd(biases + offset, sum * current_training_speed_val);
}
}
template<int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_2d_deriviative_tex_upd_kernel_kepler(
float * __restrict input_errors,
hipTextureObject_t output_tex,
hipTextureObject_t weights_tex,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_width,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int base_output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_input_feature_map = window_width * window_height;
int output_elem_id = ((entry_id * output_feature_map_count + base_output_feature_map_id) * output_height + y) * output_width + x;
int weights_offset = ((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_width * window_height;
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit1 = (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
bool b_fit2 = b_fit1 && (i > min_x_exclusive) && (i <= max_x_inclusive);;
if (b_fit2)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
weights_offset++;
}
}
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit2 = b_fit1 && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit2)
{
float inp = tex1Dfetch<float>(output_tex, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
}
}
weights_offset++;
output_elem_id--;
}
output_elem_id += window_width - output_width;
}
weights_offset += window_width * window_height * (input_feature_map_count - 1);
output_elem_id += output_width * (output_height + window_height);
}
float * base_input = input_errors + ((entry_id * input_feature_map_count + input_feature_map_id) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_height * input_width;
if (single_output_feature_map_group == 1)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_2d_deriviative_tex_exact_upd_kernel_kepler(
float * __restrict input_errors,
hipTextureObject_t output_tex,
hipTextureObject_t weights_tex,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int base_output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_input_feature_map = WINDOW_WIDTH * window_height;
int output_elem_id = ((entry_id * output_feature_map_count + base_output_feature_map_id) * output_height + y) * output_width + x;
int weights_offset = ((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * WINDOW_WIDTH * window_height;
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
unsigned int mask = 0;
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit1 = (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit2 = b_fit1 && (((1 << i) & mask) != 0);
if (b_fit2)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
weights_offset++;
}
output_elem_id -= output_width;
}
weights_offset += WINDOW_WIDTH * window_height * (input_feature_map_count - 1);
output_elem_id += output_width * (output_height + window_height);
}
float * base_input = input_errors + ((entry_id * input_feature_map_count + input_feature_map_id) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_height * input_width;
if (single_output_feature_map_group == 1)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
// Weight-gradient accumulation for 2D convolution, generic (runtime) window width.
// One thread covers WINDOW_WIDTH_LOCAL consecutive weight columns starting at
// weight_x, at one weight row (weight_y), for FEATURE_MAP_BLOCK_SIZE consecutive
// output feature maps, for one entry.  It walks the output rows starting at
// output_y_start_id with stride output_y_group_count, multiplying output errors
// (fetched through the module-scope legacy texture reference output_tex_ref) by
// input neurons (fetched through input_tex) using a sliding input window.
// The accumulated sums are scaled element-wise by training_speed and added to
// weights: a plain add when single_output_y_group (this thread owns its weights),
// atomicAdd otherwise, since several y-groups then target the same weights.
// Grid mapping: x -> packed (output_y start, weight_y, weight_x) config,
// y -> packed (output fm block base, input fm) config, z -> entry.
template<bool single_output_y_group>
__global__ void convolution_2d_update_weights_upd_kernel_kepler(
float * __restrict weights,
hipTextureObject_t input_tex,
const float * __restrict training_speed,
const output_y_weight_y_weight_x_config * __restrict output_y_weight_y_weight_x_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_width,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_y_group_count,
int texture_offset,
int entry_count,
bool different_input,
int output_y_weight_y_weight_x_config_count,
int feature_map_config_count)
{
int output_y_weight_y_weight_x_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
// Guard: the launch grid may be larger than the config space.
if ((output_y_weight_y_weight_x_config_id < output_y_weight_y_weight_x_config_count) && (feature_map_config_id < feature_map_config_count) && (entry_id < entry_count))
{
// Unpack packed config: bits 0-7 weight_x, bits 8-15 weight_y, bits 16+ start row.
output_y_weight_y_weight_x_config yw = output_y_weight_y_weight_x_config_list[output_y_weight_y_weight_x_config_id];
int weight_x = yw.output_y_window_y_window_x_pair & 0xFF;
int weight_y = (yw.output_y_window_y_window_x_pair & 0xFFFF) >> 8;
int output_y_start_id = yw.output_y_window_y_window_x_pair >> 16;
// Unpack feature-map pair: low 16 bits output feature map, high 16 input feature map.
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int output_neuron_count_per_feature_map = output_width * output_height;
int output_elem_id = ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + output_y_start_id) * output_width;
// When different_input is false every entry reads the single shared input image.
int input_elem_id = (((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_height + weight_y + output_y_start_id) * input_width + texture_offset + weight_x;
// Per-thread accumulators: one per (feature map, weight column).
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
for(int output_y = output_y_start_id; output_y < output_height; output_y += output_y_group_count)
{
// Prime the sliding window: slot 0 is written by the shift below before it is
// ever read, so only WINDOW_WIDTH_LOCAL-1 prefetches are needed here.
float input_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_buf[i] = tex1Dfetch<float>(input_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
// Gather the output error for each feature map in this block.
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch(output_tex_ref, output_elem_id + output_neuron_count_per_feature_map * i);
// Slide the input window one element to the right, fetching one new value.
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch<float>(input_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_buf[j];
output_elem_id++;
input_elem_id++;
}
// Jump to this thread's next output row (stride output_y_group_count); the input
// pointer also skips the unconsumed tail of the window row.
output_elem_id += output_width * (output_y_group_count - 1);
input_elem_id += input_width * (output_y_group_count - 1) + (window_width - WINDOW_WIDTH_LOCAL);
}
// Write-back: each gradient is scaled by its matching per-weight training speed.
int offset = (((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_height * window_width;
float * cur_weights = weights + offset;
const float * cur_training_speed = training_speed + offset;
if (single_output_y_group)
{
// Sole y-group: no other thread touches these weights, plain add suffices.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
// Skip feature maps past the end of a partial block.
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
// Multiple y-groups accumulate into the same weights concurrently: atomicAdd.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
// Weight-gradient accumulation for 2D convolution, "exact" variant: the window
// width is a compile-time template parameter, so one thread covers the ENTIRE
// window row (all WINDOW_WIDTH columns, weight_x implicitly 0) and the inner
// loops fully unroll.  Otherwise identical in structure to the generic kernel:
// FEATURE_MAP_BLOCK_SIZE output feature maps per thread, output rows strided by
// output_y_group_count, output errors read via output_tex_ref, inputs via
// input_tex, results scaled by training_speed and added to weights (plain add
// when single_output_y_group, atomicAdd otherwise).
// Grid mapping: x -> packed (output_y start, weight_y) config, y -> packed
// (output fm block base, input fm) config, z -> entry.
template<int WINDOW_WIDTH, bool single_output_y_group>
__global__ void convolution_2d_update_weights_exact_upd_kernel_kepler(
float * __restrict weights,
hipTextureObject_t input_tex,
const float * __restrict training_speed,
const output_y_weight_y_config * __restrict output_y_weight_y_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_y_group_count,
int texture_offset,
int entry_count,
bool different_input,
int output_y_weight_y_config_count,
int feature_map_config_count)
{
int output_y_weight_y_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
// Guard: the launch grid may be larger than the config space.
if ((output_y_weight_y_config_id < output_y_weight_y_config_count) && (feature_map_config_id < feature_map_config_count) && (entry_id < entry_count))
{
// Unpack packed config: low 16 bits weight_y, high 16 bits starting output row.
output_y_weight_y_config yw = output_y_weight_y_config_list[output_y_weight_y_config_id];
int weight_y = yw.output_y_window_y_pair & 0xFFFF;
int output_y_start_id = yw.output_y_window_y_pair >> 16;
// Unpack feature-map pair: low 16 bits output feature map, high 16 input feature map.
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int output_neuron_count_per_feature_map = output_width * output_height;
int output_elem_id = ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + output_y_start_id) * output_width;
// When different_input is false every entry reads the single shared input image.
int input_elem_id = (((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_height + weight_y + output_y_start_id) * input_width + texture_offset;
// Per-thread accumulators: one per (feature map, weight column).
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
sums[i] = 0.0F;
for(int output_y = output_y_start_id; output_y < output_height; output_y += output_y_group_count)
{
// Prime the sliding window: slot 0 is written by the shift below before it is
// ever read, so only WINDOW_WIDTH-1 prefetches are needed here.
float input_buf[WINDOW_WIDTH];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH; ++i)
{
input_buf[i] = tex1Dfetch<float>(input_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
// Gather the output error for each feature map in this block.
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch(output_tex_ref, output_elem_id + output_neuron_count_per_feature_map * i);
// Slide the input window one element to the right, fetching one new value.
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH - 1] = tex1Dfetch<float>(input_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_buf[j];
output_elem_id++;
input_elem_id++;
}
// Jump to this thread's next output row (stride output_y_group_count).
output_elem_id += output_width * (output_y_group_count - 1);
input_elem_id += input_width * (output_y_group_count - 1);
}
// Write-back: each gradient is scaled by its matching per-weight training speed.
int offset = (((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_height + weight_y) * WINDOW_WIDTH;
int weight_count_per_output_feature_map = input_feature_map_count * window_height * WINDOW_WIDTH;
float * cur_weights = weights + offset;
const float * cur_training_speed = training_speed + offset;
if (single_output_y_group)
{
// Sole y-group: no other thread touches these weights, plain add suffices.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
// Skip feature maps past the end of a partial block.
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
// Multiple y-groups accumulate into the same weights concurrently: atomicAdd.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH; ++j)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
namespace nnforge
{
namespace cuda
{
// Configures the legacy texture reference through which the weight-update
// kernels read output errors: border addressing (out-of-range fetches return
// zero) and unnormalized coordinates (direct element indexing).
convolution_2d_layer_updater_cuda_kepler::convolution_2d_layer_updater_cuda_kepler()
{
output_tex_ref.addressMode[0] = hipAddressModeBorder;
output_tex_ref.normalized = false;
}
// Nothing to release explicitly; texture objects live in dynamic_memobjects
// smart pointers managed by the caller.
convolution_2d_layer_updater_cuda_kepler::~convolution_2d_layer_updater_cuda_kepler()
{
}
// Upper bound on the per-thread x-block size used by the forward/backprop kernels.
#define MAX_BLOCK_SIZE 5
// Widest convolution window handled by the fully-unrolled "exact" kernels;
// wider windows fall back to the generic runtime-window-width kernels.
#define MAX_WINDOW_WIDTH 10
// Launches the forward "exact" kernel with compile-time window width / block size.
// Relies on locals of the enclosing method: kernel_dims, stream_id, input_tex,
// weights_tex, config lists/counts, texture_offset, entry_count.
#define launch_exact_kernel_const_const(window_width_const, block_size_const, single_input_feature_map_group) \
	hipLaunchKernelGGL(( convolution_2d_tex_exact_upd_kernel_kepler<window_width_const,block_size_const,single_input_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, input_tex, weights_tex, *data[1], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, xy_config_count, feature_map_config_count);
// Converts a runtime window width (1..MAX_WINDOW_WIDTH) into the matching
// compile-time template instantiation of the forward "exact" kernel.
#define launch_exact_kernel_const(window_width, block_size_const, single_input_feature_map_group) \
	switch (window_width) \
		{ \
		case 1: \
			launch_exact_kernel_const_const(1, block_size_const, single_input_feature_map_group); \
			break; \
		case 2: \
			launch_exact_kernel_const_const(2, block_size_const, single_input_feature_map_group); \
			break; \
		case 3: \
			launch_exact_kernel_const_const(3, block_size_const, single_input_feature_map_group); \
			break; \
		case 4: \
			launch_exact_kernel_const_const(4, block_size_const, single_input_feature_map_group); \
			break; \
		case 5: \
			launch_exact_kernel_const_const(5, block_size_const, single_input_feature_map_group); \
			break; \
		case 6: \
			launch_exact_kernel_const_const(6, block_size_const, single_input_feature_map_group); \
			break; \
		case 7: \
			launch_exact_kernel_const_const(7, block_size_const, single_input_feature_map_group); \
			break; \
		case 8: \
			launch_exact_kernel_const_const(8, block_size_const, single_input_feature_map_group); \
			break; \
		case 9: \
			launch_exact_kernel_const_const(9, block_size_const, single_input_feature_map_group); \
			break; \
		case 10: \
			launch_exact_kernel_const_const(10, block_size_const, single_input_feature_map_group); \
			break; \
		};
// Converts a runtime block size (1..MAX_BLOCK_SIZE) into the matching
// compile-time instantiation; composes with launch_exact_kernel_const.
#define launch_exact_kernel(window_width, block_size, single_input_feature_map_group) \
	switch (block_size) \
		{ \
		case 1: \
			launch_exact_kernel_const(window_width, 1, single_input_feature_map_group); \
			break; \
		case 2: \
			launch_exact_kernel_const(window_width, 2, single_input_feature_map_group); \
			break; \
		case 3: \
			launch_exact_kernel_const(window_width, 3, single_input_feature_map_group); \
			break; \
		case 4: \
			launch_exact_kernel_const(window_width, 4, single_input_feature_map_group); \
			break; \
		case 5: \
			launch_exact_kernel_const(window_width, 5, single_input_feature_map_group); \
			break; \
		};
// Launches the generic forward kernel (runtime window width) with a
// compile-time block size; same enclosing-method locals as the exact variant,
// but passes both window dimensions at runtime.
#define launch_kernel_const(block_size_const, single_input_feature_map_group) \
	hipLaunchKernelGGL(( convolution_2d_tex_upd_kernel_kepler<block_size_const,single_input_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_neurons_buffer, input_tex, weights_tex, *data[1], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, xy_config_count, feature_map_config_count);
// Dispatches a runtime block size (1..MAX_BLOCK_SIZE) to the matching
// compile-time instantiation of the generic forward kernel.
#define launch_kernel(block_size, single_input_feature_map_group) \
	switch (block_size) \
		{ \
		case 1: \
			launch_kernel_const(1, single_input_feature_map_group); \
			break; \
		case 2: \
			launch_kernel_const(2, single_input_feature_map_group); \
			break; \
		case 3: \
			launch_kernel_const(3, single_input_feature_map_group); \
			break; \
		case 4: \
			launch_kernel_const(4, single_input_feature_map_group); \
			break; \
		case 5: \
			launch_kernel_const(5, single_input_feature_map_group); \
			break; \
		};
// Launches the backprop "exact" kernel (compile-time window width and block
// size); relies on enqueue_backprop locals (kernel_dims, output_tex,
// weights_tex, config lists/counts, entry_count).
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const, single_output_feature_map_group) \
	hipLaunchKernelGGL(( convolution_2d_deriviative_tex_exact_upd_kernel_kepler<window_width_const,block_size_const,single_output_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, output_tex, weights_tex, xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, xy_config_count, feature_map_config_count);
// Dispatches a runtime window width (1..MAX_WINDOW_WIDTH) to the matching
// compile-time instantiation of the backprop exact kernel.
#define launch_backprop_exact_kernel_const(window_width, block_size_const, single_output_feature_map_group) \
	switch (window_width) \
		{ \
		case 1: \
			launch_backprop_exact_kernel_const_const(1, block_size_const, single_output_feature_map_group); \
			break; \
		case 2: \
			launch_backprop_exact_kernel_const_const(2, block_size_const, single_output_feature_map_group); \
			break; \
		case 3: \
			launch_backprop_exact_kernel_const_const(3, block_size_const, single_output_feature_map_group); \
			break; \
		case 4: \
			launch_backprop_exact_kernel_const_const(4, block_size_const, single_output_feature_map_group); \
			break; \
		case 5: \
			launch_backprop_exact_kernel_const_const(5, block_size_const, single_output_feature_map_group); \
			break; \
		case 6: \
			launch_backprop_exact_kernel_const_const(6, block_size_const, single_output_feature_map_group); \
			break; \
		case 7: \
			launch_backprop_exact_kernel_const_const(7, block_size_const, single_output_feature_map_group); \
			break; \
		case 8: \
			launch_backprop_exact_kernel_const_const(8, block_size_const, single_output_feature_map_group); \
			break; \
		case 9: \
			launch_backprop_exact_kernel_const_const(9, block_size_const, single_output_feature_map_group); \
			break; \
		case 10: \
			launch_backprop_exact_kernel_const_const(10, block_size_const, single_output_feature_map_group); \
			break; \
		};
// Dispatches a runtime block size (1..MAX_BLOCK_SIZE) for the backprop exact kernel.
#define launch_backprop_exact_kernel(window_width, block_size, single_output_feature_map_group) \
	switch (block_size) \
		{ \
		case 1: \
			launch_backprop_exact_kernel_const(window_width, 1, single_output_feature_map_group); \
			break; \
		case 2: \
			launch_backprop_exact_kernel_const(window_width, 2, single_output_feature_map_group); \
			break; \
		case 3: \
			launch_backprop_exact_kernel_const(window_width, 3, single_output_feature_map_group); \
			break; \
		case 4: \
			launch_backprop_exact_kernel_const(window_width, 4, single_output_feature_map_group); \
			break; \
		case 5: \
			launch_backprop_exact_kernel_const(window_width, 5, single_output_feature_map_group); \
			break; \
		};
// Launches the generic backprop kernel (runtime window width) with a
// compile-time block size.
#define launch_backprop_kernel_const(block_size_const, single_output_feature_map_group) \
	hipLaunchKernelGGL(( convolution_2d_deriviative_tex_upd_kernel_kepler<block_size_const,single_output_feature_map_group>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, output_tex, weights_tex, xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, xy_config_count, feature_map_config_count);
// Dispatches a runtime block size (1..MAX_BLOCK_SIZE) to the matching
// compile-time instantiation of the generic backprop kernel.
#define launch_backprop_kernel(block_size, single_output_feature_map_group) \
	switch (block_size) \
		{ \
		case 1: \
			launch_backprop_kernel_const(1, single_output_feature_map_group); \
			break; \
		case 2: \
			launch_backprop_kernel_const(2, single_output_feature_map_group); \
			break; \
		case 3: \
			launch_backprop_kernel_const(3, single_output_feature_map_group); \
			break; \
		case 4: \
			launch_backprop_kernel_const(4, single_output_feature_map_group); \
			break; \
		case 5: \
			launch_backprop_kernel_const(5, single_output_feature_map_group); \
			break; \
		};
// Launches the exact weight-update kernel (compile-time window width); relies
// on enqueue_update_weights locals (kernel_dims, input_tex, config lists/counts,
// texture_offset, entry_count).
#define launch_update_weights_exact_kernel_const(window_width_const, single_output_y_group_const) \
	hipLaunchKernelGGL(( convolution_2d_update_weights_exact_upd_kernel_kepler<window_width_const, single_output_y_group_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[0], input_tex, *training_speed[0], output_y_weight_y_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_y_group_count, texture_offset, entry_count, different_input, output_y_weight_y_config_count, feature_map_config_count);
// Dispatches a runtime window width (1..MAX_WINDOW_WIDTH) to the matching
// compile-time instantiation of the exact weight-update kernel.
#define launch_update_weights_exact_kernel(window_width, single_output_y_group_const) \
	switch (window_width) \
		{ \
		case 1: \
			launch_update_weights_exact_kernel_const(1, single_output_y_group_const); \
			break; \
		case 2: \
			launch_update_weights_exact_kernel_const(2, single_output_y_group_const); \
			break; \
		case 3: \
			launch_update_weights_exact_kernel_const(3, single_output_y_group_const); \
			break; \
		case 4: \
			launch_update_weights_exact_kernel_const(4, single_output_y_group_const); \
			break; \
		case 5: \
			launch_update_weights_exact_kernel_const(5, single_output_y_group_const); \
			break; \
		case 6: \
			launch_update_weights_exact_kernel_const(6, single_output_y_group_const); \
			break; \
		case 7: \
			launch_update_weights_exact_kernel_const(7, single_output_y_group_const); \
			break; \
		case 8: \
			launch_update_weights_exact_kernel_const(8, single_output_y_group_const); \
			break; \
		case 9: \
			launch_update_weights_exact_kernel_const(9, single_output_y_group_const); \
			break; \
		case 10: \
			launch_update_weights_exact_kernel_const(10, single_output_y_group_const); \
			break; \
		};
// Launches the generic weight-update kernel (runtime window width, uses the
// WINDOW_WIDTH_LOCAL-wide per-thread column strip).
#define launch_update_weights_kernel_const(single_output_y_group_const) \
	hipLaunchKernelGGL(( convolution_2d_update_weights_upd_kernel_kepler<single_output_y_group_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *data[0], input_tex, *training_speed[0], output_y_weight_y_weight_x_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_y_group_count, texture_offset, entry_count, different_input, output_y_weight_y_weight_x_config_count, feature_map_config_count);
// Forward pass: computes the layer outputs for entry_count entries on stream_id.
// Lazily builds (and caches in dynamic_memobjects[0]/[1]) textures over the
// input neurons and weights, then dispatches one of the templated forward
// kernels via the launch_* macros, which consume the locals declared below.
// The "exact" kernel (compile-time window width) is used when the window fits
// MAX_WINDOW_WIDTH; otherwise the generic kernel handles the width at runtime.
void convolution_2d_layer_updater_cuda_kepler::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Texture over the input neurons; created once and reused on later calls.
if (dynamic_memobjects[0] == 0)
dynamic_memobjects[0] = cuda_texture_smart_ptr(new cuda_texture(input_neurons_buffer));
cuda_texture& input_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[0].get()));
// Element offset applied when several updaters share one input buffer.
int texture_offset = offset_input_entry_id * input_elem_count_per_entry;
// Texture over the weights (data[0]); also cached.
if (dynamic_memobjects[1] == 0)
dynamic_memobjects[1] = cuda_texture_smart_ptr(new cuda_texture(data[0]));
cuda_texture& weights_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[1].get()));
// Precomputed task lists (see fill_additional_buffers): [0] xy configs,
// [1] feature-map configs.  These locals are consumed by the launch macros.
int xy_config_count = forward_x_block_count * output_configuration_specific.dimension_sizes[1];
const xy_config * xy_config_list = static_cast<const xy_config *>((const void *)*additional_buffers[0]);
int feature_map_config_count = forward_input_feature_map_group_count * forward_output_feature_map_block_count;
const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[1]);
// With more than one input-feature-map group, each group contributes a partial
// sum to the same output elements, so the destination must start zeroed.
if (forward_input_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*output_neurons_buffer,
0.0F,
output_elem_count_per_entry * entry_count,
stream_id);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
xy_config_count,
feature_map_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (forward_input_feature_map_group_count == 1)
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, true);
}
else
{
launch_exact_kernel(window_sizes[0], forward_x_block_size, false);
}
}
else
{
if (forward_input_feature_map_group_count == 1)
{
launch_kernel(forward_x_block_size, true);
}
else
{
launch_kernel(forward_x_block_size, false);
}
}
}
// Backward pass: propagates output errors into input_errors_buffer.
// Builds textures over the output errors and the weights, then dispatches the
// templated deriviative kernels via the launch_backprop_* macros.  The kernel
// choice mirrors enqueue_test: exact (compile-time window width) when the
// window fits MAX_WINDOW_WIDTH, generic otherwise.
void convolution_2d_layer_updater_cuda_kepler::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Guard against unsupported configurations before touching any buffers.
if (!different_input)
throw neural_network_exception("convolution_2d_layer_updater_cuda_kepler is not able to backprop to the same input");
if (!backprop_required)
throw neural_network_exception("convolution_2d_layer_updater_cuda_kepler is not configured to do backprop but requested to");
// Texture over the output errors; cached in dynamic_memobjects[2].
if (dynamic_memobjects[2] == 0)
dynamic_memobjects[2] = cuda_texture_smart_ptr(new cuda_texture(output_errors_buffer));
cuda_texture& output_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[2].get()));
// Texture over the weights (data[0]); shared with enqueue_test via slot [1].
if (dynamic_memobjects[1] == 0)
dynamic_memobjects[1] = cuda_texture_smart_ptr(new cuda_texture(data[0]))
;
cuda_texture& weights_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[1].get()));
// Precomputed backprop task lists: [4] xy configs, [5] feature-map configs.
// These locals are consumed by the launch_backprop_* macros.
int xy_config_count = backward_x_block_count * input_configuration_specific.dimension_sizes[1];
const xy_config * xy_config_list = static_cast<const xy_config *>((const void *)*additional_buffers[4]);
int feature_map_config_count = backward_output_feature_map_group_count * backward_input_feature_map_block_count;
const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[5]);
// Multiple output-feature-map groups accumulate partial error sums into the
// same input elements, so the destination must start zeroed.
if (backward_output_feature_map_group_count > 1)
cuda_util::set_with_value(
*cuda_config,
*input_errors_buffer,
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
xy_config_count,
feature_map_config_count,
entry_count);
if (window_sizes[0] <= MAX_WINDOW_WIDTH)
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, true);
}
else
{
launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, false);
}
}
else
{
if (backward_output_feature_map_group_count == 1)
{
launch_backprop_kernel(backward_x_block_size, true);
}
else
{
launch_backprop_kernel(backward_x_block_size, false);
}
}
}
// Applies the weight and bias updates derived from the output errors.
// Biases are reduced by a dedicated kernel (one block per feature map per
// entry).  For weights, the output errors are bound to the legacy texture
// reference output_tex_ref (read by the update-weights kernels), the input
// neurons are read through a cached texture object, and either the exact
// (single x-block) or the generic (WINDOW_WIDTH_LOCAL-strided) kernel is
// launched via the launch_update_weights_* macros.
void convolution_2d_layer_updater_cuda_kepler::enqueue_update_weights(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& training_speed,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Update biases
{
int threadblock_size = get_threadblock_size_biases(output_elem_count_per_feature_map);
dim3 grid_size(1, output_configuration_specific.feature_map_count, entry_count);
dim3 block_size(threadblock_size, 1, 1);
int min_iteration_count = output_elem_count_per_feature_map / threadblock_size;
hipLaunchKernelGGL(( convolution_2d_update_biases_upd_kernel_kepler), dim3(grid_size), dim3(block_size), 0, stream_id,
*data[1],
*output_errors_buffer,
*training_speed[1],
output_configuration_specific.feature_map_count,
output_elem_count_per_feature_map,
min_iteration_count);
}
// Legacy texture-reference binding: the update-weights kernels fetch output
// errors through output_tex_ref (configured in the constructor).
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
cuda_safe_call(hipBindTexture(0, output_tex_ref, *output_errors_buffer, desc, output_elem_count_per_entry * entry_count * sizeof(float)));
// Texture over the input neurons; cached in dynamic_memobjects[0].
if (dynamic_memobjects[0] == 0)
dynamic_memobjects[0] = cuda_texture_smart_ptr(new cuda_texture(input_neurons_buffer));
cuda_texture& input_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[0].get()));
int texture_offset = offset_input_entry_id * input_elem_count_per_entry;
int feature_map_config_count = updater_output_feature_map_block_count * input_configuration_specific.feature_map_count;
const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[3]);
// Update weights
{
// Single x-block: one thread spans the whole window width (exact kernel).
if (updater_window_x_block_count == 1)
{
// NOTE: additional_buffers[2] is interpreted here as output_y_weight_y_config;
// the other branch reads the same buffer as output_y_weight_y_weight_x_config.
int output_y_weight_y_config_count = updater_output_y_group_count * window_sizes[1];
const output_y_weight_y_config * output_y_weight_y_config_list = static_cast<const output_y_weight_y_config *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_y_weight_y_config_count,
feature_map_config_count,
entry_count);
// Single y-group allows non-atomic weight writes in the kernel.
if (updater_output_y_group_count == 1)
{
launch_update_weights_exact_kernel(window_sizes[0], true);
}
else
{
launch_update_weights_exact_kernel(window_sizes[0], false);
}
}
else
{
int output_y_weight_y_weight_x_config_count = updater_output_y_group_count * window_sizes[1] * updater_window_x_block_count;
const output_y_weight_y_weight_x_config * output_y_weight_y_weight_x_config_list = static_cast<const output_y_weight_y_weight_x_config *>((const void *)*additional_buffers[2]);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_y_weight_y_weight_x_config_count,
feature_map_config_count,
entry_count);
if (updater_output_y_group_count == 1)
{
launch_update_weights_kernel_const(true);
}
else
{
launch_update_weights_kernel_const(false);
}
}
}
}
// Chooses a per-thread block width for a dimension of `width` elements:
// splits the dimension into the fewest chunks of at most MAX_BLOCK_SIZE,
// then balances the chunk size across those chunks (ceil division twice).
int convolution_2d_layer_updater_cuda_kepler::get_block_size(int width)
{
int chunk_count = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
return (width + chunk_count - 1) / chunk_count;
}
// Derives all launch-shaping parameters from the configured layer schema:
// window sizes, forward x-block decomposition, output-feature-map blocking
// (FEATURE_MAP_BLOCK_SIZE maps per kernel thread), the weight-update x-block
// count (1 means the exact kernel covers the whole window; otherwise the
// window is split into WINDOW_WIDTH_LOCAL-wide strips), and, when backprop is
// required, the backward decomposition.
void convolution_2d_layer_updater_cuda_kepler::updater_configured()
{
// Pre-C++11 codebase: shared_ptr/dynamic_pointer_cast come from std::tr1.
std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
window_sizes.push_back(static_cast<int>(*it));
forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
if (backprop_required)
{
backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
}
}
// Backprop writes input errors into a separate buffer (see enqueue_backprop),
// never in place over the output errors.
bool convolution_2d_layer_updater_cuda_kepler::is_in_place_backprop() const
{
return false;
}
// Reports the per-entry element counts that are read through linearly
// addressed textures: first the input neurons, then the output errors.
std::vector<unsigned int> convolution_2d_layer_updater_cuda_kepler::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> counts(2);
counts[0] = input_elem_count_per_entry;
counts[1] = output_elem_count_per_entry;
return counts;
}
// Picks the threadblock size for the bias-update kernel.
// Small feature maps (< 128 neurons) use one block covering all neurons,
// rounded up to a full warp (32 threads).  Larger maps are split into
// ~128-neuron blocks, the per-block share balanced and then warp-aligned.
int convolution_2d_layer_updater_cuda_kepler::get_threadblock_size_biases(int output_neuron_count)
{
if (output_neuron_count < 128)
return ((output_neuron_count + 31) / 32) * 32;
int block_count = (output_neuron_count + 127) / 128;
int neurons_per_block = (output_neuron_count + block_count - 1) / block_count;
return ((neurons_per_block + 31) / 32) * 32;
}
// Returns the byte sizes of the fixed per-updater scratch buffers, in the
// order they are filled by fill_additional_buffers():
//   [0] forward (y, x) task list
//   [1] forward (input fm group, output fm block) task list
//   [2] weight-update (output_y, weight_y[, weight_x]) task list
//       (sized for the worst case: every output row times every window row
//        times every window-x block)
//   [3] weight-update feature-map tiling pattern
//   [4] backward (y, x) task list            (only when backprop is required)
//   [5] backward feature-map task list       (only when backprop is required)
std::vector<size_t> convolution_2d_layer_updater_cuda_kepler::get_sizes_of_additional_buffers_fixed() const
{
	std::vector<size_t> res;
	res.reserve(backprop_required ? 6 : 4);
	res.push_back(sizeof(xy_config) * forward_x_block_count * output_configuration_specific.dimension_sizes[1]);
	res.push_back(sizeof(feature_map_config) * input_configuration_specific.feature_map_count * forward_output_feature_map_block_count);
	res.push_back(sizeof(output_y_weight_y_config) * window_sizes[1] * output_configuration_specific.dimension_sizes[1] * updater_window_x_block_count);
	res.push_back(sizeof(feature_map_config) * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
	if (backprop_required)
	{
		res.push_back(sizeof(xy_config) * backward_x_block_count * input_configuration_specific.dimension_sizes[1]);
		res.push_back(sizeof(feature_map_config) * output_configuration_specific.feature_map_count * backward_input_feature_map_block_count);
	}
	return res;
}
namespace
{
	// Uploads a host-side task list into a device buffer.
	// Deriving the byte count from the vector's element type guarantees the
	// size always matches the element layout: the previous hand-written copies
	// used sizeof(xy_config) even for output_y_weight_y_config and
	// output_y_weight_y_weight_x_config vectors, which was correct only by the
	// coincidence that all three structs are 4 bytes.
	// The empty-vector guard avoids dereferencing begin() of an empty vector
	// (undefined behavior); with an empty list there is nothing to upload.
	template<typename T>
	void copy_task_list_to_device(cuda_linear_buffer_device_smart_ptr buf, const std::vector<T>& task_list)
	{
		if (!task_list.empty())
			cuda_safe_call(hipMemcpy(*buf, &task_list.front(), sizeof(T) * task_list.size(), hipMemcpyHostToDevice));
	}
}

// Precomputes the per-thread task descriptors consumed by the forward,
// weight-update, and (optionally) backprop kernels and uploads them into the
// scratch buffers whose sizes were declared in
// get_sizes_of_additional_buffers_fixed().
void convolution_2d_layer_updater_cuda_kepler::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
	// Buffer 0: forward-pass (y, x) positions; x advances in forward-block steps.
	{
		std::vector<xy_config> task_list;
		for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
			for(int x = 0; x < forward_x_block_count; ++x)
				task_list.push_back(xy_config(y, x * forward_x_block_size));
		copy_task_list_to_device(additional_buffers[0], task_list);
	}

	// Buffer 1: forward-pass (input feature-map group start, output feature-map block start) pairs.
	{
		std::vector<feature_map_config> task_list;
		for(int input_feature_map_group_id = 0; input_feature_map_group_id < forward_input_feature_map_group_count; ++input_feature_map_group_id)
			for(int output_feature_map_id = 0; output_feature_map_id < forward_output_feature_map_block_count; ++output_feature_map_id)
				task_list.push_back(feature_map_config(input_feature_map_group_id * forward_input_feature_map_group_size, output_feature_map_id * FEATURE_MAP_BLOCK_SIZE));
		copy_task_list_to_device(additional_buffers[1], task_list);
	}

	// Buffer 2: weight-update tasks. Narrow windows use (output_y, weight_y)
	// pairs; wide windows additionally split the window's x extent into
	// WINDOW_WIDTH_LOCAL-wide pieces.
	if (updater_window_x_block_count == 1)
	{
		std::vector<output_y_weight_y_config> task_list;
		for(int output_y = 0; output_y < updater_output_y_group_count; ++output_y)
			for(int weight_y = 0; weight_y < window_sizes[1]; ++weight_y)
				task_list.push_back(output_y_weight_y_config(output_y, weight_y));
		copy_task_list_to_device(additional_buffers[2], task_list);
	}
	else
	{
		std::vector<output_y_weight_y_weight_x_config> task_list;
		for(int output_y = 0; output_y < updater_output_y_group_count; ++output_y)
			for(int weight_y = 0; weight_y < window_sizes[1]; ++weight_y)
				for(int weight_x = 0; weight_x < updater_window_x_block_count; ++weight_x)
					// Each x block starts WINDOW_WIDTH_LOCAL weight columns apart
					// (was written as FEATURE_MAP_BLOCK_SIZE; both constants are 4,
					// so the value is unchanged, but WINDOW_WIDTH_LOCAL is the
					// semantically correct stride for window-x blocking).
					task_list.push_back(output_y_weight_y_weight_x_config(output_y, weight_y, weight_x * WINDOW_WIDTH_LOCAL));
		copy_task_list_to_device(additional_buffers[2], task_list);
	}

	// Buffer 3: weight-update feature-map pairs laid out in a tiling pattern
	// that spreads atomic contention across the weight buffer.
	{
		std::vector<std::pair<int, int> > pair_list;
		cuda_util::fill_tiling_pattern(input_configuration_specific.feature_map_count, updater_output_feature_map_block_count, pair_list);
		std::vector<feature_map_config> task_list;
		for(std::vector<std::pair<int, int> >::const_iterator it = pair_list.begin(); it != pair_list.end(); ++it)
			task_list.push_back(feature_map_config(it->first, it->second * FEATURE_MAP_BLOCK_SIZE));
		copy_task_list_to_device(additional_buffers[3], task_list);
	}

	if (backprop_required)
	{
		// Buffer 4: backward-pass (y, x) positions; x points at the LAST column
		// of each block (the deriviative kernel walks backwards from it).
		{
			std::vector<xy_config> task_list;
			for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
				for(int x = 0; x < backward_x_block_count; ++x)
					task_list.push_back(xy_config(y, x * backward_x_block_size + (backward_x_block_size - 1)));
			copy_task_list_to_device(additional_buffers[4], task_list);
		}

		// Buffer 5: backward-pass (input feature-map block start, output
		// feature-map group start) pairs.
		{
			std::vector<feature_map_config> task_list;
			for(int output_feature_map_group_id = 0; output_feature_map_group_id < backward_output_feature_map_group_count; ++output_feature_map_group_id)
				for(int input_feature_map_id = 0; input_feature_map_id < backward_input_feature_map_block_count; ++input_feature_map_id)
					task_list.push_back(feature_map_config(input_feature_map_id * FEATURE_MAP_BLOCK_SIZE, output_feature_map_group_id * backward_output_feature_map_group_size));
			copy_task_list_to_device(additional_buffers[5], task_list);
		}
	}
}
// Splits work across feature-map / output-row groups for the given maximum
// batch size. Each group count is chosen by cuda_util::get_group_count from
// the amount of independent work available; the matching group size is the
// ceil-div of the dimension by the group count.
void convolution_2d_layer_updater_cuda_kepler::set_max_entry_count(unsigned int max_entry_count)
{
	// Forward pass: split the input feature maps into groups.
	unsigned int forward_task_count = forward_x_block_count * output_configuration_specific.dimension_sizes[1] * forward_output_feature_map_block_count * max_entry_count;
	forward_input_feature_map_group_count = cuda_util::get_group_count(
		*cuda_config,
		forward_task_count,
		input_configuration_specific.feature_map_count);
	forward_input_feature_map_group_size = (input_configuration_specific.feature_map_count + forward_input_feature_map_group_count - 1) / forward_input_feature_map_group_count;

	// Weight update: split the output rows into groups.
	unsigned int updater_task_count = updater_output_feature_map_block_count * input_configuration_specific.feature_map_count * window_sizes[1] * max_entry_count * updater_window_x_block_count;
	updater_output_y_group_count = cuda_util::get_group_count(
		*cuda_config,
		updater_task_count,
		output_configuration_specific.dimension_sizes[1]);
	updater_output_y_group_size = (output_configuration_specific.dimension_sizes[1] + updater_output_y_group_count - 1) / updater_output_y_group_count;

	if (backprop_required)
	{
		// Backward pass: split the output feature maps into groups.
		unsigned int backward_task_count = backward_x_block_count * input_configuration_specific.dimension_sizes[1] * backward_input_feature_map_block_count * max_entry_count;
		backward_output_feature_map_group_count = cuda_util::get_group_count(
			*cuda_config,
			backward_task_count,
			output_configuration_specific.feature_map_count);
		backward_output_feature_map_group_size = (output_configuration_specific.feature_map_count + backward_output_feature_map_group_count - 1) / backward_output_feature_map_group_count;
	}
}
// Number of dynamically managed memory objects (texture objects created per
// run) this updater needs.
int convolution_2d_layer_updater_cuda_kepler::get_dynamic_memobject_count() const
{
	return 3;
}
}
}
| b6f9b80f2e1163ee7680daf7b591fa30ab29aa7b.cu | /*
* Copyright 2011-2013 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "convolution_2d_layer_updater_cuda_kepler.h"
#include <cuda_runtime.h>
#include <boost/format.hpp>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "cuda_texture.h"
#include "../convolution_layer.h"
texture<float, cudaTextureType1D, cudaReadModeElementType> output_tex_ref;
#define FEATURE_MAP_BLOCK_SIZE 4
#define WINDOW_WIDTH_LOCAL 4
// One (y, x) task coordinate packed into a single 32-bit word:
// y occupies the high 16 bits, x the low 16 bits.
struct __align__(4) xy_config
{
	xy_config(int y, int x)
		: xy_pair((((unsigned int)y) << 16) | (unsigned int)x)
	{
	}

	unsigned int xy_pair;
};
// One (input fm, output fm) task pair packed into a single 32-bit word:
// input_feature_map_id occupies the high 16 bits, output_feature_map_id the
// low 16 bits.
struct __align__(4) feature_map_config
{
	feature_map_config(int input_feature_map_id, int output_feature_map_id)
		: feature_map_pair((((unsigned int)input_feature_map_id) << 16) | (unsigned int)output_feature_map_id)
	{
	}

	unsigned int feature_map_pair;
};
// One (output row, weight row) weight-update task packed into a single
// 32-bit word: output_y occupies the high 16 bits, weight_y the low 16 bits.
struct __align__(4) output_y_weight_y_config
{
	output_y_weight_y_config(int output_y, int weight_y)
		: output_y_window_y_pair((((unsigned int)output_y) << 16) | (unsigned int)weight_y)
	{
	}

	unsigned int output_y_window_y_pair;
};
// One (output row, weight row, weight column block) weight-update task packed
// into a single 32-bit word: output_y in bits 16..31, weight_y in bits 8..15,
// weight_x in bits 0..7. Note weight_y and weight_x therefore only carry
// 8 bits each (values up to 255).
struct __align__(4) output_y_weight_y_weight_x_config
{
	output_y_weight_y_weight_x_config(int output_y, int weight_y, int weight_x)
		: output_y_window_y_window_x_pair((((unsigned int)output_y) << 16) | (((unsigned int)weight_y) << 8) | ((unsigned int)weight_x))
	{
	}

	unsigned int output_y_window_y_window_x_pair;
};
// Forward 2D convolution (generic window width), inputs/weights read through
// texture objects.
// Each thread computes BLOCK_SIZE consecutive output x positions for
// FEATURE_MAP_BLOCK_SIZE consecutive output feature maps, over one group of
// input feature maps. Grid: x -> (y, x) tasks, y -> feature-map tasks,
// z -> entries. When single_input_feature_map_group is false, several groups
// contribute to the same outputs and partial sums are combined with atomicAdd
// (the caller is expected to provide initial values accordingly; biases are
// only folded in by the group starting at input feature map 0).
template<int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_2d_tex_upd_kernel_kepler(
float * __restrict output,
cudaTextureObject_t input_tex,
cudaTextureObject_t weights_tex,
const float * __restrict biases,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_width,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
// Grid rarely divides the task counts evenly; guard all three dimensions.
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
// Unpack the (y, x) position and the (input group start, output fm block) pair.
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int base_input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_output_feature_map = window_width * window_height * input_feature_map_count;
// different_input == false means all entries share one input buffer.
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_height) + y) * input_width + x + texture_offset;
int weights_offset = ((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * window_width * window_height;
// The last group may be smaller than input_feature_map_group_size.
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
// Only the group that starts at input feature map 0 adds the biases,
// so they are counted exactly once across groups.
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
// Per-thread accumulators: FEATURE_MAP_BLOCK_SIZE output maps x BLOCK_SIZE columns.
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
// Accumulate over the input feature maps of this group and the whole window.
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
#pragma unroll 4
for(int input_x = 0; input_x < window_width; ++input_x)
{
// Same weight position for all FEATURE_MAP_BLOCK_SIZE output maps,
// strided by the per-output-map weight count.
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
}
// Advance to the start of the next input row of the window.
input_elem_id += input_width - window_width;
}
// Advance to the same (x, y) in the next input feature map.
input_elem_id += input_width * (input_height - window_height);
}
float * base_output = output + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_height * output_width;
if (single_input_feature_map_group)
{
// Sole contributor: plain stores, clipped at feature-map and row ends.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
// Multiple input-feature-map groups write the same outputs: accumulate atomically.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
// Forward 2D convolution specialized for a compile-time window width:
// identical structure to convolution_2d_tex_upd_kernel_kepler, but with
// WINDOW_WIDTH as a template parameter so the inner x loop fully unrolls.
// See the generic kernel for the indexing and grouping scheme.
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_input_feature_map_group>
__global__ void convolution_2d_tex_exact_upd_kernel_kepler(
float * __restrict output,
cudaTextureObject_t input_tex,
cudaTextureObject_t weights_tex,
const float * __restrict biases,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int input_feature_map_group_size,
int texture_offset,
int entry_count,
bool different_input,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
// Guard all three grid dimensions.
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
// Unpack the (y, x) position and the (input group start, output fm block) pair.
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int base_input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_output_feature_map = WINDOW_WIDTH * window_height * input_feature_map_count;
int input_elem_id = ((((different_input ? entry_id * input_feature_map_count : 0) + base_input_feature_map_id) * input_height) + y) * input_width + x + texture_offset;
int weights_offset = ((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + base_input_feature_map_id) * WINDOW_WIDTH * window_height;
// The last group may be smaller than input_feature_map_group_size.
int iteration_count = min(input_feature_map_group_size, input_feature_map_count - base_input_feature_map_id);
float initial_values[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
initial_values[i] = 0.0F;
// Biases are added by the group starting at input feature map 0 only.
if (base_input_feature_map_id == 0)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
if (i < output_feature_map_count - output_feature_map_id)
initial_values[i] = biases[entry_id * output_feature_map_count + output_feature_map_id + i];
}
float sums[BLOCK_SIZE * FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
sums[i * BLOCK_SIZE + j] = initial_values[i];
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
// WINDOW_WIDTH is a compile-time constant: this loop fully unrolls.
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_output_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
float inp = tex1Dfetch<float>(input_tex, input_elem_id + j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * weight_list[i];
}
weights_offset++;
input_elem_id++;
}
input_elem_id += input_width - WINDOW_WIDTH;
}
// Advance to the same (x, y) in the next input feature map.
input_elem_id += input_width * (input_height - window_height);
}
float * base_output = output + ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + y) * output_width + x;
int output_neuron_count_per_feature_map = output_height * output_width;
if (single_input_feature_map_group)
{
// Sole contributor: plain stores, clipped at feature-map and row ends.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
base_output[output_neuron_count_per_feature_map * i + j] = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
// Multiple groups accumulate into the same outputs atomically.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j < output_width - x)
atomicAdd(base_output + output_neuron_count_per_feature_map * i + j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
// Bias update: each block reduces the output errors of one feature map of one
// entry. Each thread strides through the feature map accumulating a partial
// sum, the warp reduces partial sums with shuffles, and lane 0 of each warp
// adds its warp's (learning-rate-scaled) contribution to the bias atomically.
// Grid: y -> output feature map, z -> entry; blockDim.x threads per block.
// NOTE(review): __shfl_down is the legacy mask-less intrinsic (Kepler-era);
// on Volta+ toolchains this must become __shfl_down_sync(0xffffffff, ...) --
// confirm the targeted compute capabilities before porting.
__global__ void convolution_2d_update_biases_upd_kernel_kepler(
float * __restrict biases,
const float * __restrict output_errors,
const float * __restrict training_speed,
int output_feature_map_count,
int output_elem_count_per_feature_map,
int min_iteration_count)
{
int thread_id = threadIdx.x;
int output_feature_map_id = blockIdx.y;
int entry_id = blockIdx.z;
int threadblock_size = blockDim.x;
float sum = 0.0F;
const float * current_error = output_errors + (entry_id * output_feature_map_count + output_feature_map_id) * output_elem_count_per_feature_map;
// Every thread does min_iteration_count full strides ...
int current_output_neuron_id = thread_id;
for(int i = 0; i < min_iteration_count; ++i)
{
sum += current_error[current_output_neuron_id];
current_output_neuron_id += threadblock_size;
}
// ... plus at most one extra element for the ragged tail.
if (current_output_neuron_id < output_elem_count_per_feature_map)
sum += current_error[current_output_neuron_id];
int lane_id = thread_id & 31;
// Warp-level tree reduction of the partial sums.
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
sum += __shfl_down(sum, tx);
}
// One atomic per warp; atomics also combine the warps of this block and
// any other blocks updating the same bias.
if (lane_id == 0)
{
int offset = entry_id * output_feature_map_count + output_feature_map_id;
float current_training_speed_val = training_speed[offset];
atomicAdd(biases + offset, sum * current_training_speed_val);
}
}
// Backprop (input error) 2D convolution, generic window width: correlates
// output errors with weights. Each thread produces BLOCK_SIZE input error
// values (walking x DOWNWARD from the packed x, which points at the last
// column of its block) for FEATURE_MAP_BLOCK_SIZE input feature maps, over
// one group of output feature maps. The window's x extent is processed in
// WINDOW_WIDTH_LOCAL-wide chunks with a remainder loop. The b_fit guards
// clip window taps that fall outside the valid output range (implicit zero
// padding at the borders).
// When single_output_feature_map_group is false, several groups contribute
// to the same input errors and are combined with atomicAdd.
template<int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_2d_deriviative_tex_upd_kernel_kepler(
float * __restrict input_errors,
cudaTextureObject_t output_tex,
cudaTextureObject_t weights_tex,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_width,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
// Guard all three grid dimensions.
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
// Unpack the (y, x) position (x = last column of this thread's block) and
// the (output fm group start, input fm block start) pair.
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int base_output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_input_feature_map = window_width * window_height;
int output_elem_id = ((entry_id * output_feature_map_count + base_output_feature_map_id) * output_height + y) * output_width + x;
int weights_offset = ((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_width * window_height;
// The last group may be smaller than output_feature_map_group_size.
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
// Valid output coordinate range for this input position (zero padding outside).
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit1 = (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
int input_x = 0;
// Main loop: WINDOW_WIDTH_LOCAL window columns at a time, reusing a
// sliding buffer of output errors across the unrolled taps.
#pragma unroll 1
for(; input_x < (window_width - (WINDOW_WIDTH_LOCAL - 1)); input_x += WINDOW_WIDTH_LOCAL)
{
float output_vals[BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH_LOCAL - 1; ++i)
{
// (the stray ';;' below is a harmless empty statement)
bool b_fit2 = b_fit1 && (i > min_x_exclusive) && (i <= max_x_inclusive);;
if (b_fit2)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
output_elem_id -= WINDOW_WIDTH_LOCAL;
#pragma unroll
for(int input_x_local = 0; input_x_local < WINDOW_WIDTH_LOCAL; ++input_x_local)
{
// One weight per input-feature-map slot, strided by the per-map weight count.
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x_local + j] * weight_list[i];
}
weights_offset++;
}
}
// Remainder loop for window widths not divisible by WINDOW_WIDTH_LOCAL.
#pragma unroll 1
for(; input_x < window_width; ++input_x)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
bool b_fit2 = b_fit1 && (input_x + j > min_x_exclusive) && (input_x + j <= max_x_inclusive);
if (b_fit2)
{
float inp = tex1Dfetch<float>(output_tex, output_elem_id - j);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += inp * tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
}
}
weights_offset++;
output_elem_id--;
}
// Step to the previous output row relative to the window walk.
output_elem_id += window_width - output_width;
}
// Next output feature map of the group: skip this map's remaining weights
// and rewind the output pointer.
weights_offset += window_width * window_height * (input_feature_map_count - 1);
output_elem_id += output_width * (output_height + window_height);
}
float * base_input = input_errors + ((entry_id * input_feature_map_count + input_feature_map_id) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_height * input_width;
// ('== 1' on a bool; equivalent to testing the flag directly.)
if (single_output_feature_map_group == 1)
{
// Sole contributor: plain stores; j indexes BACKWARD from x, clipped at the row start.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
// Multiple output-feature-map groups accumulate atomically.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
// Backprop (input error) 2D convolution specialized for a compile-time window
// width: same scheme as the generic deriviative kernel, but the whole row of
// output errors needed by BLOCK_SIZE + WINDOW_WIDTH - 1 taps is loaded at
// once, with the per-column validity precomputed as a bitmask so the fully
// unrolled loops stay branch-light.
template<int WINDOW_WIDTH, int BLOCK_SIZE, bool single_output_feature_map_group>
__global__ void convolution_2d_deriviative_tex_exact_upd_kernel_kepler(
float * __restrict input_errors,
cudaTextureObject_t output_tex,
cudaTextureObject_t weights_tex,
const xy_config * __restrict xy_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_feature_map_group_size,
int entry_count,
int xy_config_count,
int feature_map_config_count)
{
int xy_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
// Guard all three grid dimensions.
bool in_bounds = (entry_id < entry_count) && (xy_config_id < xy_config_count) && (feature_map_config_id < feature_map_config_count);
if (in_bounds)
{
// Unpack the (y, x) position (x = last column of this thread's block) and
// the (output fm group start, input fm block start) pair.
xy_config xyc = xy_config_list[xy_config_id];
int x = xyc.xy_pair & 0xFFFF;
int y = xyc.xy_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int base_output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int weight_count_per_input_feature_map = WINDOW_WIDTH * window_height;
int output_elem_id = ((entry_id * output_feature_map_count + base_output_feature_map_id) * output_height + y) * output_width + x;
int weights_offset = ((entry_id * output_feature_map_count + base_output_feature_map_id) * input_feature_map_count + input_feature_map_id) * WINDOW_WIDTH * window_height;
// The last group may be smaller than output_feature_map_group_size.
int iteration_count = min(output_feature_map_group_size, output_feature_map_count - base_output_feature_map_id);
float sums[FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * BLOCK_SIZE; ++i)
sums[i] = 0.0F;
// Valid output coordinate range for this input position (zero padding outside).
int min_y_exclusive = y - output_height;
int max_y_inclusive = y;
int min_x_exclusive = x - output_width;
int max_x_inclusive = x;
// Bit i of mask == 1 iff tap offset i lands inside the valid output row.
unsigned int mask = 0;
for(int i = BLOCK_SIZE + WINDOW_WIDTH - 2; i >= 0; --i)
mask = mask << 1 | (((i > min_x_exclusive) && (i <= max_x_inclusive)) ? 1 : 0);
for(int i = 0; i < iteration_count; ++i)
{
for(int input_y = 0; input_y < window_height; ++input_y)
{
bool b_fit1 = (input_y > min_y_exclusive) && (input_y <= max_y_inclusive);
// Load all output errors this row needs, zeroing invalid taps.
float output_vals[BLOCK_SIZE + WINDOW_WIDTH - 1];
#pragma unroll
for(int i = 0; i < BLOCK_SIZE + WINDOW_WIDTH - 1; ++i)
{
bool b_fit2 = b_fit1 && (((1 << i) & mask) != 0);
if (b_fit2)
output_vals[i] = tex1Dfetch<float>(output_tex, output_elem_id - i);
else
output_vals[i] = 0.0F;
}
// Fully unrolled correlation across the window row.
#pragma unroll
for(int input_x = 0; input_x < WINDOW_WIDTH; ++input_x)
{
float weight_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
weight_list[i] = tex1Dfetch<float>(weights_tex, weights_offset + weight_count_per_input_feature_map * i);
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
sums[i * BLOCK_SIZE + j] += output_vals[input_x + j] * weight_list[i];
}
weights_offset++;
}
output_elem_id -= output_width;
}
// Next output feature map of the group: skip this map's remaining weights
// and rewind the output pointer.
weights_offset += WINDOW_WIDTH * window_height * (input_feature_map_count - 1);
output_elem_id += output_width * (output_height + window_height);
}
float * base_input = input_errors + ((entry_id * input_feature_map_count + input_feature_map_id) * input_height + y) * input_width + x;
int input_neuron_count_per_feature_map = input_height * input_width;
// ('== 1' on a bool; equivalent to testing the flag directly.)
if (single_output_feature_map_group == 1)
{
// Sole contributor: plain stores; j indexes BACKWARD from x, clipped at the row start.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
*(base_input + input_neuron_count_per_feature_map * i - j) = sums[i * BLOCK_SIZE + j];
}
}
}
}
else
{
// Multiple output-feature-map groups accumulate atomically.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < input_feature_map_count - input_feature_map_id)
{
#pragma unroll
for(int j = 0; j < BLOCK_SIZE; ++j)
{
if (j > x - input_width)
atomicAdd(base_input + input_neuron_count_per_feature_map * i - j, sums[i * BLOCK_SIZE + j]);
}
}
}
}
}
}
// Weight gradient accumulation, generic window width: each thread handles one
// (output_y stripe, weight_y, weight_x block) task for FEATURE_MAP_BLOCK_SIZE
// output feature maps x WINDOW_WIDTH_LOCAL weight columns of one
// (input fm, output fm block) pair. Output errors are read through the
// module-level texture reference output_tex_ref (bound by host code outside
// this view); inputs through the input_tex texture object. Output rows are
// strided by output_y_group_count; when more than one y group contributes to
// the same weights (single_output_y_group == false), results are combined
// with atomicAdd. The learning rate is folded in per weight via training_speed.
template<bool single_output_y_group>
__global__ void convolution_2d_update_weights_upd_kernel_kepler(
float * __restrict weights,
cudaTextureObject_t input_tex,
const float * __restrict training_speed,
const output_y_weight_y_weight_x_config * __restrict output_y_weight_y_weight_x_config_list,
const feature_map_config * __restrict feature_map_config_list,
int output_width,
int output_height,
int input_width,
int input_height,
int window_width,
int window_height,
int input_feature_map_count,
int output_feature_map_count,
int output_y_group_count,
int texture_offset,
int entry_count,
bool different_input,
int output_y_weight_y_weight_x_config_count,
int feature_map_config_count)
{
int output_y_weight_y_weight_x_config_id = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
// Guard all three grid dimensions.
if ((output_y_weight_y_weight_x_config_id < output_y_weight_y_weight_x_config_count) && (feature_map_config_id < feature_map_config_count) && (entry_id < entry_count))
{
// Unpack (output_y start, weight_y, weight_x start) -- 16/8/8-bit packing.
output_y_weight_y_weight_x_config yw = output_y_weight_y_weight_x_config_list[output_y_weight_y_weight_x_config_id];
int weight_x = yw.output_y_window_y_window_x_pair & 0xFF;
int weight_y = (yw.output_y_window_y_window_x_pair & 0xFFFF) >> 8;
int output_y_start_id = yw.output_y_window_y_window_x_pair >> 16;
feature_map_config fmc = feature_map_config_list[feature_map_config_id];
int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
int input_feature_map_id = fmc.feature_map_pair >> 16;
int output_neuron_count_per_feature_map = output_width * output_height;
int output_elem_id = ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + output_y_start_id) * output_width;
// different_input == false means all entries share one input buffer.
int input_elem_id = (((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_height + weight_y + output_y_start_id) * input_width + texture_offset + weight_x;
// Accumulators: FEATURE_MAP_BLOCK_SIZE output maps x WINDOW_WIDTH_LOCAL weight columns.
float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH_LOCAL; ++i)
sums[i] = 0.0F;
for(int output_y = output_y_start_id; output_y < output_height; output_y += output_y_group_count)
{
// Prime the sliding input window (first WINDOW_WIDTH_LOCAL - 1 values).
float input_buf[WINDOW_WIDTH_LOCAL];
#pragma unroll
for(int i = 1; i < WINDOW_WIDTH_LOCAL; ++i)
{
input_buf[i] = tex1Dfetch<float>(input_tex, input_elem_id);
++input_elem_id;
}
for(int x = 0; x < output_width; ++x)
{
// One output error per output feature map of the block.
float output_error_list[FEATURE_MAP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
output_error_list[i] = tex1Dfetch(output_tex_ref, output_elem_id + output_neuron_count_per_feature_map * i);
// Shift the sliding window and append the next input value.
#pragma unroll
for(int i = 0; i < WINDOW_WIDTH_LOCAL - 1; ++i)
input_buf[i] = input_buf[i + 1];
input_buf[WINDOW_WIDTH_LOCAL - 1] = tex1Dfetch<float>(input_tex, input_elem_id);
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
sums[i * WINDOW_WIDTH_LOCAL + j] += output_error_list[i] * input_buf[j];
output_elem_id++;
input_elem_id++;
}
// Jump to this thread's next output row (stride = output_y_group_count).
output_elem_id += output_width * (output_y_group_count - 1);
input_elem_id += input_width * (output_y_group_count - 1) + (window_width - WINDOW_WIDTH_LOCAL);
}
int offset = (((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_height + weight_y) * window_width + weight_x;
int weight_count_per_output_feature_map = input_feature_map_count * window_height * window_width;
float * cur_weights = weights + offset;
const float * cur_training_speed = training_speed + offset;
if (single_output_y_group)
{
// Sole y group: plain read-modify-write, clipped at feature-map and window-row ends.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
}
}
}
else
{
// Multiple y groups update the same weights: accumulate atomically.
#pragma unroll
for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
{
if (i < output_feature_map_count - output_feature_map_id)
{
#pragma unroll
for(int j = 0; j < WINDOW_WIDTH_LOCAL; ++j)
if (j < window_width - weight_x)
atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH_LOCAL + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
}
}
}
}
}
// Computes and applies weight updates for a 2D convolution layer, using a
// compile-time-exact window width so the per-row inner loops fully unroll.
//
// Thread mapping (one thread per task triple):
//   x -> (output_y start row, weight_y) task from output_y_weight_y_config_list
//   y -> (output feature-map block, input feature map) from feature_map_config_list
//   z -> entry (sample) index
// Each thread accumulates FEATURE_MAP_BLOCK_SIZE x WINDOW_WIDTH partial
// gradient sums over its slice of output rows, then scales them by the
// per-weight training speed and adds them into `weights`.
// When single_output_y_group is true the thread owns its weights exclusively
// (plain read-modify-write); otherwise several output_y groups contribute to
// the same weights and atomicAdd is required.
// Input neurons are read through `input_tex`; output errors are read through
// the module-level texture reference `output_tex_ref` (bound by the host in
// enqueue_update_weights).
template<int WINDOW_WIDTH, bool single_output_y_group>
__global__ void convolution_2d_update_weights_exact_upd_kernel_kepler(
    float * __restrict weights,
    cudaTextureObject_t input_tex,
    const float * __restrict training_speed,
    const output_y_weight_y_config * __restrict output_y_weight_y_config_list,
    const feature_map_config * __restrict feature_map_config_list,
    int output_width,
    int output_height,
    int input_width,
    int input_height,
    int window_height,
    int input_feature_map_count,
    int output_feature_map_count,
    int output_y_group_count,
    int texture_offset,
    int entry_count,
    bool different_input,
    int output_y_weight_y_config_count,
    int feature_map_config_count)
{
    int output_y_weight_y_config_id = blockIdx.x * blockDim.x + threadIdx.x;
    int feature_map_config_id = blockIdx.y * blockDim.y + threadIdx.y;
    int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
    if ((output_y_weight_y_config_id < output_y_weight_y_config_count) && (feature_map_config_id < feature_map_config_count) && (entry_id < entry_count))
    {
        // Task descriptors pack two 16-bit fields into one 32-bit word.
        output_y_weight_y_config yw = output_y_weight_y_config_list[output_y_weight_y_config_id];
        int weight_y = yw.output_y_window_y_pair & 0xFFFF;
        int output_y_start_id = yw.output_y_window_y_pair >> 16;
        feature_map_config fmc = feature_map_config_list[feature_map_config_id];
        int output_feature_map_id = fmc.feature_map_pair & 0xFFFF;
        int input_feature_map_id = fmc.feature_map_pair >> 16;
        int output_neuron_count_per_feature_map = output_width * output_height;
        // Linear offsets of the first output error / input neuron this thread
        // reads; when different_input is false all entries share one input buffer.
        int output_elem_id = ((entry_id * output_feature_map_count + output_feature_map_id) * output_height + output_y_start_id) * output_width;
        int input_elem_id = (((different_input ? entry_id * input_feature_map_count : 0) + input_feature_map_id) * input_height + weight_y + output_y_start_id) * input_width + texture_offset;
        // Partial gradient accumulators: one row of WINDOW_WIDTH weights for
        // each of the FEATURE_MAP_BLOCK_SIZE output feature maps in the block.
        float sums[FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH];
        #pragma unroll
        for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE * WINDOW_WIDTH; ++i)
            sums[i] = 0.0F;
        // Each thread handles every output_y_group_count-th output row,
        // starting at its assigned row.
        for(int output_y = output_y_start_id; output_y < output_height; output_y += output_y_group_count)
        {
            // Sliding window over the input row: preload the first
            // WINDOW_WIDTH - 1 values into input_buf[1..], then shift one
            // element per output x position below.
            float input_buf[WINDOW_WIDTH];
            #pragma unroll
            for(int i = 1; i < WINDOW_WIDTH; ++i)
            {
                input_buf[i] = tex1Dfetch<float>(input_tex, input_elem_id);
                ++input_elem_id;
            }
            for(int x = 0; x < output_width; ++x)
            {
                // Output errors for all feature maps of this block at (x, output_y).
                float output_error_list[FEATURE_MAP_BLOCK_SIZE];
                #pragma unroll
                for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
                    output_error_list[i] = tex1Dfetch(output_tex_ref, output_elem_id + output_neuron_count_per_feature_map * i);
                // Shift the window left and fetch the next input value.
                #pragma unroll
                for(int i = 0; i < WINDOW_WIDTH - 1; ++i)
                    input_buf[i] = input_buf[i + 1];
                input_buf[WINDOW_WIDTH - 1] = tex1Dfetch<float>(input_tex, input_elem_id);
                // Outer-product accumulation: output error x input window.
                #pragma unroll
                for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
                    #pragma unroll
                    for(int j = 0; j < WINDOW_WIDTH; ++j)
                        sums[i * WINDOW_WIDTH + j] += output_error_list[i] * input_buf[j];
                output_elem_id++;
                input_elem_id++;
            }
            // Skip over the rows handled by the other output_y groups.
            output_elem_id += output_width * (output_y_group_count - 1);
            input_elem_id += input_width * (output_y_group_count - 1);
        }
        // Apply the accumulated gradients, scaled element-wise by the
        // per-weight training speed. Note the weight offset includes entry_id:
        // this updater keeps per-entry weight copies.
        int offset = (((entry_id * output_feature_map_count + output_feature_map_id) * input_feature_map_count + input_feature_map_id) * window_height + weight_y) * WINDOW_WIDTH;
        int weight_count_per_output_feature_map = input_feature_map_count * window_height * WINDOW_WIDTH;
        float * cur_weights = weights + offset;
        const float * cur_training_speed = training_speed + offset;
        if (single_output_y_group)
        {
            #pragma unroll
            for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
            {
                // Guard against the partial feature-map block at the end.
                if (i < output_feature_map_count - output_feature_map_id)
                {
                    #pragma unroll
                    for(int j = 0; j < WINDOW_WIDTH; ++j)
                        cur_weights[i * weight_count_per_output_feature_map + j] += sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j];
                }
            }
        }
        else
        {
            // Multiple output_y groups update the same weights concurrently.
            #pragma unroll
            for(int i = 0; i < FEATURE_MAP_BLOCK_SIZE; ++i)
            {
                if (i < output_feature_map_count - output_feature_map_id)
                {
                    #pragma unroll
                    for(int j = 0; j < WINDOW_WIDTH; ++j)
                        atomicAdd(cur_weights + i * weight_count_per_output_feature_map + j, sums[i * WINDOW_WIDTH + j] * cur_training_speed[i * weight_count_per_output_feature_map + j]);
                }
            }
        }
    }
}
namespace nnforge
{
namespace cuda
{
// Configure the module-level output-error texture reference once at
// construction: unnormalized integer coordinates, border addressing
// (out-of-range fetches return zero).
convolution_2d_layer_updater_cuda_kepler::convolution_2d_layer_updater_cuda_kepler()
{
    output_tex_ref.normalized = false;
    output_tex_ref.addressMode[0] = cudaAddressModeBorder;
}
// Nothing to release explicitly: cached textures live in the
// dynamic_memobjects container passed into the enqueue_* methods.
convolution_2d_layer_updater_cuda_kepler::~convolution_2d_layer_updater_cuda_kepler()
{
}
// MAX_BLOCK_SIZE: upper bound on the per-thread x-block width chosen by
// get_block_size. MAX_WINDOW_WIDTH: widest window for which width-exact
// (fully unrolled) kernel instantiations exist in the dispatch macros below.
#define MAX_BLOCK_SIZE 5
#define MAX_WINDOW_WIDTH 10
// Forward pass, width-exact kernel: window width and block size are
// compile-time template arguments. Expands in enqueue_test's scope and
// relies on its locals (kernel_dims, input_tex, weights_tex, ...).
#define launch_exact_kernel_const_const(window_width_const, block_size_const, single_input_feature_map_group) \
    convolution_2d_tex_exact_upd_kernel_kepler<window_width_const,block_size_const,single_input_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, input_tex, weights_tex, *data[1], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, xy_config_count, feature_map_config_count);
// Map the runtime window width (1..MAX_WINDOW_WIDTH) to a compile-time constant.
#define launch_exact_kernel_const(window_width, block_size_const, single_input_feature_map_group) \
    switch (window_width) \
    { \
    case 1: \
        launch_exact_kernel_const_const(1, block_size_const, single_input_feature_map_group); \
        break; \
    case 2: \
        launch_exact_kernel_const_const(2, block_size_const, single_input_feature_map_group); \
        break; \
    case 3: \
        launch_exact_kernel_const_const(3, block_size_const, single_input_feature_map_group); \
        break; \
    case 4: \
        launch_exact_kernel_const_const(4, block_size_const, single_input_feature_map_group); \
        break; \
    case 5: \
        launch_exact_kernel_const_const(5, block_size_const, single_input_feature_map_group); \
        break; \
    case 6: \
        launch_exact_kernel_const_const(6, block_size_const, single_input_feature_map_group); \
        break; \
    case 7: \
        launch_exact_kernel_const_const(7, block_size_const, single_input_feature_map_group); \
        break; \
    case 8: \
        launch_exact_kernel_const_const(8, block_size_const, single_input_feature_map_group); \
        break; \
    case 9: \
        launch_exact_kernel_const_const(9, block_size_const, single_input_feature_map_group); \
        break; \
    case 10: \
        launch_exact_kernel_const_const(10, block_size_const, single_input_feature_map_group); \
        break; \
    };
// Map the runtime block size (1..MAX_BLOCK_SIZE) to a compile-time constant.
#define launch_exact_kernel(window_width, block_size, single_input_feature_map_group) \
    switch (block_size) \
    { \
    case 1: \
        launch_exact_kernel_const(window_width, 1, single_input_feature_map_group); \
        break; \
    case 2: \
        launch_exact_kernel_const(window_width, 2, single_input_feature_map_group); \
        break; \
    case 3: \
        launch_exact_kernel_const(window_width, 3, single_input_feature_map_group); \
        break; \
    case 4: \
        launch_exact_kernel_const(window_width, 4, single_input_feature_map_group); \
        break; \
    case 5: \
        launch_exact_kernel_const(window_width, 5, single_input_feature_map_group); \
        break; \
    };
// Forward pass, generic-width kernel (window wider than MAX_WINDOW_WIDTH):
// only the block size is a compile-time constant.
#define launch_kernel_const(block_size_const, single_input_feature_map_group) \
    convolution_2d_tex_upd_kernel_kepler<block_size_const,single_input_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*output_neurons_buffer, input_tex, weights_tex, *data[1], xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, forward_input_feature_map_group_size, texture_offset, entry_count, different_input, xy_config_count, feature_map_config_count);
#define launch_kernel(block_size, single_input_feature_map_group) \
    switch (block_size) \
    { \
    case 1: \
        launch_kernel_const(1, single_input_feature_map_group); \
        break; \
    case 2: \
        launch_kernel_const(2, single_input_feature_map_group); \
        break; \
    case 3: \
        launch_kernel_const(3, single_input_feature_map_group); \
        break; \
    case 4: \
        launch_kernel_const(4, single_input_feature_map_group); \
        break; \
    case 5: \
        launch_kernel_const(5, single_input_feature_map_group); \
        break; \
    };
// Backprop, width-exact kernel: same dispatch pattern as the forward pass,
// expanded in enqueue_backprop's scope.
#define launch_backprop_exact_kernel_const_const(window_width_const, block_size_const, single_output_feature_map_group) \
    convolution_2d_deriviative_tex_exact_upd_kernel_kepler<window_width_const,block_size_const,single_output_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, output_tex, weights_tex, xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, xy_config_count, feature_map_config_count);
#define launch_backprop_exact_kernel_const(window_width, block_size_const, single_output_feature_map_group) \
    switch (window_width) \
    { \
    case 1: \
        launch_backprop_exact_kernel_const_const(1, block_size_const, single_output_feature_map_group); \
        break; \
    case 2: \
        launch_backprop_exact_kernel_const_const(2, block_size_const, single_output_feature_map_group); \
        break; \
    case 3: \
        launch_backprop_exact_kernel_const_const(3, block_size_const, single_output_feature_map_group); \
        break; \
    case 4: \
        launch_backprop_exact_kernel_const_const(4, block_size_const, single_output_feature_map_group); \
        break; \
    case 5: \
        launch_backprop_exact_kernel_const_const(5, block_size_const, single_output_feature_map_group); \
        break; \
    case 6: \
        launch_backprop_exact_kernel_const_const(6, block_size_const, single_output_feature_map_group); \
        break; \
    case 7: \
        launch_backprop_exact_kernel_const_const(7, block_size_const, single_output_feature_map_group); \
        break; \
    case 8: \
        launch_backprop_exact_kernel_const_const(8, block_size_const, single_output_feature_map_group); \
        break; \
    case 9: \
        launch_backprop_exact_kernel_const_const(9, block_size_const, single_output_feature_map_group); \
        break; \
    case 10: \
        launch_backprop_exact_kernel_const_const(10, block_size_const, single_output_feature_map_group); \
        break; \
    };
#define launch_backprop_exact_kernel(window_width, block_size, single_output_feature_map_group) \
    switch (block_size) \
    { \
    case 1: \
        launch_backprop_exact_kernel_const(window_width, 1, single_output_feature_map_group); \
        break; \
    case 2: \
        launch_backprop_exact_kernel_const(window_width, 2, single_output_feature_map_group); \
        break; \
    case 3: \
        launch_backprop_exact_kernel_const(window_width, 3, single_output_feature_map_group); \
        break; \
    case 4: \
        launch_backprop_exact_kernel_const(window_width, 4, single_output_feature_map_group); \
        break; \
    case 5: \
        launch_backprop_exact_kernel_const(window_width, 5, single_output_feature_map_group); \
        break; \
    };
// Backprop, generic-width kernel.
#define launch_backprop_kernel_const(block_size_const, single_output_feature_map_group) \
    convolution_2d_deriviative_tex_upd_kernel_kepler<block_size_const,single_output_feature_map_group><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*input_errors_buffer, output_tex, weights_tex, xy_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, backward_output_feature_map_group_size, entry_count, xy_config_count, feature_map_config_count);
#define launch_backprop_kernel(block_size, single_output_feature_map_group) \
    switch (block_size) \
    { \
    case 1: \
        launch_backprop_kernel_const(1, single_output_feature_map_group); \
        break; \
    case 2: \
        launch_backprop_kernel_const(2, single_output_feature_map_group); \
        break; \
    case 3: \
        launch_backprop_kernel_const(3, single_output_feature_map_group); \
        break; \
    case 4: \
        launch_backprop_kernel_const(4, single_output_feature_map_group); \
        break; \
    case 5: \
        launch_backprop_kernel_const(5, single_output_feature_map_group); \
        break; \
    };
// Weight update, width-exact kernel: expanded in enqueue_update_weights's scope.
#define launch_update_weights_exact_kernel_const(window_width_const, single_output_y_group_const) \
    convolution_2d_update_weights_exact_upd_kernel_kepler<window_width_const, single_output_y_group_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*data[0], input_tex, *training_speed[0], output_y_weight_y_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_y_group_count, texture_offset, entry_count, different_input, output_y_weight_y_config_count, feature_map_config_count);
#define launch_update_weights_exact_kernel(window_width, single_output_y_group_const) \
    switch (window_width) \
    { \
    case 1: \
        launch_update_weights_exact_kernel_const(1, single_output_y_group_const); \
        break; \
    case 2: \
        launch_update_weights_exact_kernel_const(2, single_output_y_group_const); \
        break; \
    case 3: \
        launch_update_weights_exact_kernel_const(3, single_output_y_group_const); \
        break; \
    case 4: \
        launch_update_weights_exact_kernel_const(4, single_output_y_group_const); \
        break; \
    case 5: \
        launch_update_weights_exact_kernel_const(5, single_output_y_group_const); \
        break; \
    case 6: \
        launch_update_weights_exact_kernel_const(6, single_output_y_group_const); \
        break; \
    case 7: \
        launch_update_weights_exact_kernel_const(7, single_output_y_group_const); \
        break; \
    case 8: \
        launch_update_weights_exact_kernel_const(8, single_output_y_group_const); \
        break; \
    case 9: \
        launch_update_weights_exact_kernel_const(9, single_output_y_group_const); \
        break; \
    case 10: \
        launch_update_weights_exact_kernel_const(10, single_output_y_group_const); \
        break; \
    };
// Weight update, generic-width kernel (windows wider than MAX_WINDOW_WIDTH).
#define launch_update_weights_kernel_const(single_output_y_group_const) \
    convolution_2d_update_weights_upd_kernel_kepler<single_output_y_group_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(*data[0], input_tex, *training_speed[0], output_y_weight_y_weight_x_config_list, feature_map_config_list, output_configuration_specific.dimension_sizes[0], output_configuration_specific.dimension_sizes[1], input_configuration_specific.dimension_sizes[0], input_configuration_specific.dimension_sizes[1], window_sizes[0], window_sizes[1], input_configuration_specific.feature_map_count, output_configuration_specific.feature_map_count, updater_output_y_group_count, texture_offset, entry_count, different_input, output_y_weight_y_weight_x_config_count, feature_map_config_count);
// Forward pass: convolves the input neurons with the current weights and
// writes the output neurons on the given stream. Picks a width-exact
// (fully unrolled) kernel when the window fits MAX_WINDOW_WIDTH, and
// zero-fills the output first when several input-feature-map groups
// accumulate into it.
void convolution_2d_layer_updater_cuda_kepler::enqueue_test(
    unsigned int offset_input_entry_id,
    cudaStream_t stream_id,
    const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
    const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
    const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
    cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
    const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
    std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
    unsigned int entry_count)
{
    // Lazily create and cache texture objects over the input neurons ([0])
    // and the weights ([1]).
    if (dynamic_memobjects[0] == 0)
        dynamic_memobjects[0] = cuda_texture_smart_ptr(new cuda_texture(input_neurons_buffer));
    cuda_texture& input_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[0].get()));
    int texture_offset = offset_input_entry_id * input_elem_count_per_entry;
    if (dynamic_memobjects[1] == 0)
        dynamic_memobjects[1] = cuda_texture_smart_ptr(new cuda_texture(data[0]));
    cuda_texture& weights_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[1].get()));
    // Precomputed per-thread task lists (see fill_additional_buffers).
    int xy_config_count = forward_x_block_count * output_configuration_specific.dimension_sizes[1];
    const xy_config * xy_config_list = static_cast<const xy_config *>((const void *)*additional_buffers[0]);
    int feature_map_config_count = forward_input_feature_map_group_count * forward_output_feature_map_block_count;
    const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[1]);
    // With more than one input-feature-map group, several kernel tasks
    // accumulate into the same outputs, so the buffer must start at zero.
    if (forward_input_feature_map_group_count > 1)
        cuda_util::set_with_value(
            *cuda_config,
            *output_neurons_buffer,
            0.0F,
            output_elem_count_per_entry * entry_count,
            stream_id);
    std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
        *cuda_config,
        xy_config_count,
        feature_map_config_count,
        entry_count);
    // Dispatch to the appropriate kernel instantiation (macros above).
    if (window_sizes[0] <= MAX_WINDOW_WIDTH)
    {
        if (forward_input_feature_map_group_count == 1)
        {
            launch_exact_kernel(window_sizes[0], forward_x_block_size, true);
        }
        else
        {
            launch_exact_kernel(window_sizes[0], forward_x_block_size, false);
        }
    }
    else
    {
        if (forward_input_feature_map_group_count == 1)
        {
            launch_kernel(forward_x_block_size, true);
        }
        else
        {
            launch_kernel(forward_x_block_size, false);
        }
    }
}
// Backward pass: propagates output errors through the weights into the
// input-errors buffer. Requires per-entry input (different_input) and an
// updater configured with backprop_required; throws otherwise.
void convolution_2d_layer_updater_cuda_kepler::enqueue_backprop(
    cudaStream_t stream_id,
    const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
    const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
    const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
    const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
    cuda_linear_buffer_device_smart_ptr output_errors_buffer,
    cuda_linear_buffer_device_smart_ptr input_errors_buffer,
    const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
    std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
    unsigned int entry_count)
{
    if (!different_input)
        throw neural_network_exception("convolution_2d_layer_updater_cuda_kepler is not able to backprop to the same input");
    if (!backprop_required)
        throw neural_network_exception("convolution_2d_layer_updater_cuda_kepler is not configured to do backprop but requested to");
    // Lazily create and cache texture objects over the output errors ([2])
    // and the weights ([1]).
    if (dynamic_memobjects[2] == 0)
        dynamic_memobjects[2] = cuda_texture_smart_ptr(new cuda_texture(output_errors_buffer));
    cuda_texture& output_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[2].get()));
    if (dynamic_memobjects[1] == 0)
        dynamic_memobjects[1] = cuda_texture_smart_ptr(new cuda_texture(data[0]));
    cuda_texture& weights_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[1].get()));
    // Precomputed per-thread task lists (see fill_additional_buffers).
    int xy_config_count = backward_x_block_count * input_configuration_specific.dimension_sizes[1];
    const xy_config * xy_config_list = static_cast<const xy_config *>((const void *)*additional_buffers[4]);
    int feature_map_config_count = backward_output_feature_map_group_count * backward_input_feature_map_block_count;
    const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[5]);
    // Multiple output-feature-map groups accumulate into the same input
    // errors, so the buffer must start at zero in that case.
    if (backward_output_feature_map_group_count > 1)
        cuda_util::set_with_value(
            *cuda_config,
            *input_errors_buffer,
            0.0F,
            input_elem_count_per_entry * entry_count,
            stream_id);
    std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
        *cuda_config,
        xy_config_count,
        feature_map_config_count,
        entry_count);
    // Dispatch to the appropriate kernel instantiation (macros above).
    if (window_sizes[0] <= MAX_WINDOW_WIDTH)
    {
        if (backward_output_feature_map_group_count == 1)
        {
            launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, true);
        }
        else
        {
            launch_backprop_exact_kernel(window_sizes[0], backward_x_block_size, false);
        }
    }
    else
    {
        if (backward_output_feature_map_group_count == 1)
        {
            launch_backprop_kernel(backward_x_block_size, true);
        }
        else
        {
            launch_backprop_kernel(backward_x_block_size, false);
        }
    }
}
// Applies one update step to the per-entry biases (data[1]) and weights
// (data[0]) from the output errors, scaled by the per-parameter training
// speeds.
void convolution_2d_layer_updater_cuda_kepler::enqueue_update_weights(
    unsigned int offset_input_entry_id,
    cudaStream_t stream_id,
    const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
    const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
    const std::vector<const_cuda_linear_buffer_device_smart_ptr>& training_speed,
    cuda_linear_buffer_device_smart_ptr output_errors_buffer,
    const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
    const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
    std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
    unsigned int entry_count)
{
    // Update biases
    {
        // One block per (feature map, entry); each block reduces its
        // feature map's errors. min_iteration_count tells the kernel how
        // many full passes every thread is guaranteed to make.
        int threadblock_size = get_threadblock_size_biases(output_elem_count_per_feature_map);
        dim3 grid_size(1, output_configuration_specific.feature_map_count, entry_count);
        dim3 block_size(threadblock_size, 1, 1);
        int min_iteration_count = output_elem_count_per_feature_map / threadblock_size;
        convolution_2d_update_biases_upd_kernel_kepler<<<grid_size, block_size, 0, stream_id>>>(
            *data[1],
            *output_errors_buffer,
            *training_speed[1],
            output_configuration_specific.feature_map_count,
            output_elem_count_per_feature_map,
            min_iteration_count);
    }
    // The weight-update kernels read output errors through the legacy
    // texture reference output_tex_ref; bind it over the whole error buffer.
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cuda_safe_call(cudaBindTexture(0, output_tex_ref, *output_errors_buffer, desc, output_elem_count_per_entry * entry_count * sizeof(float)));
    // Lazily create and cache a texture object over the input neurons ([0]).
    if (dynamic_memobjects[0] == 0)
        dynamic_memobjects[0] = cuda_texture_smart_ptr(new cuda_texture(input_neurons_buffer));
    cuda_texture& input_tex = *(dynamic_cast<cuda_texture *>(dynamic_memobjects[0].get()));
    int texture_offset = offset_input_entry_id * input_elem_count_per_entry;
    int feature_map_config_count = updater_output_feature_map_block_count * input_configuration_specific.feature_map_count;
    const feature_map_config * feature_map_config_list = static_cast<const feature_map_config *>((const void *)*additional_buffers[3]);
    // Update weights
    {
        // A single x block means the whole window width fits the
        // width-exact kernel; otherwise tasks also carry a weight_x chunk.
        if (updater_window_x_block_count == 1)
        {
            int output_y_weight_y_config_count = updater_output_y_group_count * window_sizes[1];
            const output_y_weight_y_config * output_y_weight_y_config_list = static_cast<const output_y_weight_y_config *>((const void *)*additional_buffers[2]);
            std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
                *cuda_config,
                output_y_weight_y_config_count,
                feature_map_config_count,
                entry_count);
            if (updater_output_y_group_count == 1)
            {
                launch_update_weights_exact_kernel(window_sizes[0], true);
            }
            else
            {
                launch_update_weights_exact_kernel(window_sizes[0], false);
            }
        }
        else
        {
            int output_y_weight_y_weight_x_config_count = updater_output_y_group_count * window_sizes[1] * updater_window_x_block_count;
            const output_y_weight_y_weight_x_config * output_y_weight_y_weight_x_config_list = static_cast<const output_y_weight_y_weight_x_config *>((const void *)*additional_buffers[2]);
            std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
                *cuda_config,
                output_y_weight_y_weight_x_config_count,
                feature_map_config_count,
                entry_count);
            if (updater_output_y_group_count == 1)
            {
                launch_update_weights_kernel_const(true);
            }
            else
            {
                launch_update_weights_kernel_const(false);
            }
        }
    }
}
// Balanced tiling of `width` elements into blocks of at most MAX_BLOCK_SIZE:
// first pick the fewest blocks that suffice, then return the (evened-out)
// per-block size.
int convolution_2d_layer_updater_cuda_kepler::get_block_size(int width)
{
    const int blocks = (width + MAX_BLOCK_SIZE - 1) / MAX_BLOCK_SIZE;
    return (width + blocks - 1) / blocks;
}
// Derives the launch-geometry constants from the configured layer schema:
// window sizes, x-direction blocking and feature-map blocking for the
// forward, weight-update and (optional) backprop kernels.
void convolution_2d_layer_updater_cuda_kepler::updater_configured()
{
    std::tr1::shared_ptr<const convolution_layer> layer_derived = std::tr1::dynamic_pointer_cast<const convolution_layer>(layer_schema);
    for(std::vector<unsigned int>::const_iterator it = layer_derived->window_sizes.begin(); it != layer_derived->window_sizes.end(); ++it)
        window_sizes.push_back(static_cast<int>(*it));
    forward_x_block_size = get_block_size(output_configuration_specific.dimension_sizes[0]);
    forward_x_block_count = (output_configuration_specific.dimension_sizes[0] + forward_x_block_size - 1) / forward_x_block_size;
    // Output feature maps are processed FEATURE_MAP_BLOCK_SIZE at a time.
    forward_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
    updater_output_feature_map_block_count = (output_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
    // Narrow windows use a single width-exact block; wider ones are split
    // into WINDOW_WIDTH_LOCAL-wide chunks for the generic update kernel.
    updater_window_x_block_count = (window_sizes[0] <= MAX_WINDOW_WIDTH) ? 1 : (window_sizes[0] + WINDOW_WIDTH_LOCAL - 1) / WINDOW_WIDTH_LOCAL;
    if (backprop_required)
    {
        backward_x_block_size = get_block_size(input_configuration_specific.dimension_sizes[0]);
        backward_x_block_count = (input_configuration_specific.dimension_sizes[0] + backward_x_block_size - 1) / backward_x_block_size;
        backward_input_feature_map_block_count = (input_configuration_specific.feature_map_count + FEATURE_MAP_BLOCK_SIZE - 1) / FEATURE_MAP_BLOCK_SIZE;
    }
}
// Backprop writes input errors into a separate buffer, never over the
// output-errors buffer (see enqueue_backprop).
bool convolution_2d_layer_updater_cuda_kepler::is_in_place_backprop() const
{
    return false;
}
// Per-entry element counts of the buffers accessed linearly through
// textures: the input neurons and the output (error) elements.
std::vector<unsigned int> convolution_2d_layer_updater_cuda_kepler::get_linear_addressing_through_texture_per_entry() const
{
    std::vector<unsigned int> res(2);
    res[0] = input_elem_count_per_entry;
    res[1] = output_elem_count_per_entry;
    return res;
}
// Picks a threadblock size for the bias-update kernel, always rounded up to
// a multiple of the warp size (32). Small workloads are covered by a single
// rounding; larger ones are first split into ~128-wide chunks and the
// balanced chunk size is rounded instead.
int convolution_2d_layer_updater_cuda_kepler::get_threadblock_size_biases(int output_neuron_count)
{
    if (output_neuron_count < 128)
        return ((output_neuron_count + 31) / 32) * 32;
    const int chunk_count = (output_neuron_count + 127) / 128;
    const int chunk_size = (output_neuron_count + chunk_count - 1) / chunk_count;
    return ((chunk_size + 31) / 32) * 32;
}
// Byte sizes of the fixed additional device buffers, in the order
// fill_additional_buffers populates them:
//   [0] forward (y, x-block) tasks,  [1] forward feature-map tasks,
//   [2] weight-update tasks,         [3] weight-update feature-map tasks,
//   [4]/[5] backprop tasks (only when backprop is required).
std::vector<size_t> convolution_2d_layer_updater_cuda_kepler::get_sizes_of_additional_buffers_fixed() const
{
    std::vector<size_t> res;
    res.push_back(sizeof(xy_config) * forward_x_block_count * output_configuration_specific.dimension_sizes[1]);
    res.push_back(sizeof(feature_map_config) * input_configuration_specific.feature_map_count * forward_output_feature_map_block_count);
    // NOTE(review): when updater_window_x_block_count > 1 this buffer holds
    // output_y_weight_y_weight_x_config entries but is sized with
    // sizeof(output_y_weight_y_config) -- confirm the two structs have the
    // same size.
    res.push_back(sizeof(output_y_weight_y_config) * window_sizes[1] * output_configuration_specific.dimension_sizes[1] * updater_window_x_block_count);
    res.push_back(sizeof(feature_map_config) * input_configuration_specific.feature_map_count * updater_output_feature_map_block_count);
    if (backprop_required)
    {
        res.push_back(sizeof(xy_config) * backward_x_block_count * input_configuration_specific.dimension_sizes[1]);
        res.push_back(sizeof(feature_map_config) * output_configuration_specific.feature_map_count * backward_input_feature_map_block_count);
    }
    return res;
}
// Populates the precomputed task-list buffers consumed by the forward,
// weight-update and backprop kernels.
// additional_buffers layout:
//   [0] forward (y, x-block) tasks       [1] forward feature-map tasks
//   [2] weight-update (output_y, weight_y[, weight_x]) tasks
//   [3] weight-update feature-map tasks
//   [4] backprop (y, x-block) tasks      [5] backprop feature-map tasks
void convolution_2d_layer_updater_cuda_kepler::fill_additional_buffers(const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers) const
{
    {
        std::vector<xy_config> task_list;
        for(int y = 0; y < output_configuration_specific.dimension_sizes[1]; ++y)
            for(int x = 0; x < forward_x_block_count; ++x)
                task_list.push_back(xy_config(y, x * forward_x_block_size));
        cuda_safe_call(cudaMemcpy(*additional_buffers[0], &(*task_list.begin()), sizeof(xy_config) * task_list.size(), cudaMemcpyHostToDevice));
    }
    {
        std::vector<feature_map_config> task_list;
        for(int input_feature_map_group_id = 0; input_feature_map_group_id < forward_input_feature_map_group_count; ++input_feature_map_group_id)
            for(int output_feature_map_id = 0; output_feature_map_id < forward_output_feature_map_block_count; ++output_feature_map_id)
                task_list.push_back(feature_map_config(input_feature_map_group_id * forward_input_feature_map_group_size, output_feature_map_id * FEATURE_MAP_BLOCK_SIZE));
        cuda_safe_call(cudaMemcpy(*additional_buffers[1], &(*task_list.begin()), sizeof(feature_map_config) * task_list.size(), cudaMemcpyHostToDevice));
    }
    if (updater_window_x_block_count == 1)
    {
        std::vector<output_y_weight_y_config> task_list;
        for(int output_y = 0; output_y < updater_output_y_group_count; ++output_y)
            for(int weight_y = 0; weight_y < window_sizes[1]; ++weight_y)
                task_list.push_back(output_y_weight_y_config(output_y, weight_y));
        // Bug fix: the copy size must use the element type actually stored in
        // task_list (output_y_weight_y_config), not xy_config.
        cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(output_y_weight_y_config) * task_list.size(), cudaMemcpyHostToDevice));
    }
    else
    {
        std::vector<output_y_weight_y_weight_x_config> task_list;
        // Bug fix: weight_x blocks are WINDOW_WIDTH_LOCAL wide (see
        // updater_configured and the generic weight-update kernel), so the
        // starting x offset steps by WINDOW_WIDTH_LOCAL, not
        // FEATURE_MAP_BLOCK_SIZE (they only coincided when the two constants
        // happened to be equal).
        for(int output_y = 0; output_y < updater_output_y_group_count; ++output_y)
            for(int weight_y = 0; weight_y < window_sizes[1]; ++weight_y)
                for(int weight_x = 0; weight_x < updater_window_x_block_count; ++weight_x)
                    task_list.push_back(output_y_weight_y_weight_x_config(output_y, weight_y, weight_x * WINDOW_WIDTH_LOCAL));
        // Bug fix: size by the stored element type, not xy_config.
        cuda_safe_call(cudaMemcpy(*additional_buffers[2], &(*task_list.begin()), sizeof(output_y_weight_y_weight_x_config) * task_list.size(), cudaMemcpyHostToDevice));
    }
    {
        // Pair input feature maps with output feature-map blocks using the
        // shared tiling pattern helper.
        std::vector<std::pair<int, int> > pair_list;
        cuda_util::fill_tiling_pattern(input_configuration_specific.feature_map_count, updater_output_feature_map_block_count, pair_list);
        std::vector<feature_map_config> task_list;
        for(std::vector<std::pair<int, int> >::const_iterator it = pair_list.begin(); it != pair_list.end(); ++it)
            task_list.push_back(feature_map_config(it->first, it->second * FEATURE_MAP_BLOCK_SIZE));
        cuda_safe_call(cudaMemcpy(*additional_buffers[3], &(*task_list.begin()), sizeof(feature_map_config) * task_list.size(), cudaMemcpyHostToDevice));
    }
    if (backprop_required)
    {
        {
            // Backprop x tasks point at the LAST column of each block.
            std::vector<xy_config> task_list;
            for(int y = 0; y < input_configuration_specific.dimension_sizes[1]; ++y)
                for(int x = 0; x < backward_x_block_count; ++x)
                    task_list.push_back(xy_config(y, x * backward_x_block_size + (backward_x_block_size - 1)));
            cuda_safe_call(cudaMemcpy(*additional_buffers[4], &(*task_list.begin()), sizeof(xy_config) * task_list.size(), cudaMemcpyHostToDevice));
        }
        {
            std::vector<feature_map_config> task_list;
            for(int output_feature_map_group_id = 0; output_feature_map_group_id < backward_output_feature_map_group_count; ++output_feature_map_group_id)
                for(int input_feature_map_id = 0; input_feature_map_id < backward_input_feature_map_block_count; ++input_feature_map_id)
                    task_list.push_back(feature_map_config(input_feature_map_id * FEATURE_MAP_BLOCK_SIZE, output_feature_map_group_id * backward_output_feature_map_group_size));
            cuda_safe_call(cudaMemcpy(*additional_buffers[5], &(*task_list.begin()), sizeof(feature_map_config) * task_list.size(), cudaMemcpyHostToDevice));
        }
    }
}
// Decides how many groups to split each reduction dimension into so the GPU
// stays saturated at the given maximum entry count (delegated to
// cuda_util::get_group_count), then derives the per-group sizes used by the
// kernels and the task-list builders.
void convolution_2d_layer_updater_cuda_kepler::set_max_entry_count(unsigned int max_entry_count)
{
    // Forward pass: split the input feature maps.
    forward_input_feature_map_group_count = cuda_util::get_group_count(
        *cuda_config,
        forward_x_block_count * output_configuration_specific.dimension_sizes[1] * forward_output_feature_map_block_count * max_entry_count,
        input_configuration_specific.feature_map_count);
    forward_input_feature_map_group_size = (input_configuration_specific.feature_map_count + forward_input_feature_map_group_count - 1) / forward_input_feature_map_group_count;
    // Weight update: split the output rows.
    updater_output_y_group_count = cuda_util::get_group_count(
        *cuda_config,
        updater_output_feature_map_block_count * input_configuration_specific.feature_map_count * window_sizes[1] * max_entry_count * updater_window_x_block_count,
        output_configuration_specific.dimension_sizes[1]);
    updater_output_y_group_size = (output_configuration_specific.dimension_sizes[1] + updater_output_y_group_count - 1) / updater_output_y_group_count;
    if (backprop_required)
    {
        // Backprop: split the output feature maps.
        backward_output_feature_map_group_count = cuda_util::get_group_count(
            *cuda_config,
            backward_x_block_count * input_configuration_specific.dimension_sizes[1] * backward_input_feature_map_block_count * max_entry_count,
            output_configuration_specific.feature_map_count);
        backward_output_feature_map_group_size = (output_configuration_specific.feature_map_count + backward_output_feature_map_group_count - 1) / backward_output_feature_map_group_count;
    }
}
// Number of lazily created texture objects cached in dynamic_memobjects:
// [0] input neurons, [1] weights, [2] output errors (see the enqueue_* methods).
int convolution_2d_layer_updater_cuda_kepler::get_dynamic_memobject_count() const
{
    return 3;
}
}
}
|
194b2b09d24cd88ee0d6cdfb7010a53fe3343877.hip | // !!! This is a file automatically generated by hipify!!!
/* Sushil Dubey, Shashi Dugad, TIFR, July 2017
*
* File Name: RawToClusterGPU.cu
* Description: It converts Raw data into Digi Format on GPU
* Finaly the Output of RawToDigi data is given to pixelClusterizer
**/
// C++ includes
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
// CUDA includes
#include <hip/hip_runtime.h>
// CMSSW includes
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "CondFormats/SiPixelObjects/interface/SiPixelROCsStatusAndMapping.h"
#include "DataFormats/FEDRawData/interface/FEDNumbering.h"
#include "DataFormats/TrackerCommon/interface/TrackerTopology.h"
#include "DataFormats/SiPixelDigi/interface/SiPixelDigiConstants.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h"
// local includes
#include "SiPixelRawToClusterGPUKernel.h"
namespace pixelgpudetails {
// Allocate pinned (non-cached, write-combined) host buffers for the raw
// 32-bit data words and their per-word-pair FED ids. Write-combined memory
// speeds up the host->device copy; it should only be written sequentially
// from the host, never read back.
SiPixelRawToClusterGPUKernel::WordFedAppender::WordFedAppender(uint32_t maxFedWords) {
word_ = cms::cuda::make_host_noncached_unique<unsigned int[]>(maxFedWords, hipHostMallocWriteCombined);
fedId_ = cms::cuda::make_host_noncached_unique<unsigned char[]>(maxFedWords, hipHostMallocWriteCombined);
}
// Append one FED's payload to the staging buffers.
// The data words are copied verbatim at offset wordCounterGPU; the fedId
// buffer stores one id per *pair* of words (hence the /2 on both offset and
// length), normalized so the first uTCA pixel FED becomes id 0.
// NOTE(review): assumes wordCounterGPU and length are even — confirm with
// the caller (the kernel later reads fedIds[gIndex / 2]).
void SiPixelRawToClusterGPUKernel::WordFedAppender::initializeWordFed(int fedId,
unsigned int wordCounterGPU,
const cms_uint32_t *src,
unsigned int length) {
std::memcpy(word_.get() + wordCounterGPU, src, sizeof(cms_uint32_t) * length);
std::memset(fedId_.get() + wordCounterGPU / 2, fedId - FEDNumbering::MINSiPixeluTCAFEDID, length / 2);
}
////////////////////
// True when the raw detector id belongs to the pixel barrel subdetector.
__device__ bool isBarrel(uint32_t rawId) {
  uint32_t subdet = (rawId >> DetId::kSubdetOffset) & DetId::kSubdetMask;
  return subdet == PixelSubdetector::PixelBarrel;
}
// Look up the (rawId, rocInDet, moduleId) triplet for a given FED/link/ROC
// in the flattened cabling map. Link numbering starts at 1, hence (link - 1).
__device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelROCsStatusAndMapping *cablingMap,
                                              uint8_t fed,
                                              uint32_t link,
                                              uint32_t roc) {
  const uint32_t idx = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
  return pixelgpudetails::DetIdGPU{cablingMap->rawId[idx], cablingMap->rocInDet[idx], cablingMap->moduleId[idx]};
}
//reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html
//http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071
// Convert local pixel to pixelgpudetails::global pixel
// Convert a local (row, col) pixel coordinate within one ROC into the global
// coordinate frame of the module, based on the module orientation.
// bpix selects barrel vs forward; side is the +/-Z sign (barrel) or the
// panel-derived sign (forward); layer is only used to special-case BPIX
// layer 1; rocIdInDetUnit indexes the ROC within the module (0-15).
// Each branch selects a slope (+1/-1) and offset per axis; the global
// coordinate is then offset + slope * local.
__device__ pixelgpudetails::Pixel frameConversion(
bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) {
int slopeRow = 0, slopeCol = 0;
int rowOffset = 0, colOffset = 0;
if (bpix) {
if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
} // if roc
} else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1
if (rocIdInDetUnit < 8) {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc;
} else {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
}
}
} else { // fpix
if (side == -1) { // pannel 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} else { // pannel 2
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} // side
}
// apply the affine transform chosen above
uint32_t gRow = rowOffset + slopeRow * local.row;
uint32_t gCol = colOffset + slopeCol * local.col;
// inside frameConversion row: gRow, column: gCol
pixelgpudetails::Pixel global = {gRow, gCol};
return global;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) {
uint8_t errorType = 0;
switch (status) {
case (1): {
if (debug)
printf("Error in Fed: %i, invalid channel Id (errorType = 35\n)", fedId);
errorType = 35;
break;
}
case (2): {
if (debug)
printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId);
errorType = 36;
break;
}
case (3): {
if (debug)
printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId);
errorType = 37;
break;
}
case (4): {
if (debug)
printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId);
errorType = 38;
break;
}
default:
if (debug)
printf("Cabling check returned unexpected result, status = %i\n", status);
};
return errorType;
}
// Check that a (row, col) pair is a valid local ROC coordinate.
__device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) {
  /// row and column in ROC representation
  const bool rowOk = rocRow < pixelgpudetails::numRowsInRoc;
  const bool colOk = rocCol < pixelgpudetails::numColsInRoc;
  return rowOk & colOk;  // bitwise '&' keeps the check branchless, as in the original
}
__device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); }
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint8_t checkROC(uint32_t errorWord,
uint8_t fedId,
uint32_t link,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint8_t errorType = (errorWord >> sipixelconstants::ROC_shift) & sipixelconstants::ERROR_mask;
if (errorType < 25)
return 0;
bool errorFound = false;
switch (errorType) {
case (25): {
errorFound = true;
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1;
if (index > 1 && index <= cablingMap->size) {
if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index]))
errorFound = false;
}
if (debug and errorFound)
printf("Invalid ROC = 25 found (errorType = 25)\n");
break;
}
case (26): {
if (debug)
printf("Gap word found (errorType = 26)\n");
errorFound = true;
break;
}
case (27): {
if (debug)
printf("Dummy word found (errorType = 27)\n");
errorFound = true;
break;
}
case (28): {
if (debug)
printf("Error fifo nearly full (errorType = 28)\n");
errorFound = true;
break;
}
case (29): {
if (debug)
printf("Timeout on a channel (errorType = 29)\n");
if ((errorWord >> sipixelconstants::OMIT_ERR_shift) & sipixelconstants::OMIT_ERR_mask) {
if (debug)
printf("...first errorType=29 error, this gets masked out\n");
}
errorFound = true;
break;
}
case (30): {
if (debug)
printf("TBM error trailer (errorType = 30)\n");
int stateMatch_bits = 4;
int stateMatch_shift = 8;
uint32_t stateMatch_mask = ~(~uint32_t(0) << stateMatch_bits);
int stateMatch = (errorWord >> stateMatch_shift) & stateMatch_mask;
if (stateMatch != 1 && stateMatch != 8) {
if (debug)
printf("FED error 30 with unexpected State Bits (errorType = 30)\n");
}
if (stateMatch == 1)
errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30
errorFound = true;
break;
}
case (31): {
if (debug)
printf("Event number error (errorType = 31)\n");
errorFound = true;
break;
}
default:
errorFound = false;
};
return errorFound ? errorType : 0;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
__device__ uint32_t getErrRawID(uint8_t fedId,
uint32_t errWord,
uint32_t errorType,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint32_t rID = 0xffffffff;
switch (errorType) {
case 25:
case 30:
case 31:
case 36:
case 40: {
uint32_t roc = 1;
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 29: {
int chanNmbr = 0;
const int DB0_shift = 0;
const int DB1_shift = DB0_shift + 1;
const int DB2_shift = DB1_shift + 1;
const int DB3_shift = DB2_shift + 1;
const int DB4_shift = DB3_shift + 1;
const uint32_t DataBit_mask = ~(~uint32_t(0) << 1);
int CH1 = (errWord >> DB0_shift) & DataBit_mask;
int CH2 = (errWord >> DB1_shift) & DataBit_mask;
int CH3 = (errWord >> DB2_shift) & DataBit_mask;
int CH4 = (errWord >> DB3_shift) & DataBit_mask;
int CH5 = (errWord >> DB4_shift) & DataBit_mask;
int BLOCK_bits = 3;
int BLOCK_shift = 8;
uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits);
int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask;
int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5;
if (BLOCK % 2 == 0)
chanNmbr = (BLOCK / 2) * 9 + localCH;
else
chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH;
if ((chanNmbr < 1) || (chanNmbr > 36))
break; // signifies unexpected result
uint32_t roc = 1;
uint32_t link = chanNmbr;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 37:
case 38: {
uint32_t roc = sipixelconstants::getROC(errWord);
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
default:
break;
};
return rID;
}
// Kernel to perform Raw to Digi conversion
__global__ void RawToDigi_kernel(const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const uint32_t wordCounter,
const uint32_t *word,
const uint8_t *fedIds,
uint16_t *xx,
uint16_t *yy,
uint16_t *adc,
uint32_t *pdigi,
uint32_t *rawIdArr,
uint16_t *moduleId,
cms::cuda::SimpleVector<SiPixelErrorCompact> *err,
bool useQualityInfo,
bool includeErrors,
bool debug) {
//if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end);
int32_t first = threadIdx.x + blockIdx.x * blockDim.x;
for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) {
auto gIndex = iloop;
xx[gIndex] = 0;
yy[gIndex] = 0;
adc[gIndex] = 0;
bool skipROC = false;
uint8_t fedId = fedIds[gIndex / 2]; // +1200;
// initialize (too many coninue below)
pdigi[gIndex] = 0;
rawIdArr[gIndex] = 0;
moduleId[gIndex] = gpuClustering::invalidModuleId;
uint32_t ww = word[gIndex]; // Array containing 32 bit raw data
if (ww == 0) {
// 0 is an indicator of a noise/dead channel, skip these pixels during clusterization
continue;
}
uint32_t link = sipixelconstants::getLink(ww); // Extract link
uint32_t roc = sipixelconstants::getROC(ww); // Extract Roc in link
pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc);
uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug);
skipROC = (roc < pixelgpudetails::maxROCIndex) ? false : (errorType != 0);
if (includeErrors and skipROC) {
uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug);
err->push_back(SiPixelErrorCompact{rID, ww, errorType, fedId});
continue;
}
uint32_t rawId = detId.rawId;
uint32_t rocIdInDetUnit = detId.rocInDet;
bool barrel = isBarrel(rawId);
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
if (useQualityInfo) {
skipROC = cablingMap->badRocs[index];
if (skipROC)
continue;
}
skipROC = modToUnp[index];
if (skipROC)
continue;
uint32_t layer = 0;
int side = 0, panel = 0, module = 0;
if (barrel) {
layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask;
module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask;
side = (module < 5) ? -1 : 1;
} else {
// endcap ids
layer = 0;
panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask;
side = (panel == 1) ? -1 : 1;
}
// ***special case of layer to 1 be handled here
pixelgpudetails::Pixel localPix;
if (layer == 1) {
uint32_t col = sipixelconstants::getCol(ww);
uint32_t row = sipixelconstants::getRow(ww);
localPix.row = row;
localPix.col = col;
if (includeErrors) {
if (not rocRowColIsValid(row, col)) {
uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("BPIX1 Error status: %i\n", error);
continue;
}
}
} else {
// ***conversion rules for dcol and pxid
uint32_t dcol = sipixelconstants::getDCol(ww);
uint32_t pxid = sipixelconstants::getPxId(ww);
uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2;
uint32_t col = dcol * 2 + pxid % 2;
localPix.row = row;
localPix.col = col;
if (includeErrors and not dcolIsValid(dcol, pxid)) {
uint8_t error = conversionError(fedId, 3, debug);
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc);
continue;
}
}
pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix);
xx[gIndex] = globalPix.row; // origin shifting by 1 0-159
yy[gIndex] = globalPix.col; // origin shifting by 1 0-415
adc[gIndex] = sipixelconstants::getADC(ww);
pdigi[gIndex] = pixelgpudetails::pack(globalPix.row, globalPix.col, adc[gIndex]);
moduleId[gIndex] = detId.moduleId;
rawIdArr[gIndex] = rawId;
} // end of loop (gIndex < end)
} // end of Raw to Digi kernel
// Build per-module hit start offsets: moduleStart[m+1] = running sum of the
// (clamped) cluster counts of modules 0..m; moduleStart[0] stays 0.
// MUST be launched as exactly one block (asserted) since it performs a
// block-wide prefix scan; the scan is done in two 1024-sized chunks, with
// the second chunk offset by the first chunk's total.
__global__ void fillHitsModuleStart(uint32_t const *__restrict__ cluStart, uint32_t *__restrict__ moduleStart) {
assert(gpuClustering::maxNumModules < 2048); // easy to extend at least till 32*1024
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
int first = threadIdx.x;
// limit to maxHitsInModule()
for (int i = first, iend = gpuClustering::maxNumModules; i < iend; i += blockDim.x) {
moduleStart[i + 1] = ::min(gpuClustering::maxHitsInModule(), cluStart[i]);
}
__shared__ uint32_t ws[32];
cms::cuda::blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws);
cms::cuda::blockPrefixScan(moduleStart + 1025, moduleStart + 1025, gpuClustering::maxNumModules - 1024, ws);
for (int i = first + 1025, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
moduleStart[i] += moduleStart[1024];
}
__syncthreads();
#ifdef GPU_DEBUG
assert(0 == moduleStart[0]);
auto c0 = ::min(gpuClustering::maxHitsInModule(), cluStart[0]);
assert(c0 == moduleStart[1]);
assert(moduleStart[1024] >= moduleStart[1023]);
assert(moduleStart[1025] >= moduleStart[1024]);
assert(moduleStart[gpuClustering::maxNumModules] >= moduleStart[1025]);
for (int i = first, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
if (0 != i)
assert(moduleStart[i] >= moduleStart[i - 1]); // monotonicity; fixed: was `i - i`, which always compared against moduleStart[0]
// [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID]
// [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856]
if (i == 96 || i == 1184 || i == 1744 || i == gpuClustering::maxNumModules)
printf("moduleStart %d %d\n", i, moduleStart[i]);
}
#endif
}
// Interface to outside
void SiPixelRawToClusterGPUKernel::makeClustersAsync(bool isRun2,
const SiPixelClusterThresholds clusterThresholds,
const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const SiPixelGainForHLTonGPU *gains,
const WordFedAppender &wordFed,
SiPixelFormatterErrors &&errors,
const uint32_t wordCounter,
const uint32_t fedCounter,
const uint32_t maxFedWords,
bool useQualityInfo,
bool includeErrors,
bool debug,
hipStream_t stream) {
nDigis = wordCounter;
#ifdef GPU_DEBUG
std::cout << "decoding " << wordCounter << " digis. Max is " << maxFedWords << std::endl;
#endif
digis_d = SiPixelDigisCUDA(maxFedWords, stream);
if (includeErrors) {
digiErrors_d = SiPixelDigiErrorsCUDA(maxFedWords, std::move(errors), stream);
}
clusters_d = SiPixelClustersCUDA(gpuClustering::maxNumModules, stream);
nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(2, stream);
if (wordCounter) // protect in case of empty event....
{
const int threadsPerBlock = 512;
const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all
assert(0 == wordCounter % 2);
// wordCounter is the total no of words in each event to be trasfered on device
auto word_d = cms::cuda::make_device_unique<uint32_t[]>(wordCounter, stream);
auto fedId_d = cms::cuda::make_device_unique<uint8_t[]>(wordCounter, stream);
cudaCheck(
hipMemcpyAsync(word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), hipMemcpyDefault, stream));
cudaCheck(hipMemcpyAsync(
fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, hipMemcpyDefault, stream));
// Launch rawToDigi kernel
hipLaunchKernelGGL(( RawToDigi_kernel), dim3(blocks), dim3(threadsPerBlock), 0, stream,
cablingMap,
modToUnp,
wordCounter,
word_d.get(),
fedId_d.get(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
digis_d.pdigi(),
digis_d.rawIdArr(),
digis_d.moduleInd(),
digiErrors_d.error(), // returns nullptr if default-constructed
useQualityInfo,
includeErrors,
debug);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
if (includeErrors) {
digiErrors_d.copyErrorToHostAsync(stream);
}
}
// End of Raw2Digi and passing data for clustering
{
// clusterizer ...
using namespace gpuClustering;
int threadsPerBlock = 256;
int blocks =
(::max(int(wordCounter), int(gpuClustering::maxNumModules)) + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( gpuCalibPixel::calibDigis), dim3(blocks), dim3(threadsPerBlock), 0, stream, isRun2,
digis_d.moduleInd(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
#ifdef GPU_DEBUG
std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock
<< " threads\n";
#endif
hipLaunchKernelGGL(( countModules), dim3(blocks), dim3(threadsPerBlock), 0, stream,
digis_d.moduleInd(), clusters_d.moduleStart(), digis_d.clus(), wordCounter);
cudaCheck(hipGetLastError());
// read the number of modules into a data member, used by getProduct())
cudaCheck(hipMemcpyAsync(
&(nModules_Clusters_h[0]), clusters_d.moduleStart(), sizeof(uint32_t), hipMemcpyDefault, stream));
threadsPerBlock = 256;
blocks = maxNumModules;
#ifdef GPU_DEBUG
std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n";
#endif
hipLaunchKernelGGL(( findClus), dim3(blocks), dim3(threadsPerBlock), 0, stream, digis_d.moduleInd(),
digis_d.xx(),
digis_d.yy(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(hipGetLastError());
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
// apply charge cut
hipLaunchKernelGGL(( clusterChargeCut), dim3(blocks), dim3(threadsPerBlock), 0, stream, clusterThresholds,
digis_d.moduleInd(),
digis_d.adc(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(hipGetLastError());
// count the module start indices already here (instead of
// rechits) so that the number of clusters/hits can be made
// available in the rechit producer without additional points of
// synchronization/ExternalWork
// MUST be ONE block
hipLaunchKernelGGL(( fillHitsModuleStart), dim3(1), dim3(1024), 0, stream, clusters_d.clusInModule(), clusters_d.clusModuleStart());
// last element holds the number of all clusters
cudaCheck(hipMemcpyAsync(&(nModules_Clusters_h[1]),
clusters_d.clusModuleStart() + gpuClustering::maxNumModules,
sizeof(uint32_t),
hipMemcpyDefault,
stream));
#ifdef GPU_DEBUG
hipDeviceSynchronize();
cudaCheck(hipGetLastError());
#endif
} // end clusterizer scope
}
} // namespace pixelgpudetails
| 194b2b09d24cd88ee0d6cdfb7010a53fe3343877.cu | /* Sushil Dubey, Shashi Dugad, TIFR, July 2017
*
* File Name: RawToClusterGPU.cu
* Description: It converts Raw data into Digi Format on GPU
* Finaly the Output of RawToDigi data is given to pixelClusterizer
**/
// C++ includes
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
// CUDA includes
#include <cuda_runtime.h>
// CMSSW includes
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "CondFormats/SiPixelObjects/interface/SiPixelROCsStatusAndMapping.h"
#include "DataFormats/FEDRawData/interface/FEDNumbering.h"
#include "DataFormats/TrackerCommon/interface/TrackerTopology.h"
#include "DataFormats/SiPixelDigi/interface/SiPixelDigiConstants.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/host_unique_ptr.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuCalibPixel.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClusterChargeCut.h"
#include "RecoLocalTracker/SiPixelClusterizer/plugins/gpuClustering.h"
// local includes
#include "SiPixelRawToClusterGPUKernel.h"
namespace pixelgpudetails {
// Allocate pinned (non-cached, write-combined) host buffers for the raw
// 32-bit data words and their per-word-pair FED ids. Write-combined memory
// speeds up the host->device copy; it should only be written sequentially
// from the host, never read back.
SiPixelRawToClusterGPUKernel::WordFedAppender::WordFedAppender(uint32_t maxFedWords) {
word_ = cms::cuda::make_host_noncached_unique<unsigned int[]>(maxFedWords, cudaHostAllocWriteCombined);
fedId_ = cms::cuda::make_host_noncached_unique<unsigned char[]>(maxFedWords, cudaHostAllocWriteCombined);
}
// Append one FED's payload to the staging buffers.
// The data words are copied verbatim at offset wordCounterGPU; the fedId
// buffer stores one id per *pair* of words (hence the /2 on both offset and
// length), normalized so the first uTCA pixel FED becomes id 0.
// NOTE(review): assumes wordCounterGPU and length are even — confirm with
// the caller (the kernel later reads fedIds[gIndex / 2]).
void SiPixelRawToClusterGPUKernel::WordFedAppender::initializeWordFed(int fedId,
unsigned int wordCounterGPU,
const cms_uint32_t *src,
unsigned int length) {
std::memcpy(word_.get() + wordCounterGPU, src, sizeof(cms_uint32_t) * length);
std::memset(fedId_.get() + wordCounterGPU / 2, fedId - FEDNumbering::MINSiPixeluTCAFEDID, length / 2);
}
////////////////////
// True when the raw detector id belongs to the pixel barrel subdetector.
__device__ bool isBarrel(uint32_t rawId) {
  uint32_t subdet = (rawId >> DetId::kSubdetOffset) & DetId::kSubdetMask;
  return subdet == PixelSubdetector::PixelBarrel;
}
// Look up the (rawId, rocInDet, moduleId) triplet for a given FED/link/ROC
// in the flattened cabling map. Link numbering starts at 1, hence (link - 1).
__device__ pixelgpudetails::DetIdGPU getRawId(const SiPixelROCsStatusAndMapping *cablingMap,
                                              uint8_t fed,
                                              uint32_t link,
                                              uint32_t roc) {
  const uint32_t idx = fed * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
  return pixelgpudetails::DetIdGPU{cablingMap->rawId[idx], cablingMap->rocInDet[idx], cablingMap->moduleId[idx]};
}
//reference http://cmsdoxygen.web.cern.ch/cmsdoxygen/CMSSW_9_2_0/doc/html/dd/d31/FrameConversion_8cc_source.html
//http://cmslxr.fnal.gov/source/CondFormats/SiPixelObjects/src/PixelROC.cc?v=CMSSW_9_2_0#0071
// Convert local pixel to pixelgpudetails::global pixel
// Convert a local (row, col) pixel coordinate within one ROC into the global
// coordinate frame of the module, based on the module orientation.
// Each branch selects a slope (+1/-1) and offset per axis; the global
// coordinate is then offset + slope * local.
__device__ pixelgpudetails::Pixel frameConversion(
bool bpix, int side, uint32_t layer, uint32_t rocIdInDetUnit, pixelgpudetails::Pixel local) {
int slopeRow = 0, slopeCol = 0;
int rowOffset = 0, colOffset = 0;
if (bpix) {
if (side == -1 && layer != 1) { // -Z side: 4 non-flipped modules oriented like 'dddd', except Layer 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
} // if roc
} else { // +Z side: 4 non-flipped modules oriented like 'pppp', but all 8 in layer1
if (rocIdInDetUnit < 8) {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = rocIdInDetUnit * pixelgpudetails::numColsInRoc;
} else {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (16 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
}
}
} else { // fpix
if (side == -1) { // pannel 1
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} else { // pannel 2
if (rocIdInDetUnit < 8) {
slopeRow = 1;
slopeCol = -1;
rowOffset = 0;
colOffset = (8 - rocIdInDetUnit) * pixelgpudetails::numColsInRoc - 1;
} else {
slopeRow = -1;
slopeCol = 1;
rowOffset = 2 * pixelgpudetails::numRowsInRoc - 1;
colOffset = (rocIdInDetUnit - 8) * pixelgpudetails::numColsInRoc;
}
} // side
}
// apply the affine transform chosen above
uint32_t gRow = rowOffset + slopeRow * local.row;
uint32_t gCol = colOffset + slopeCol * local.col;
// inside frameConversion row: gRow, column: gCol
pixelgpudetails::Pixel global = {gRow, gCol};
return global;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
// Map a cabling-check status code (1-4) to the SiPixel raw-data error type
// (35-38). Returns 0 for unknown status values. Optionally prints a
// diagnostic message when debug is set.
// (error numbering follows EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc)
__device__ uint8_t conversionError(uint8_t fedId, uint8_t status, bool debug = false) {
uint8_t errorType = 0;
switch (status) {
case (1): {
if (debug)
// fixed: newline was misplaced inside the parenthesis ("= 35\n)")
printf("Error in Fed: %i, invalid channel Id (errorType = 35)\n", fedId);
errorType = 35;
break;
}
case (2): {
if (debug)
printf("Error in Fed: %i, invalid ROC Id (errorType = 36)\n", fedId);
errorType = 36;
break;
}
case (3): {
if (debug)
printf("Error in Fed: %i, invalid dcol/pixel value (errorType = 37)\n", fedId);
errorType = 37;
break;
}
case (4): {
if (debug)
printf("Error in Fed: %i, dcol/pixel read out of order (errorType = 38)\n", fedId);
errorType = 38;
break;
}
default:
if (debug)
printf("Cabling check returned unexpected result, status = %i\n", status);
};
return errorType;
}
// Check that a (row, col) pair is a valid local ROC coordinate.
__device__ bool rocRowColIsValid(uint32_t rocRow, uint32_t rocCol) {
  /// row and column in ROC representation
  const bool rowOk = rocRow < pixelgpudetails::numRowsInRoc;
  const bool colOk = rocCol < pixelgpudetails::numColsInRoc;
  return rowOk & colOk;  // bitwise '&' keeps the check branchless, as in the original
}
__device__ bool dcolIsValid(uint32_t dcol, uint32_t pxid) { return ((dcol < 26) & (2 <= pxid) & (pxid < 162)); }
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
// Inspect the ROC field of a raw word. Values < 25 are real ROC numbers
// (no error, return 0); values >= 25 encode FED error conditions. Returns
// the error type (possibly remapped, e.g. 30 -> 40 for TBM overflow) when a
// genuine error is identified, otherwise 0.
// (copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc)
__device__ uint8_t checkROC(uint32_t errorWord,
uint8_t fedId,
uint32_t link,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint8_t errorType = (errorWord >> sipixelconstants::ROC_shift) & sipixelconstants::ERROR_mask;
if (errorType < 25)
return 0;
bool errorFound = false;
switch (errorType) {
case (25): {
errorFound = true;
// "ROC=25" is only an error if this (fed, link) really has a ROC 1 entry
// in the cabling map that does not match; otherwise it is ignored.
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + 1;
if (index > 1 && index <= cablingMap->size) {
if (!(link == cablingMap->link[index] && 1 == cablingMap->roc[index]))
errorFound = false;
}
if (debug and errorFound)
printf("Invalid ROC = 25 found (errorType = 25)\n");
break;
}
case (26): {
if (debug)
printf("Gap word found (errorType = 26)\n");
errorFound = true;
break;
}
case (27): {
if (debug)
printf("Dummy word found (errorType = 27)\n");
errorFound = true;
break;
}
case (28): {
if (debug)
printf("Error fifo nearly full (errorType = 28)\n");
errorFound = true;
break;
}
case (29): {
if (debug)
printf("Timeout on a channel (errorType = 29)\n");
if ((errorWord >> sipixelconstants::OMIT_ERR_shift) & sipixelconstants::OMIT_ERR_mask) {
if (debug)
printf("...first errorType=29 error, this gets masked out\n");
}
errorFound = true;
break;
}
case (30): {
if (debug)
printf("TBM error trailer (errorType = 30)\n");
// state-match bits distinguish overflow (1 -> remap to 40) from
// number-of-ROCs mismatch (8 -> keep 30)
int stateMatch_bits = 4;
int stateMatch_shift = 8;
uint32_t stateMatch_mask = ~(~uint32_t(0) << stateMatch_bits);
int stateMatch = (errorWord >> stateMatch_shift) & stateMatch_mask;
if (stateMatch != 1 && stateMatch != 8) {
if (debug)
printf("FED error 30 with unexpected State Bits (errorType = 30)\n");
}
if (stateMatch == 1)
errorType = 40; // 1=Overflow -> 40, 8=number of ROCs -> 30
errorFound = true;
break;
}
case (31): {
if (debug)
printf("Event number error (errorType = 31)\n");
errorFound = true;
break;
}
default:
errorFound = false;
};
return errorFound ? errorType : 0;
}
// error decoding and handling copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc
// Resolve the detector rawId that an error word refers to, so the error can
// be attached to a module. Returns 0xffffffff when the module cannot be
// determined (unknown error type, or lookup yields invalidModuleId).
// (copied from EventFilter/SiPixelRawToDigi/src/ErrorChecker.cc)
__device__ uint32_t getErrRawID(uint8_t fedId,
uint32_t errWord,
uint32_t errorType,
const SiPixelROCsStatusAndMapping *cablingMap,
bool debug = false) {
uint32_t rID = 0xffffffff;
switch (errorType) {
// link-level errors: ROC is unknown, probe the map with roc = 1
case 25:
case 30:
case 31:
case 36:
case 40: {
uint32_t roc = 1;
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
case 29: {
// timeout: the channel number is encoded in 5 data bits plus a 3-bit
// block index; reconstruct it and use it as the link number
int chanNmbr = 0;
const int DB0_shift = 0;
const int DB1_shift = DB0_shift + 1;
const int DB2_shift = DB1_shift + 1;
const int DB3_shift = DB2_shift + 1;
const int DB4_shift = DB3_shift + 1;
const uint32_t DataBit_mask = ~(~uint32_t(0) << 1);
int CH1 = (errWord >> DB0_shift) & DataBit_mask;
int CH2 = (errWord >> DB1_shift) & DataBit_mask;
int CH3 = (errWord >> DB2_shift) & DataBit_mask;
int CH4 = (errWord >> DB3_shift) & DataBit_mask;
int CH5 = (errWord >> DB4_shift) & DataBit_mask;
int BLOCK_bits = 3;
int BLOCK_shift = 8;
uint32_t BLOCK_mask = ~(~uint32_t(0) << BLOCK_bits);
int BLOCK = (errWord >> BLOCK_shift) & BLOCK_mask;
int localCH = 1 * CH1 + 2 * CH2 + 3 * CH3 + 4 * CH4 + 5 * CH5;
if (BLOCK % 2 == 0)
chanNmbr = (BLOCK / 2) * 9 + localCH;
else
chanNmbr = ((BLOCK - 1) / 2) * 9 + 4 + localCH;
if ((chanNmbr < 1) || (chanNmbr > 36))
break; // signifies unexpected result
uint32_t roc = 1;
uint32_t link = chanNmbr;
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
// pixel-level errors: both link and ROC are available in the word
case 37:
case 38: {
uint32_t roc = sipixelconstants::getROC(errWord);
uint32_t link = sipixelconstants::getLink(errWord);
uint32_t rID_temp = getRawId(cablingMap, fedId, link, roc).rawId;
if (rID_temp != gpuClustering::invalidModuleId)
rID = rID_temp;
break;
}
default:
break;
};
return rID;
}
// Kernel to perform Raw to Digi conversion
// Kernel to perform Raw to Digi conversion.
// Grid-stride loop: each iteration decodes one 32-bit raw word into a digi,
// writing xx/yy (global pixel coordinates), adc, pdigi (packed digi),
// moduleId and rawIdArr at the same index. Empty words, masked ROCs and
// decode errors leave the slot zeroed with moduleId = invalidModuleId;
// errors are appended to *err when includeErrors is set.
// Expects wordCounter entries in `word` and one fedId per pair of words in
// `fedIds` (hence fedIds[gIndex / 2]).
__global__ void RawToDigi_kernel(const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const uint32_t wordCounter,
const uint32_t *word,
const uint8_t *fedIds,
uint16_t *xx,
uint16_t *yy,
uint16_t *adc,
uint32_t *pdigi,
uint32_t *rawIdArr,
uint16_t *moduleId,
cms::cuda::SimpleVector<SiPixelErrorCompact> *err,
bool useQualityInfo,
bool includeErrors,
bool debug) {
//if (threadIdx.x==0) printf("Event: %u blockIdx.x: %u start: %u end: %u\n", eventno, blockIdx.x, begin, end);
int32_t first = threadIdx.x + blockIdx.x * blockDim.x;
for (int32_t iloop = first, nend = wordCounter; iloop < nend; iloop += blockDim.x * gridDim.x) {
auto gIndex = iloop;
// zero the outputs first so every early `continue` leaves a valid slot
xx[gIndex] = 0;
yy[gIndex] = 0;
adc[gIndex] = 0;
bool skipROC = false;
uint8_t fedId = fedIds[gIndex / 2]; // +1200;
// initialize (too many coninue below)
pdigi[gIndex] = 0;
rawIdArr[gIndex] = 0;
moduleId[gIndex] = gpuClustering::invalidModuleId;
uint32_t ww = word[gIndex]; // Array containing 32 bit raw data
if (ww == 0) {
// 0 is an indicator of a noise/dead channel, skip these pixels during clusterization
continue;
}
uint32_t link = sipixelconstants::getLink(ww); // Extract link
uint32_t roc = sipixelconstants::getROC(ww); // Extract Roc in link
pixelgpudetails::DetIdGPU detId = getRawId(cablingMap, fedId, link, roc);
// a ROC value >= maxROCIndex marks a FED error word, not a real ROC
uint8_t errorType = checkROC(ww, fedId, link, cablingMap, debug);
skipROC = (roc < pixelgpudetails::maxROCIndex) ? false : (errorType != 0);
if (includeErrors and skipROC) {
uint32_t rID = getErrRawID(fedId, ww, errorType, cablingMap, debug);
err->push_back(SiPixelErrorCompact{rID, ww, errorType, fedId});
continue;
}
uint32_t rawId = detId.rawId;
uint32_t rocIdInDetUnit = detId.rocInDet;
bool barrel = isBarrel(rawId);
uint32_t index = fedId * MAX_LINK * MAX_ROC + (link - 1) * MAX_ROC + roc;
if (useQualityInfo) {
// drop digis on ROCs flagged bad in the cabling map
skipROC = cablingMap->badRocs[index];
if (skipROC)
continue;
}
// drop digis on ROCs not selected for unpacking
skipROC = modToUnp[index];
if (skipROC)
continue;
uint32_t layer = 0;
int side = 0, panel = 0, module = 0;
if (barrel) {
layer = (rawId >> pixelgpudetails::layerStartBit) & pixelgpudetails::layerMask;
module = (rawId >> pixelgpudetails::moduleStartBit) & pixelgpudetails::moduleMask;
side = (module < 5) ? -1 : 1;
} else {
// endcap ids
layer = 0;
panel = (rawId >> pixelgpudetails::panelStartBit) & pixelgpudetails::panelMask;
side = (panel == 1) ? -1 : 1;
}
// ***special case of layer to 1 be handled here
pixelgpudetails::Pixel localPix;
if (layer == 1) {
// BPIX layer 1 words carry (row, col) directly
uint32_t col = sipixelconstants::getCol(ww);
uint32_t row = sipixelconstants::getRow(ww);
localPix.row = row;
localPix.col = col;
if (includeErrors) {
if (not rocRowColIsValid(row, col)) {
uint8_t error = conversionError(fedId, 3, debug); //use the device function and fill the arrays
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("BPIX1 Error status: %i\n", error);
continue;
}
}
} else {
// ***conversion rules for dcol and pxid
uint32_t dcol = sipixelconstants::getDCol(ww);
uint32_t pxid = sipixelconstants::getPxId(ww);
uint32_t row = pixelgpudetails::numRowsInRoc - pxid / 2;
uint32_t col = dcol * 2 + pxid % 2;
localPix.row = row;
localPix.col = col;
if (includeErrors and not dcolIsValid(dcol, pxid)) {
uint8_t error = conversionError(fedId, 3, debug);
err->push_back(SiPixelErrorCompact{rawId, ww, error, fedId});
if (debug)
printf("Error status: %i %d %d %d %d\n", error, dcol, pxid, fedId, roc);
continue;
}
}
pixelgpudetails::Pixel globalPix = frameConversion(barrel, side, layer, rocIdInDetUnit, localPix);
xx[gIndex] = globalPix.row; // origin shifting by 1 0-159
yy[gIndex] = globalPix.col; // origin shifting by 1 0-415
adc[gIndex] = sipixelconstants::getADC(ww);
pdigi[gIndex] = pixelgpudetails::pack(globalPix.row, globalPix.col, adc[gIndex]);
moduleId[gIndex] = detId.moduleId;
rawIdArr[gIndex] = rawId;
} // end of loop (gIndex < end)
} // end of Raw to Digi kernel
// Build per-module hit start offsets: moduleStart[m+1] = running sum of the
// (clamped) cluster counts of modules 0..m; moduleStart[0] stays 0.
// MUST be launched as exactly one block (asserted) since it performs a
// block-wide prefix scan; the scan is done in two 1024-sized chunks, with
// the second chunk offset by the first chunk's total.
__global__ void fillHitsModuleStart(uint32_t const *__restrict__ cluStart, uint32_t *__restrict__ moduleStart) {
assert(gpuClustering::maxNumModules < 2048); // easy to extend at least till 32*1024
assert(1 == gridDim.x);
assert(0 == blockIdx.x);
int first = threadIdx.x;
// limit to maxHitsInModule()
for (int i = first, iend = gpuClustering::maxNumModules; i < iend; i += blockDim.x) {
moduleStart[i + 1] = std::min(gpuClustering::maxHitsInModule(), cluStart[i]);
}
__shared__ uint32_t ws[32];
cms::cuda::blockPrefixScan(moduleStart + 1, moduleStart + 1, 1024, ws);
cms::cuda::blockPrefixScan(moduleStart + 1025, moduleStart + 1025, gpuClustering::maxNumModules - 1024, ws);
for (int i = first + 1025, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
moduleStart[i] += moduleStart[1024];
}
__syncthreads();
#ifdef GPU_DEBUG
assert(0 == moduleStart[0]);
auto c0 = std::min(gpuClustering::maxHitsInModule(), cluStart[0]);
assert(c0 == moduleStart[1]);
assert(moduleStart[1024] >= moduleStart[1023]);
assert(moduleStart[1025] >= moduleStart[1024]);
assert(moduleStart[gpuClustering::maxNumModules] >= moduleStart[1025]);
for (int i = first, iend = gpuClustering::maxNumModules + 1; i < iend; i += blockDim.x) {
if (0 != i)
assert(moduleStart[i] >= moduleStart[i - 1]); // monotonicity; fixed: was `i - i`, which always compared against moduleStart[0]
// [BPX1, BPX2, BPX3, BPX4, FP1, FP2, FP3, FN1, FN2, FN3, LAST_VALID]
// [ 0, 96, 320, 672, 1184, 1296, 1408, 1520, 1632, 1744, 1856]
if (i == 96 || i == 1184 || i == 1744 || i == gpuClustering::maxNumModules)
printf("moduleStart %d %d\n", i, moduleStart[i]);
}
#endif
}
// Interface to outside
// Queue the full raw-data -> digi -> cluster GPU chain on `stream`:
//   1) decode FED words into digis (RawToDigi_kernel),
//   2) gain-calibrate the digis (gpuCalibPixel::calibDigis),
//   3) count modules, find clusters, apply the cluster charge cut,
//   4) build the cumulative per-module hit-start table (fillHitsModuleStart).
// Everything is asynchronous on `stream`; the two entries of
// nModules_Clusters_h (module count, total cluster count) are filled via
// cudaMemcpyAsync and are only valid after the caller synchronizes the stream.
void SiPixelRawToClusterGPUKernel::makeClustersAsync(bool isRun2,
const SiPixelClusterThresholds clusterThresholds,
const SiPixelROCsStatusAndMapping *cablingMap,
const unsigned char *modToUnp,
const SiPixelGainForHLTonGPU *gains,
const WordFedAppender &wordFed,
SiPixelFormatterErrors &&errors,
const uint32_t wordCounter,
const uint32_t fedCounter,
const uint32_t maxFedWords,
bool useQualityInfo,
bool includeErrors,
bool debug,
cudaStream_t stream) {
nDigis = wordCounter;
#ifdef GPU_DEBUG
std::cout << "decoding " << wordCounter << " digis. Max is " << maxFedWords << std::endl;
#endif
// (Re)allocate the device-side event products.
digis_d = SiPixelDigisCUDA(maxFedWords, stream);
if (includeErrors) {
digiErrors_d = SiPixelDigiErrorsCUDA(maxFedWords, std::move(errors), stream);
}
clusters_d = SiPixelClustersCUDA(gpuClustering::maxNumModules, stream);
// Host buffer for [0] = number of active modules, [1] = total cluster count.
nModules_Clusters_h = cms::cuda::make_host_unique<uint32_t[]>(2, stream);
if (wordCounter) // protect in case of empty event....
{
const int threadsPerBlock = 512;
const int blocks = (wordCounter + threadsPerBlock - 1) / threadsPerBlock; // fill it all
// One fedId byte serves a *pair* of words (note the /2 in the copy below),
// so the word count must be even.
assert(0 == wordCounter % 2);
// wordCounter is the total no of words in each event to be transferred on device
auto word_d = cms::cuda::make_device_unique<uint32_t[]>(wordCounter, stream);
auto fedId_d = cms::cuda::make_device_unique<uint8_t[]>(wordCounter, stream);
cudaCheck(
cudaMemcpyAsync(word_d.get(), wordFed.word(), wordCounter * sizeof(uint32_t), cudaMemcpyDefault, stream));
cudaCheck(cudaMemcpyAsync(
fedId_d.get(), wordFed.fedId(), wordCounter * sizeof(uint8_t) / 2, cudaMemcpyDefault, stream));
// Launch rawToDigi kernel
RawToDigi_kernel<<<blocks, threadsPerBlock, 0, stream>>>(
cablingMap,
modToUnp,
wordCounter,
word_d.get(),
fedId_d.get(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
digis_d.pdigi(),
digis_d.rawIdArr(),
digis_d.moduleInd(),
digiErrors_d.error(), // returns nullptr if default-constructed
useQualityInfo,
includeErrors,
debug);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
if (includeErrors) {
digiErrors_d.copyErrorToHostAsync(stream);
}
}
// End of Raw2Digi and passing data for clustering
{
// clusterizer ...
using namespace gpuClustering;
// Grid must cover both the digis (wordCounter) and the module array.
int threadsPerBlock = 256;
int blocks =
(std::max(int(wordCounter), int(gpuClustering::maxNumModules)) + threadsPerBlock - 1) / threadsPerBlock;
gpuCalibPixel::calibDigis<<<blocks, threadsPerBlock, 0, stream>>>(isRun2,
digis_d.moduleInd(),
digis_d.xx(),
digis_d.yy(),
digis_d.adc(),
gains,
wordCounter,
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.clusModuleStart());
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
#ifdef GPU_DEBUG
std::cout << "CUDA countModules kernel launch with " << blocks << " blocks of " << threadsPerBlock
<< " threads\n";
#endif
countModules<<<blocks, threadsPerBlock, 0, stream>>>(
digis_d.moduleInd(), clusters_d.moduleStart(), digis_d.clus(), wordCounter);
cudaCheck(cudaGetLastError());
// read the number of modules into a data member, used by getProduct())
cudaCheck(cudaMemcpyAsync(
&(nModules_Clusters_h[0]), clusters_d.moduleStart(), sizeof(uint32_t), cudaMemcpyDefault, stream));
// One block per module for the clustering kernels (blocks = maxNumModules).
threadsPerBlock = 256;
blocks = maxNumModules;
#ifdef GPU_DEBUG
std::cout << "CUDA findClus kernel launch with " << blocks << " blocks of " << threadsPerBlock << " threads\n";
#endif
findClus<<<blocks, threadsPerBlock, 0, stream>>>(digis_d.moduleInd(),
digis_d.xx(),
digis_d.yy(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(cudaGetLastError());
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
// apply charge cut
clusterChargeCut<<<blocks, threadsPerBlock, 0, stream>>>(clusterThresholds,
digis_d.moduleInd(),
digis_d.adc(),
clusters_d.moduleStart(),
clusters_d.clusInModule(),
clusters_d.moduleId(),
digis_d.clus(),
wordCounter);
cudaCheck(cudaGetLastError());
// count the module start indices already here (instead of
// rechits) so that the number of clusters/hits can be made
// available in the rechit producer without additional points of
// synchronization/ExternalWork
// MUST be ONE block
fillHitsModuleStart<<<1, 1024, 0, stream>>>(clusters_d.clusInModule(), clusters_d.clusModuleStart());
// last element holds the number of all clusters
cudaCheck(cudaMemcpyAsync(&(nModules_Clusters_h[1]),
clusters_d.clusModuleStart() + gpuClustering::maxNumModules,
sizeof(uint32_t),
cudaMemcpyDefault,
stream));
#ifdef GPU_DEBUG
cudaDeviceSynchronize();
cudaCheck(cudaGetLastError());
#endif
} // end clusterizer scope
}
} // namespace pixelgpudetails
|
3e6755d6f0214de3b87a0cab5b2d32c63c38a039.hip | // !!! This is a file automatically generated by hipify!!!
#define real double
#define f64 double
#define HISTORY 4
#include <stdlib.h>
#include <stdio.h>
#include "lapacke.h"
// we must find out what causes graphics crash during SPECIES_ION
/* Auxiliary routines prototypes */
extern void print_matrix(char* desc, lapack_int m, lapack_int n, double* a, lapack_int lda);
extern void print_int_vector(char* desc, lapack_int n, lapack_int* a);
extern void Go_visit_the_other_file();
extern void Setup_residual_array();
#include "headers.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdio.h>
#include <windows.h>
#include "resource.h"
#include "flags.h"
#include "FFxtubes.h"
//#include "cppconst.h"
#include "cuda_struct.h"
#include "constant.h"
#include "d3d.h"
#include <d3dx9.h>
#include <dxerr.h>
#include <commdlg.h> // probably used by avi_utils
#include "surfacegraph_tri.h"
//#include "avi_utils.cpp" // for making .avi
#include "kernel.h"
#include <mfapi.h>
#include <mfidl.h>
#include <Mfreadwrite.h>
#include <mferror.h>
#include <iostream>
#include <shlwapi.h>
#include <combaseapi.h>
#pragma comment(lib, "mfreadwrite")
#pragma comment(lib, "mfplat")
#pragma comment(lib, "mf")
#pragma comment(lib, "mfuuid")
// Release a COM-style interface pointer (anything with Release()) and null
// out the caller's pointer. Safe to call when *ppT is already NULL.
template <class T> void SafeRelease(T **ppT)
{
	T *p = *ppT;
	if (p == NULL)
		return;
	p->Release();
	*ppT = NULL;
}
//=======================================================
// Declarations of functions:
void RefreshGraphs(TriMesh & X, const int iGraphsFlag);
LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);
INT_PTR CALLBACK About(HWND, UINT, WPARAM, LPARAM);
INT_PTR CALLBACK SetupBox(HWND, UINT, WPARAM, LPARAM);
extern f64 GetEzShape__(f64 r);
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
extern void Zap_the_back();
extern f64 * temp_array_host;
extern OhmsCoeffs * p_OhmsCoeffs_host;
extern f64 * p_graphdata1_host,* p_graphdata2_host,* p_graphdata3_host, *p_graphdata4_host, *p_graphdata5_host, *p_graphdata6_host;
extern f64 * p_Tgraph_host[9];
extern f64 * p_accelgraph_host[12];
extern f64 * p_Ohmsgraph_host[20];
extern f64 * p_arelz_graph_host[12];
// Global variables:
// =================
//extern f64_vec3 * p_B_host;
extern f64 EzStrength_;
extern cuSyst cuSyst1, cuSyst2, cuSyst3;
extern D3D Direct3D;
extern f64 * p_temphost1, *p_temphost2,
*p_temphost3, *p_temphost4, *p_temphost5, *p_temphost6;
extern __device__ f64 * p_LapCoeffself;
extern __device__ f64 * p_temp1;
extern __device__ long * p_longtemp;
extern __device__ f64 * p_Az, *p_LapAz;
float xzscale;
bool bCullNone = false;
bool bGlobalsave = false;
int GlobalSwitchBox = 0;
int iGlobalScratch;
real GlobalHeightScale;
int GlobalSpeciesToGraph = SPECIES_ION;
int GlobalWhichLabels = 0;
bool GlobalRenderLabels = false;
int GlobalColoursPlanView = 0;
bool GlobalBothSystemsInUse;
bool GlobalCutaway = true;
unsigned int cw; // control word for floating point hardware exception hiding
TriMesh * pX, *pXnew;
TriMesh X1, X2, X3, X4;
cuSyst cuSyst_host, cuSyst_host2;
D3DXVECTOR3 GlobalEye, GlobalLookat, GlobalPlanEye, GlobalPlanEye2, GlobalPlanLookat,
GlobalPlanLookat2, GlobalEye2, GlobalLookat2;
D3DXVECTOR3 newEye;
D3DXVECTOR3 newLookat;
IDirect3DSurface9* p_backbuffer_surface;
long steps_remaining, GlobalStepsCounter, steps_remaining_CPU;
real evaltime, h;
extern real GlobalIzElasticity;
FILE * massfile, *maxfile;
// Global Variables:
HINSTANCE hInst; // current instance
// window vars:
HWND hWnd, hwndGraphics;
WNDCLASSEX wcex;
TCHAR szTitle[1024]; // The title bar text
TCHAR szWindowClass[1024]; // the main window class name
char Functionalfilename[1024];
int GlobalGraphSetting[8];
surfacegraph Graph[8]; // why was it 5? // 5th one can be whole thing.
float Historic_max[512][HISTORY]; // if max is falling, use historic maximum for graph.
float Historic_min[512][HISTORY];
int Historic_powermax[512];
int Historic_powermin[512]; // just store previous value only.
bool flaglist[NMINOR];
bool boolGlobalHistory, GlobalboolDisplayMeshWireframe;
// avi file -oriented variables
int const NUMAVI = 9;
//HAVI hAvi[NUMAVI + 1]; // does it work without OHMSLAW? // OHMSLAW,
int const GraphFlags[NUMAVI] = { SPECIES_ION, OVERALL, JZAZBXYEZ, ONE_D, IONIZEGRAPH,
DTGRAPH, ACCELGRAPHS, OHMS2, ARELZ};
WCHAR szmp4[NUMAVI][128] = { L"Elec",L"Total",L"JzAzBxy",L"Test",
L"Ionize", L"dT", L"Accel", L"Ohms", L"arelz"};
//AVICOMPRESSOPTIONS opts;
int counter;
HBITMAP surfbit, dib;
HDC surfdc, dibdc;
LPVOID lpvBits;
BITMAPINFO bitmapinfo;
DWORD dwBits[VIDEO_HEIGHT*VIDEO_WIDTH];
f64 graphdata[20][10000];
f64 graph_r[10000];
int numgraphs = 4;
int num_graph_data_points = 10000;
f64 maximum[20];
f64 truemax[20];
extern TriMesh * pTriMesh;
/* Simple CPU-clock stopwatch.
   action == 0 : reset the stopwatch (records clock()).
   action != 0 : format the time elapsed since the last reset, as
                 "%6.4f sec.", into a static buffer.
   Returns a pointer to the static buffer in both cases (empty/stale until
   the first report). Not thread-safe: the buffer and start time are shared
   statics, and each report overwrites the previous text. */
char * report_time(int action)
{
	static char timebuffer[255];
	static clock_t start;

	if (action == 0)
	{
		start = clock();
	}
	else
	{
		// Elapsed wall time according to the C clock; the unused `ops`
		// tick counter from the original has been removed.
		double timenow = ((double)(clock() - start) / (double)CLOCKS_PER_SEC);
		/* create a null-terminated string */
		sprintf(timebuffer, "%6.4f sec.", timenow);
	};
	return &(timebuffer[0]);
};
// Unsigned area of the triangle (pos0, pos1, pos2) via the shoelace
// (trapezoid) formula. The arithmetic is kept in the exact same order as
// before so the floating-point result is bit-identical; fabs discards the
// winding-dependent sign.
f64 GetTriangleArea(f64_vec2 pos0, f64_vec2 pos1, f64_vec2 pos2)
{
	return fabs(0.5*((pos0.x + pos1.x)*(pos1.y - pos0.y) + (pos1.x + pos2.x)*(pos2.y - pos1.y)
		+ (pos0.x + pos2.x)*(pos0.y - pos2.y)));
}
// Format constants
//const UINT32 VIDEO_WIDTH = 640;
//const UINT32 VIDEO_HEIGHT = 480;
const UINT32 VIDEO_FPS = 5;
const UINT64 VIDEO_FRAME_DURATION = 10 * 1000 * 1000 / VIDEO_FPS; // ?
const UINT32 VIDEO_BIT_RATE = 1048768;
const UINT32 VIDEO_PELS = VIDEO_WIDTH * VIDEO_HEIGHT;
const UINT32 VIDEO_FRAME_COUNT = 5;
// with 50 frames per nanosecond and 30 nanoseconds in file, it's 1500
// But to begin let's say 5?
const GUID VIDEO_INPUT_FORMAT = MFVideoFormat_RGB24;
// Create a Media Foundation sink writer that encodes RGB24 frames of size
// VIDEO_WIDTH x VIDEO_HEIGHT at VIDEO_FPS into an H.264 file.
//
//   ppWriter     [out] receives the writer (AddRef'd; caller must Release).
//   pStreamIndex [out] receives the index of the video stream.
//   szFilename   [in]  output file path/URL.
//
// On success BeginWriting() has already been called, so the caller can go
// straight to WriteFrame(). On failure the partially created objects are
// released and the failing HRESULT is returned.
HRESULT InitializeSinkWriter(
	IMFSinkWriter **ppWriter,
	DWORD *pStreamIndex,
	LPCWSTR szFilename)
{
	*ppWriter = NULL;
	*pStreamIndex = 0;  // fixed: was `NULL` (pointer macro) assigned to a DWORD
	IMFSinkWriter *pSinkWriter = NULL;
	IMFMediaType *pMediaTypeOut = NULL;
	IMFMediaType *pMediaTypeIn = NULL;
	DWORD streamIndex;
	HRESULT hr = MFCreateSinkWriterFromURL(szFilename, NULL, NULL, &pSinkWriter);
	// Set the output (encoded) media type: H.264, configured bitrate,
	// progressive frames, square pixels.
	if (SUCCEEDED(hr))
		hr = MFCreateMediaType(&pMediaTypeOut);
	if (SUCCEEDED(hr))
		hr = pMediaTypeOut->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
	if (SUCCEEDED(hr))
		hr = pMediaTypeOut->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
	// whereas webcam capture sample says WMMEDIASUBTYPE_I420
	if (SUCCEEDED(hr))
		hr = pMediaTypeOut->SetUINT32(MF_MT_AVG_BITRATE, VIDEO_BIT_RATE);
	if (SUCCEEDED(hr))
		hr = pMediaTypeOut->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
	if (SUCCEEDED(hr))
		hr = MFSetAttributeSize(pMediaTypeOut, MF_MT_FRAME_SIZE, VIDEO_WIDTH, VIDEO_HEIGHT);
	if (SUCCEEDED(hr))
		hr = MFSetAttributeRatio(pMediaTypeOut, MF_MT_FRAME_RATE, VIDEO_FPS, 1);
	if (SUCCEEDED(hr))
		hr = MFSetAttributeRatio(pMediaTypeOut, MF_MT_PIXEL_ASPECT_RATIO, 1, 1);
	if (SUCCEEDED(hr))
		hr = pSinkWriter->AddStream(pMediaTypeOut, &streamIndex);
	// Set the input (uncompressed) media type: same geometry/rate, but in
	// VIDEO_INPUT_FORMAT (RGB24).
	if (SUCCEEDED(hr))
		hr = MFCreateMediaType(&pMediaTypeIn);
	if (SUCCEEDED(hr))
		hr = pMediaTypeIn->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
	if (SUCCEEDED(hr))
		hr = pMediaTypeIn->SetGUID(MF_MT_SUBTYPE, VIDEO_INPUT_FORMAT);
	if (SUCCEEDED(hr))
		hr = pMediaTypeIn->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
	// should that be 0 ?
	if (SUCCEEDED(hr))
		hr = MFSetAttributeSize(pMediaTypeIn, MF_MT_FRAME_SIZE, VIDEO_WIDTH, VIDEO_HEIGHT);
	if (SUCCEEDED(hr))
		hr = MFSetAttributeRatio(pMediaTypeIn, MF_MT_FRAME_RATE, VIDEO_FPS, 1);
	if (SUCCEEDED(hr))
		hr = MFSetAttributeRatio(pMediaTypeIn, MF_MT_PIXEL_ASPECT_RATIO, 1, 1);
	if (SUCCEEDED(hr))
		hr = pSinkWriter->SetInputMediaType(streamIndex, pMediaTypeIn, NULL);
	// Tell the sink writer to start accepting data.
	if (SUCCEEDED(hr)) {
		hr = pSinkWriter->BeginWriting();
	}
	// Return the pointer to the caller. The extra AddRef compensates for the
	// SafeRelease of the local reference below.
	if (SUCCEEDED(hr)) {
		*ppWriter = pSinkWriter;
		(*ppWriter)->AddRef();
		*pStreamIndex = streamIndex;
	}
	SafeRelease(&pSinkWriter);
	SafeRelease(&pMediaTypeOut);
	SafeRelease(&pMediaTypeIn);
	return hr;
}
// Encode one video frame: copy the global RGB24 framebuffer `lpvBits` into a
// new Media Foundation buffer, wrap it in a sample stamped at `rtStart`
// (duration VIDEO_FRAME_DURATION), and hand it to the sink writer's stream
// `streamIndex`. Returns the first failing HRESULT, releasing the sample and
// buffer in all cases.
HRESULT WriteFrame(
IMFSinkWriter *pWriter,
DWORD streamIndex,
const LONGLONG& rtStart // Time stamp.
)
{
IMFSample *pSample = NULL;
IMFMediaBuffer *pBuffer = NULL;
// Stride: 3 bytes per pixel for the RGB24 input format.
const LONG cbWidth = 3 * VIDEO_WIDTH;
// so cbWidth is width in bytes
const DWORD cbBuffer = cbWidth * VIDEO_HEIGHT;
BYTE *pData = NULL;
// Create a new memory buffer.
HRESULT hr = MFCreateMemoryBuffer(cbBuffer, &pBuffer);
// Lock the buffer and copy the video frame to the buffer.
if (SUCCEEDED(hr))
hr = pBuffer->Lock(&pData, NULL, NULL);
if (SUCCEEDED(hr))
hr = MFCopyImage(
pData, // Destination buffer.
cbWidth, // Destination stride.
(BYTE *)lpvBits,//(BYTE*)videoFrameBuffer, // First row in source image.
cbWidth, // Source stride.
cbWidth, // Image width in bytes.
//I added x 3
VIDEO_HEIGHT // Image height in pixels.
);
// NOTE(review): Unlock is called even if the Lock above failed; MF reports
// an error for an unmatched Unlock but it is otherwise harmless — confirm.
if (pBuffer) pBuffer->Unlock();
// Set the data length of the buffer.
if (SUCCEEDED(hr))
hr = pBuffer->SetCurrentLength(cbBuffer);
// Create a media sample and add the buffer to the sample.
if (SUCCEEDED(hr))
hr = MFCreateSample(&pSample);
if (SUCCEEDED(hr))
hr = pSample->AddBuffer(pBuffer);
// Set the time stamp and the duration.
if (SUCCEEDED(hr))
hr = pSample->SetSampleTime(rtStart);
if (SUCCEEDED(hr))
hr = pSample->SetSampleDuration(VIDEO_FRAME_DURATION);
// Send the sample to the Sink Writer.
if (SUCCEEDED(hr))
hr = pWriter->WriteSample(streamIndex, pSample);
SafeRelease(&pSample);
SafeRelease(&pBuffer);
return hr;
}
void TriMesh::CalculateTotalGraphingData()
{
	// Deliberately a no-op: the per-vertex aggregation is disabled. The
	// previous implementation (kept below for reference) looped over all
	// vertices and, for DOMAIN_VERTEX / OUTERMOST flags, filled:
	//   n      = (neutral + ion mass) / cell area
	//   v      = mass-weighted mean momentum over the three species
	//   T      = total heat / total particle count (mass sum)
	//   Temp.x = ion fraction of the heavy species
	/*
	long iVertex;
	Vertex * pVertex = X;
	for (iVertex = 0; iVertex < numVertices; iVertex++)
	{
		if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
		{
			pVertex->n = (pVertex->Neut.mass + pVertex->Ion.mass) / pVertex->AreaCell;
			pVertex->v = (m_n*pVertex->Neut.mom + m_ion * pVertex->Ion.mom + m_e * pVertex->Elec.mom) /
				(m_n*pVertex->Neut.mass + m_ion * pVertex->Ion.mass + m_e * pVertex->Elec.mass);
			pVertex->T = (pVertex->Neut.heat + pVertex->Ion.heat + pVertex->Elec.heat) /
				(pVertex->Neut.mass + pVertex->Ion.mass + pVertex->Elec.mass);
			pVertex->Temp.x = pVertex->Ion.mass / (pVertex->Neut.mass + pVertex->Ion.mass);
		};
		++pVertex;
	}
	*/
}
void TriMesh::Setup_J()
{
	// Deliberately a no-op: the current-density setup is disabled. The
	// previous implementation (kept below for reference) stored, on
	// DOMAIN_VERTEX / OUTERMOST vertices,
	//   Temp = q * (ion momentum - electron momentum) / cell area
	// and zeroed Temp on all other vertices.
	/*
	long iVertex;
	Vertex * pVertex = X;
	for (iVertex = 0; iVertex < numVertices; iVertex++)
	{
		if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
		{
			pVertex->Temp = q * (pVertex->Ion.mom - pVertex->Elec.mom) / pVertex->AreaCell;
		}
		else {
			memset(&(pVertex->Temp), 0, sizeof(Vector3));
		}
		++pVertex;
	}
	*/
}
// Render one 3-D surface graph: pick the shader technique matching
// `colourflag`, configure the display options, bind the height data
// (var_ptr_0) and colour data (var_ptr_c) — both given as pointers into the
// per-vertex data of pX->pData, converted to offsets here — and render,
// optionally stamping the evaluation time on the image.
void surfacegraph::DrawSurface(const char * szname,
const int heightflag,
const real * var_ptr_0,
const int colourflag,
const real * var_ptr_c,
const bool bDisplayInner,
const int code, // graph code, to pass to called routines - sometimes useful
const TriMesh * pX // for passing to SetDataWithColour and Render
// and for working out offsets
)
{
// replaced CreateSurfaceGraphs.
// I think this is about the right balance.
char buff[256];
// Offsets (in units of `real`) of the height and colour fields within the
// plotted data block.
real * temprealptr = (real *)(pX->pData);
long offset = var_ptr_0 - temprealptr;
long offset_c = var_ptr_c - temprealptr;
// Does shader always go with colour type?? yes I think.
switch (colourflag) {
case VELOCITY_COLOUR:
this->mhTech = mFX->GetTechniqueByName("VelociTech");
break;
case SEGUE_COLOUR:
this->mhTech = mFX->GetTechniqueByName("SegueTech");
break;
case CURRENT_COLOUR:
this->mhTech = mFX->GetTechniqueByName("XYZTech");
break;
case AZSEGUE_COLOUR:
mhTech = mFX->GetTechniqueByName("AzSegueTech");
break;
case IONISE_COLOUR:
mhTech = mFX->GetTechniqueByName("IoniseTech");
break;
case PPN_COLOUR:
mhTech = mFX->GetTechniqueByName("ProportionTech"); // 1 = blue
break;
};
// Usual settings:
//if (GlobalGraphSetting[i] != GRAPH_NONE) {
this->boolDisplayShadow = true;
this->boolDisplayMainMesh = true;
this->boolDisplayMeshWireframe = GlobalboolDisplayMeshWireframe;
this->boolClearZBufferBeforeWireframe = false;
// Or try setting true and CULL_CCW to see if this stops it showing "the back of the wireframe"
this->SetEyeAndLookat(GlobalEye, GlobalLookat);
this->boolDisplayScales = true;
this->boolDisplayInnerMesh = bDisplayInner;
// work out whether to display key button:
// NOTE(review): this test uses FLAG_VELOCITY_COLOUR / FLAG_CURRENT_COLOUR
// while the switch above uses VELOCITY_COLOUR / CURRENT_COLOUR — confirm
// these constant pairs have the same values, otherwise the key button
// condition never matches the technique chosen.
if (((colourflag == FLAG_VELOCITY_COLOUR) || (colourflag == FLAG_CURRENT_COLOUR))
&& (bDisplayInner == 0))
{
this->boolDisplayKeyButton = true;
}
else {
this->boolDisplayKeyButton = false;
};
//int const FLAG_COLOUR_MESH = 0;
//int const FLAG_SEGUE_COLOUR = 1;
//int const FLAG_VELOCITY_COLOUR = 2;
//int const FLAG_CURRENT_COLOUR = 3;
//int const FLAG_AZSEGUE_COLOUR = 4;
//int const FLAG_IONISE_COLOUR = 5;
this->SetDataWithColour(*pX,
colourflag, heightflag, // apparently it's that way round
offset, offset_c,
code);
printf("DrawSurface code %d : calling Render:\n", code);
// Stamp the simulation time (ns) on the image if requested.
if (this->bDisplayTimestamp) {
sprintf(buff, "%6.2f ns", evaltime*1.0e9);
this->Render(szname, false, pX, buff);
}
else {
this->Render(szname, false, pX);
};
}
// Draw one family of 1-D cutaway graphs into viewport `iWhichGraph`.
//
// `flag` selects the curve family: ONE_D (Az-related), DTGRAPH (dTe/dt
// contributions), ACCELGRAPHS, ARELZ, OHMS2 or OHMSLAW. The curves come
// from the file-scope arrays graphdata[][], graph_r[], maximum[] and
// truemax[], which must have been filled beforehand (see
// Create1DGraphingData). Each curve is scaled so +-maximum[] maps to
// +-MAXY around the YADJUST baseline; a legend, vertical radius gridlines
// and a zero axis are drawn as well.
void Draw1Dgraph(int iWhichGraph, int flag)
{
	float const MAXX = 11.0f;
	float const MAXY = 6.0f;
	float const YADJUST = -2.8f;
	// Legend labels per curve family:
	char graphname[4][128] = { "Azdot","Azdotdot","Lap Az","-4pi/c Jz" };
	char Tgraphname[9][128] = { "conduction","ionization","viscosity","frictional","interspecies","dTe/dt total","compressive" ,
		"DnT","undefined" };
	char accelgraphname[9][128] = { "dvy/dt total", "v x B", "pressure", "neutral soak","viscosity", "ionization", "advection","grad_y Az" };
	char Ohmsgraphname[20][128] = { "elastic effective fric coeff", "ionization effective fric coeff",
		"thermal pressure y", "electromotive aez-aiz", "thermal force aiz-aez", "v-response T_zy", "v-response T_zz",
		"T_zy * thermal pressure y", "T_zz * electromotive", "T_zz * thermal force", "Predicted vez-viz",
		"Difference: prediction-vrelzk","vrelzk progress",
		"viscous aez-aiz","Predicted Jz","Conductivity sigma_zy","Conductivity sigma_zz",
		"sigma_zz * -electromotive", "Difference: Jz prediction-Jz","$$$" };
	char arelzgraphname[12][128] = { "arelz", "MAR_ion contribution", "MAR_elec contribution",
		"Ez_ext effect", "dAz/dt effect","v x B effect", "thermal force effect", "friction to neutrals",
		"friction_ei", "sum of effects", "difference (error)"};
	char buffer[256];
	float x, z;        // (unused locals y, zeroplane, theta and linedata2[] removed)
	D3DXMATRIXA16 matWorld;
	vertex1 linedata[10000];
	int iGraph;
	D3DCOLOR colourlist[20];
	char namelist[20][256];
	bool bAlternating[20]; // true => curve drawn with alternating white dashes
	int numgraphs;         // curves in this family (shadows the file-scope numgraphs)
	memset(bAlternating, 0, sizeof(bool) * 20);

	// Per-family legend text and curve colours; maximum[] (and truemax[] for
	// the Ohms graphs) are baked into the legend strings.
	if (flag == ONE_D) {
		numgraphs = 4;
		for (int i = 0; i < numgraphs; i++)
			sprintf(namelist[i],"%s : graph max&min = +- %1.3E",
				graphname[i], maximum[i]);
		colourlist[0] = 0xff000000;
		colourlist[1] = 0xff0022ff;
		colourlist[2] = 0xffff0055;
		colourlist[3] = 0xff22ff00;
	};
	if (flag == DTGRAPH) {
		numgraphs = 8;
		for (int i = 0; i < numgraphs; i++)
			sprintf(namelist[i], "%s : graph max&min = +- %1.3E",
				Tgraphname[i], maximum[i]);
		colourlist[0] = 0xffffaa00; // conduction: orange red
		colourlist[1] = 0xff0000ff; // ionization: royal blue
		colourlist[2] = 0xff009999; // viscosity: aqua
		colourlist[3] = 0xffd500ff; // resistive: heliotrope
		colourlist[4] = 0xff00ff00; // soak: green
		colourlist[5] = 0xff000000; // total
		colourlist[6] = 0xff906545; // compressive: brown
		// Bug fix: numgraphs is 8 but only 7 colours were set, so curve 7
		// ("DnT") was drawn with an uninitialized colour.
		colourlist[7] = 0xff888888; // DnT: grey
	};
	if (flag == ACCELGRAPHS)
	{
		numgraphs = 8;
		for (int i = 0; i < numgraphs; i++)
			sprintf(namelist[i], "%s : graph max&min = +- %1.3E",
				accelgraphname[i], maximum[i]);
		colourlist[0] = 0xff000000; // total:
		colourlist[1] = 0xffd500ff; // vxB: heliotrope
		colourlist[2] = 0xffff2200; // pressure: red
		colourlist[3] = 0xff00ff33; // soak: use green
		colourlist[4] = 0xff009999; // viscosity: aqua
		colourlist[5] = 0xff0000ff; // ionization : royal blue
		colourlist[6] = 0xff906545; // advection : brown
		colourlist[7] = 0xffeecd00; // grady_Az : olive?
	}
	if (flag == ARELZ)
	{
		numgraphs = 11;
		for (int i = 0; i < numgraphs; i++)
			sprintf(namelist[i], "%s : graph max&min = +- %1.3E",
				arelzgraphname[i], maximum[i]);
		colourlist[0] = 0xff000000; // total:
		colourlist[1] = 0xff009999; // ion visc : aqua
		colourlist[2] = 0xffeecd00; // elec visc : olive
		colourlist[3] = 0xffdada66; // electromotive
		colourlist[4] = 0xff4400ff; // inductive electromotive: indigo
		colourlist[5] = 0xffd500ff; // vxB: heliotrope
		colourlist[6] = 0xffff7700; // "thermal force effect"
		colourlist[7] = 0xff00ff33; // neutral soak :green
		colourlist[8] = 0xff00aa00; // dkgreen e-i friction
		colourlist[9] = 0xffff2299; // pink : sum
		colourlist[10] = 0xff000011; // navy
		bAlternating[10] = true;
	}
	if (flag == OHMS2) {
		numgraphs = 11;
		for (int i = 0; i < numgraphs; i++)
			sprintf(namelist[i], "%s :grmax+- %1.3E own|max| %1.3E",
				Ohmsgraphname[i], maximum[i], truemax[i]);
		colourlist[0] = 0xffcc0033; // elastic fric coeff: maroon
		colourlist[1] = 0xff00aa00; // dkgreen ionization fric coeff
		colourlist[2] = 0xffff0000; // pressure: red
		colourlist[3] = 0xffda00ff; // electromotive: violet
		colourlist[4] = 0xffff7700; // thermal force: orange
		colourlist[5] = 0xff00aadd; bAlternating[5] = true; // Tzy
		colourlist[6] = 0xff0000ff; // Tzz
		colourlist[7] = 0xffff55aa; bAlternating[7] = true;
		colourlist[8] = 0xffda00ff; bAlternating[8] = true;
		colourlist[9] = 0xffffaa00; bAlternating[9] = true;
		colourlist[10] = 0xff000000;
	};
	if (flag == OHMSLAW) {
		numgraphs = 9;
		for (int i = 0; i < numgraphs; i++)
			sprintf(namelist[i], "%s :grmax+- %1.3E own|max| %1.3E",
				Ohmsgraphname[i+10], maximum[i+10], truemax[i+10]);
		colourlist[0] = 0xff000000;
		colourlist[1] = 0xffff3333; // red: difference
		colourlist[2] = 0xffaadd00; // lime yellow: progress
		colourlist[3] = 0xff009999; // viscosity: aqua
		colourlist[4] = 0xffd500ff; // prediction Jz
		colourlist[5] = 0xff00bb33; // green conductivity
		colourlist[6] = 0xff0022ff; // blue conductivity
		colourlist[7] = 0xffda00ff; bAlternating[7] = true; // sigma_zz Ez
		colourlist[8] = 0xff666666; // difference of prediction
	}
	// Radial extent of the horizontal axis; ONE_D spans the whole domain.
	f64 rmax = GRAPH1D_MAXR;
	if (flag == ONE_D) rmax = DOMAIN_OUTER_RADIUS;
	f64 rmin = DEVICE_RADIUS_INSULATOR_OUTER - 0.01;
	if (flag == ONE_D) rmin = INNER_A_BOUNDARY;
	Graph[iWhichGraph].SetEyeAndLookat(newEye, newLookat); // sets matView not matProj
	printf("Eye %f %f %f\n", newEye.x, newEye.y, newEye.z);
	Direct3D.pd3dDevice->SetViewport(&(Graph[iWhichGraph].vp));
	D3DXMatrixIdentity(&matWorld);
	Direct3D.pd3dDevice->SetTransform(D3DTS_WORLD, &matWorld);
	Direct3D.pd3dDevice->SetTransform(D3DTS_VIEW, &(Graph[iWhichGraph].matView));
	Direct3D.pd3dDevice->SetTransform(D3DTS_PROJECTION, &(Graph[iWhichGraph].matProj));
	Direct3D.pd3dDevice->Clear(0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER,
		D3DCOLOR_XRGB(250, 255, 250), 1.0f, 0);
	if (SUCCEEDED(Direct3D.pd3dDevice->BeginScene()))
	{
		Direct3D.pd3dDevice->SetFVF(point_fvf);
		real r = 3.44;
		// Grey zero axis across the full width.
		linedata[0].x = -MAXX;
		linedata[0].z = 3.44*xzscale;
		linedata[0].y = YADJUST;
		linedata[0].colour = 0xff888888; // grey
		linedata[1].x = -linedata[0].x;
		linedata[1].y = YADJUST;
		linedata[1].z = linedata[0].z;
		linedata[1].colour = linedata[0].colour;
		Direct3D.pd3dDevice->DrawPrimitiveUP(D3DPT_LINESTRIP, 1, linedata, sizeof(vertex1));
		for (iGraph = 0; iGraph < numgraphs; iGraph++)
		{
			// Legend swatch: first 6 entries in a left-hand column, the rest
			// in a second column starting at x = 0.8.
			if (iGraph < 6) {
				linedata[0].x = -MAXX;
				linedata[0].z = 3.44*xzscale;
				linedata[0].y = MAXY + 4.0f - 0.9f*(float)iGraph;
			} else {
				linedata[0].x = 0.8f;
				linedata[0].z = 3.44*xzscale;
				linedata[0].y = MAXY + 4.0f - 0.9f*(float)(iGraph-6);
			}
			linedata[1].x = linedata[0].x + 0.5f;
			linedata[1].y = linedata[0].y;
			linedata[1].z = linedata[0].z;
			linedata[2].x = linedata[0].x + 1.0f;
			linedata[2].y = linedata[0].y;
			linedata[2].z = linedata[0].z;
			linedata[0].colour = colourlist[iGraph];
			linedata[1].colour = linedata[0].colour;
			linedata[2].colour = linedata[0].colour;
			if (bAlternating[iGraph]) linedata[1].colour = 0xffffffff;
			Direct3D.pd3dDevice->DrawPrimitiveUP(D3DPT_LINESTRIP, 2, linedata, sizeof(vertex1));
			Graph[iWhichGraph].RenderLabel2(namelist[iGraph], linedata[2].x + 0.1f, linedata[1].y - 0.3f, linedata[1].z, 0,0xff000000, true);
			// The OHMSLAW family reads rows 10+ of graphdata/maximum; all
			// other families read row iGraph directly. (This replaces two
			// duplicated loops that differed only in the row offset.)
			int iSrc = (flag == OHMSLAW) ? iGraph + 10 : iGraph;
			int asdf;
			for (asdf = 0; asdf < num_graph_data_points; asdf++)
			{
				// Map radius in [rmin, rmax] onto x in [+MAXX, -MAXX].
				linedata[asdf].x = (float)(MAXX - 2.0*MAXX*((graph_r[asdf] - rmin) /
					(rmax - rmin)));
				// map 0 to 0.0f, maximum[iSrc] to MAXY and -maximum[iSrc] to MINY
				// Decide on graph scales maximum[] in preceding bit of code
				linedata[asdf].y = YADJUST + (float)(MAXY*graphdata[iSrc][asdf] / maximum[iSrc]);
				linedata[asdf].z = 3.44f*xzscale;
				linedata[asdf].colour = colourlist[iGraph];
				if ((bAlternating[iGraph]) && (asdf % 3 == 1)) linedata[asdf].colour = 0xffffffff;
			};
			Direct3D.pd3dDevice->DrawPrimitiveUP(D3DPT_LINESTRIP, num_graph_data_points - 1, linedata, sizeof(vertex1));
		};
		// Nine vertical gridlines, labelled with the radius they represent.
		for (int i = 0; i < 9; i++)
		{
			x = 0.16*(-r*xzscale + 2.0*r*xzscale*(((real)i) / 8.0));
			z = 3.44*xzscale;// (float)(cos(HALFANGLE)*DEVICE_RADIUS_INSULATOR_OUTER)*xzscale;
			linedata[0].x = x; linedata[0].z = z;
			linedata[1].x = x; linedata[1].z = z;
			linedata[0].colour = 0xff220011;
			linedata[1].colour = 0xff220011;
			linedata[0].y = -6.8f + YADJUST;// GRAPHIC_MIN_Y - 1.0f;
			linedata[1].y = YADJUST + (((i == 0) || (i == 8)) ? 6.0f : 0.0f);// GRAPHIC_MAX_Y + 2.5f;
			Direct3D.pd3dDevice->DrawPrimitiveUP(D3DPT_LINESTRIP, 1, linedata, sizeof(vertex1));
			sprintf(buffer, "%5.2f", rmin + (1.0 - ((real)i) / 8.0)*(rmax - rmin));
			Graph[iWhichGraph].RenderLabel2(buffer, // text
				linedata[0].x,
				YADJUST - 7.6f,
				linedata[0].z, 0);
		};
		// Short black baseline segment in the centre.
		linedata[0].x = -0.16*r*xzscale;
		linedata[0].y = YADJUST;
		linedata[0].z = 3.44*xzscale;
		linedata[0].colour = 0xff000000; //
		linedata[1].x = 0.16*r*xzscale;
		linedata[1].y = YADJUST;
		linedata[1].z = linedata[0].z;
		linedata[1].colour = linedata[0].colour;
		Direct3D.pd3dDevice->DrawPrimitiveUP(D3DPT_LINESTRIP, 1, linedata, sizeof(vertex1));
		Direct3D.pd3dDevice->EndScene();
	}
	else {
		printf("BeginScene failed!\n\n");
		getch();
	}
}
void Create1DGraphingData(TriMesh * pX, bool bTdata = false, bool bAcceldata = false,
bool bOhmsData = false, bool b_arelz_data = false)
{
// Takes p_temphost3,4,5,6 and turns them into graphdata[iGraph=0,1,2,3][]
Vertex * pVertex, * pVert2;
f64_vec2 pos, pos0, pos1, pos2;
f64 dist0, dist1, dist2, wt0, wt1, wt2, wttotal, y0, y1, y2;
int iGraph, asdf, iWhich, iCorner, tri_len, i;
bool has_more, has_less, has_grad;
Triangle * pTri;
long izTri[MAXNEIGH];
long VertexIndexArray[10000];
num_graph_data_points = pX->GetVertsRightOfCutawayLine_Sorted(VertexIndexArray, graph_r, true);
printf("Xebeques furious\n Number of points %d\n", num_graph_data_points);
memset(maximum, 0, sizeof(f64) * 20);
// Method used in Render routine looks quite reasonable: find tri that crosses cutaway,
// use some kind of interp on tri. But we need to use values from p_temphost array not a graph position.
for (asdf = 0; asdf < num_graph_data_points; asdf++)
{
// if (asdf % 10 == 0) printf("<");
// printf("%d ; ", VertexIndexArray[asdf]);
pVertex = pX->X + VertexIndexArray[asdf];
// We want the tri directly to the left of it, through which (-1,0) passes.
// 1.Get these vertex indices
// which tri contains a point which is further and a point less far?
real rr = pVertex->pos.x*pVertex->pos.x + pVertex->pos.y*pVertex->pos.y;
iWhich = -1;
tri_len = pVertex->GetTriIndexArray(izTri);
for (i = 0; i < tri_len; i++)
{
pTri = pX->T + izTri[i];
has_more = false; has_less = false; has_grad = false;
for (iCorner = 0; iCorner < 3; iCorner++)
{
pVert2 = pTri->cornerptr[iCorner];
if (pVert2 != pVertex)
{
if (pVert2->pos.x*pVert2->pos.x + pVert2->pos.y*pVert2->pos.y > rr)
{
has_more = true;
}
else {
has_less = true;
};
};
if (pVert2->pos.x / pVert2->pos.y < pVertex->pos.x / pVertex->pos.y)
has_grad = true;
};
if (has_more && has_less && has_grad)
{
iWhich = i;
}
};
if (iWhich == -1) {// give up, do nothing}
printf("gave up. %d \n", VertexIndexArray[asdf]);
graphdata[0][asdf] = 0.0;
graphdata[1][asdf] = 0.0;
graphdata[2][asdf] = 0.0;
graphdata[3][asdf] = 0.0;
} else {
pTri = pX->T + izTri[iWhich];
while ((pTri->u8domain_flag != DOMAIN_TRIANGLE) && (iWhich >= 0)) {
pTri = pX->T + izTri[iWhich];
iWhich--;
};
iWhich++;
// we are needing to adjust graph_r and interp graphdata
pos.y = pVertex->pos.y;
pos.x = pVertex->pos.y*CUTAWAYANGLE; // can leave graph_r undisturbed
pos0 = pTri->cornerptr[0]->pos;
pos1 = pTri->cornerptr[1]->pos;
pos2 = pTri->cornerptr[2]->pos;
// if one sits at the CUTAWAYANGLE then we can get dist == 0.
dist0 = sqrt((pos0 - pos).dot(pos0 - pos));
dist1 = sqrt((pos1 - pos).dot(pos1 - pos));
dist2 = sqrt((pos2 - pos).dot(pos2 - pos));
if (dist0 == 0.0) {
wt0 = 1.0; wt1 = 0.0; wt2 = 0.0;
}
else {
if (dist1 == 0.0) {
wt0 = 0.0; wt1 = 1.0; wt2 = 0.0;
}
else {
if (dist2 == 0.0) {
wt0 = 0.0; wt1 = 0.0; wt2 = 1.0;
} else {
wt0 = 1.0f / dist0;
wt1 = 1.0f / dist1;
wt2 = 1.0f / dist2;
wttotal = wt0 + wt1 + wt2;
wt0 /= wttotal;
wt1 /= wttotal;
wt2 /= wttotal;
// Not a great way it has to be said.
}
}
}
if ((bTdata == false) && (bAcceldata == false) && (bOhmsData == false)
&& (b_arelz_data == false)) {
y0 = p_temphost3[(pTri->cornerptr[0] - pX->X) + BEGINNING_OF_CENTRAL];
y1 = p_temphost3[(pTri->cornerptr[1] - pX->X) + BEGINNING_OF_CENTRAL];
y2 = p_temphost3[(pTri->cornerptr[2] - pX->X) + BEGINNING_OF_CENTRAL];
graphdata[0][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[0][asdf]) > maximum[0]) maximum[0] = fabs(graphdata[0][asdf]);
if (numgraphs > 1) {
y0 = p_temphost4[(pTri->cornerptr[0] - pX->X) + BEGINNING_OF_CENTRAL];
y1 = p_temphost4[(pTri->cornerptr[1] - pX->X) + BEGINNING_OF_CENTRAL];
y2 = p_temphost4[(pTri->cornerptr[2] - pX->X) + BEGINNING_OF_CENTRAL];
graphdata[1][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[1][asdf]) > maximum[1]) maximum[1] = fabs(graphdata[1][asdf]);
};
if (numgraphs > 2) {
y0 = p_temphost5[(pTri->cornerptr[0] - pX->X) + BEGINNING_OF_CENTRAL];
y1 = p_temphost5[(pTri->cornerptr[1] - pX->X) + BEGINNING_OF_CENTRAL];
y2 = p_temphost5[(pTri->cornerptr[2] - pX->X) + BEGINNING_OF_CENTRAL];
graphdata[2][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[2][asdf]) > maximum[2]) maximum[2] = fabs(graphdata[2][asdf]);
};
if (numgraphs > 3) {
y0 = p_temphost6[(pTri->cornerptr[0] - pX->X) + BEGINNING_OF_CENTRAL];
y1 = p_temphost6[(pTri->cornerptr[1] - pX->X) + BEGINNING_OF_CENTRAL];
y2 = p_temphost6[(pTri->cornerptr[2] - pX->X) + BEGINNING_OF_CENTRAL];
graphdata[3][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[3][asdf]) > maximum[3]) maximum[3] = fabs(graphdata[3][asdf]);
}
} else {
// go through from 0 = conduction to 5 = dTe/dt itself
// we have missed out compressive...
if (bTdata) {
for (int j = 0; j < 8; j++)
{
y0 = p_Tgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_Tgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_Tgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[j][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[j][asdf]) > maximum[j]) maximum[j] = fabs(graphdata[j][asdf]);
}
} else {
if (bAcceldata) {
int j;
j = 1; // total
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[0][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 3; // vxB
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[1][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 5; // pressure
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[2][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 6; // neutral soak
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[3][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 8; // viscosity
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[4][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 9; // ionization
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[5][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 10; // advection
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[6][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
// works if comment here
// printf("%d ", asdf);
for (int j = 0; j < 7; j++)
{
// printf("%d", j);
if (fabs(graphdata[j][asdf]) > maximum[0]) {
maximum[0] = fabs(graphdata[j][asdf]);
// printf("maximum %1.9E\n", maximum[0]);
}
}
// does it work if comment here? no
j = 11; // grad_y Az
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[7][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[7][asdf]) > maximum[7]) maximum[7] = fabs(graphdata[7][asdf]);
} else {
if (bOhmsData) {
int j;
for (j = 0; j < 19; j++) {
y0 = p_Ohmsgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_Ohmsgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_Ohmsgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[j][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if ((pos.y < 4.6) && (pos.y > 3.44) && (fabs(graphdata[j][asdf]) > maximum[j])) maximum[j] = fabs(graphdata[j][asdf]);
};
} else {
int j;
for (j = 0; j < 12; j++) {
y0 = p_arelz_graph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_arelz_graph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_arelz_graph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[j][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if ((pos.y < 4.8) && (pos.y > 3.44) && (fabs(graphdata[j][asdf]) > maximum[j])) maximum[j] = fabs(graphdata[j][asdf]);
};
};
};
};
};
}; // found triangle
}; // asdf
if ((bTdata == false) && (bAcceldata == false) && (bOhmsData == false) && (b_arelz_data == false)) {
maximum[3] = max(maximum[3], maximum[2]);
maximum[2] = maximum[3];
} else {
// for dT graphs, let maximum be overall
if (bTdata) {
for (int j = 1; j <= 6; j++)
maximum[j] = max(maximum[j], maximum[j - 1]);
for (int j = 5; j >= 0; j--)
maximum[j] = maximum[j + 1];
} else {
if (bAcceldata) {
for (int j = 1; j < 7; j++)
maximum[j] = maximum[0];
} else {
if (bOhmsData) {
memcpy(truemax, maximum, sizeof(f64) * 20);
// use max 0 and 1 combined:
f64 temp = max(maximum[0], maximum[1]);
maximum[0] = temp;
maximum[1] = temp;
temp = max(max(maximum[2], maximum[3]), max(maximum[4], maximum[13]));
maximum[2] = temp;
maximum[3] = temp;
maximum[4] = temp; // thermal force
maximum[13] = temp; // viscous
temp = max(maximum[5], maximum[6]);
maximum[5] = temp;
maximum[6] = temp;
temp = max(max(maximum[7], maximum[8]), max(maximum[9], maximum[10]));
maximum[7] = temp;
maximum[8] = temp;
maximum[9] = temp;
maximum[10] = temp;
// temp = max(maximum[11], maximum[12]); // difference, progress
// maximum[11] = temp;
// maximum[12] = temp;
temp = max(maximum[14], maximum[17]);
maximum[14] = temp;
maximum[17] = temp;
temp = max(maximum[15], maximum[16]);
maximum[15] = temp;
maximum[16] = temp;
} else {
// All same scale except for "difference" = element 10
// ... and the arelz itself?
int j;
f64 temp = maximum[1];
for (j = 2; j < 9; j++)
temp = max(temp, maximum[j]);
for (j = 1; j < 9; j++)
maximum[j] = temp;
temp = max(maximum[0], maximum[9]);
maximum[0] = temp;
maximum[9] = temp; // actual vs sum
}
}
}
}
}
void RefreshGraphs(TriMesh & X, // only not const because of such as Reset_vertex_nvT
const int iGraphsFlag)
{
D3DXMATRIXA16 matWorld;
Vertex * pVertex;
long iVertex;
plasma_data * pdata;
int offset_v, offset_T;
char buff[256];
sprintf(buff, "%5.2f ns", evaltime*1.0e9);
f64 overc;
char buffer[256];
overc = 1.0 / c_;
float x, y, z;
float zeroplane = 0.0f;
int i;
int iGraph;
float const MAXX = 11.0f;
float const MAXY = 6.0f;
long iMinor;
switch (iGraphsFlag) {
case ONE_D:
// We are going to have to think about using LineTo the way it is done in RenderGraphs
// let's start by rendering in the x-y plane and we can let the present camera look at it
printf("\n\nGot to here: ONE_D\n\n");
// Create data:
Create1DGraphingData(&X);
Draw1Dgraph(6, ONE_D);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_temphost3[iVertex+BEGINNING_OF_CENTRAL];
++pVertex;
++pdata;
}
Graph[4].DrawSurface("Azdot",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_AZDOT, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_temphost4[iVertex + BEGINNING_OF_CENTRAL];
++pVertex;
++pdata;
}
Graph[1].DrawSurface("Azdotdot",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_AZDOT, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_temphost5[iVertex + BEGINNING_OF_CENTRAL];
++pdata;
}
Graph[3].DrawSurface("Lap Az",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_LAPAZ, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = p_temphost6[iVertex + BEGINNING_OF_CENTRAL];
}
else {
pdata->temp.x = 0.0;
}
++pdata;
}
Graph[5].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_JZ, &X);
break;
case AZSOLVERGRAPHS:
pdata = X.pData;
// Bear in mind: iMinor won't actually get displayed
for (iMinor = 0; iMinor < NMINOR; iMinor++)
{
pdata->temp.x = p_temphost1[iMinor]; // epsilon
pdata->Azdot = p_temphost2[iMinor]; // Azdot0
pdata->temp.y = p_temphost3[iMinor]; // gamma
pdata->Az = p_temphost4[iMinor]; // Az
++pdata;
}
Graph[0].DrawSurface("epsilon",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_EPSILON, &X);
Graph[2].DrawSurface("Azdot0",
DATA_HEIGHT, (real *)(&(X.pData[0].Azdot)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Azdot)),
true,
GRAPH_AZDOT, &X);
Graph[3].DrawSurface("regressorn",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.y)),
true,
GRAPH_OPTI, &X);
Graph[4].DrawSurface("Az",
DATA_HEIGHT, (real *)(&(X.pData[0].Az)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Az)),
true,
GRAPH_AZ, &X);
pdata = X.pData;
for (iMinor = 0; iMinor < NMINOR; iMinor++)
{
pdata->temp.x = p_temphost5[iMinor]; // epsilon
pdata->temp.y = p_temphost6[iMinor]; // Azdot0
++pdata;
}
Graph[1].DrawSurface("regressori",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_LAPAZ, &X);
Graph[5].DrawSurface("Jacobi",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.y)),
true,
GRAPH_REGRESSOR, &X);
break;
case DTGRAPH:
// We are going to have to think about using LineTo the way it is done in RenderGraphs
// let's start by rendering in the x-y plane and we can let the present camera look at it
printf("\n\nRefreshGraphs: DTGRAPHS\n\n");
// Create data:
Create1DGraphingData(&X, true);
Draw1Dgraph(6, DTGRAPH);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_Tgraph_host[5][iVertex];
++pVertex;
++pdata;
}
Graph[4].DrawSurface("dTe/dt",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_DTE, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_Tgraph_host[7][iVertex];
++pVertex;
++pdata;
}
Graph[1].DrawSurface("d/dt nTe",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_DNT, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_Tgraph_host[5][iVertex];
++pdata;
}
Graph[3].DrawSurface("n",
DATA_HEIGHT, (real *)(&(X.pData[0].n)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_ION_N, &X);
Graph[5].DrawSurface("Te",
DATA_HEIGHT, (real *)(&(X.pData[0].Te)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_ELEC_T, &X);
break;
case OHMS2:
// We are going to have to think about using LineTo the way it is done in RenderGraphs
// let's start by rendering in the x-y plane and we can let the present camera look at it
printf("\n\nRefreshGraphs: OHMS2\n\n");
// Create data:
Create1DGraphingData(&X, false, false, true);
Draw1Dgraph(6, OHMS2);
Draw1Dgraph(7, OHMSLAW);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_Ohmsgraph_host[14][iVertex];
++pVertex;
++pdata;
}
Graph[4].DrawSurface("Jz prediction",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_JZ, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_Ohmsgraph_host[17][iVertex];
++pVertex;
++pdata;
}
Graph[5].DrawSurface("electromotive-only prediction",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_VE0Z, &X);
break;
case ACCELGRAPHS:
// We are going to have to think about using LineTo the way it is done in RenderGraphs
// let's start by rendering in the x-y plane and we can let the present camera look at it
printf("\n\nRefreshGraphs: ACCELGRAPHS\n\n");
// Create data:
Create1DGraphingData(&X, false, true);
Draw1Dgraph(6, ACCELGRAPHS);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_accelgraph_host[0][iVertex];
pdata->temp.y = p_accelgraph_host[1][iVertex];
++pVertex;
++pdata;
}
Graph[4].DrawSurface("dvxy/dt",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].temp.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_AXY, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_accelgraph_host[2][iVertex];
pdata->temp.y = p_accelgraph_host[3][iVertex];
++pVertex;
++pdata;
}
Graph[1].DrawSurface("axy : v x B",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].temp.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_AXY2, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_accelgraph_host[4][iVertex];
pdata->temp.y = p_accelgraph_host[5][iVertex];
++pdata;
}
Graph[3].DrawSurface("axy : pressure",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].temp.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_AXY3, &X);
Graph[5].DrawSurface("vxy",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].vxy)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].vxy)),
false,
GRAPH_ION_V, &X);
break;
case ARELZ:
// We are going to have to think about using LineTo the way it is done in RenderGraphs
// let's start by rendering in the x-y plane and we can let the present camera look at it
printf("\n\nRefreshGraphs: ARELZ\n\n");
// Create data:
Create1DGraphingData(&X, false, false, false, true);
Draw1Dgraph(6, ARELZ);
// Graphs:
// .. arelz
// .. electromotive
// .. v x B
// .. error
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_arelz_graph_host[0][iVertex];
pdata->temp.y = p_arelz_graph_host[0][iVertex];
++pVertex;
++pdata;
}
Graph[4].DrawSurface("arelz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_ARELZ, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_arelz_graph_host[3][iVertex] +
p_arelz_graph_host[4][iVertex];
++pVertex;
++pdata;
}
Graph[1].DrawSurface("-e/m Ez_total",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_ELECTROMOTIVE, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_arelz_graph_host[5][iVertex];
++pdata;
}
Graph[3].DrawSurface("arelz : v x B",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_VXBARELZ, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_arelz_graph_host[10][iVertex];
if (pdata->temp.x > 1.0e13) {
printf("%d %1.9E | ", iVertex, pdata->temp.x);
}
++pdata;
}
Graph[5].DrawSurface("error in sum",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_ERROR, &X);
// Cannot explain why maximum on graph is 1e13 not 1e5 as reported on 1D graph.
break;
/*
case JXY_RHO_EXY_GRADPHI_AXYDOTOC_AXY:
X.Setup_J(); // the others can already exist.
Graph[4].bDisplayTimestamp = true;
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pVertex->temp2.x = q * (pVertex->Ion.mass - pVertex->Elec.mass) / pVertex->AreaCell;
}
else {
pVertex->temp2.x = 0.0;
};
pVertex->Adot /= c;
++pVertex;
}
Graph[0].DrawSurface("Exy[statV/cm]",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
true,
GRAPH_EXY, &X);
Graph[1].DrawSurface("Adotxy/c[statV/cm]",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Adot.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Adot.x)),
true,
GRAPH_ADOTXY, &X);
Graph[2].DrawSurface("Jxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Temp.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Temp.x)),
false, // no inner mesh display.
GRAPH_JXY, &X);
Graph[3].DrawSurface("phidot[statV/s]",
DATA_HEIGHT, (real *)(&(X.X[0].phidot)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].phidot)),
true,
GRAPH_PHIDOT, &X);
Graph[4].DrawSurface("rho",
DATA_HEIGHT, (real *)(&(X.X[0].temp2.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].temp2.x)),
false, // no inner mesh display.
GRAPH_RHO, &X);
Graph[5].DrawSurface("phi[statV]",
DATA_HEIGHT, (real *)(&(X.X[0].phi)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].phi)),
true,
GRAPH_PHI, &X);
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
pVertex->Adot *= c;
++pVertex;
}
break;
case JZ_AZ_BXY_EZ_ADOTZOC_NVZ:
X.Reset_vertex_nvT(SPECIES_ELEC);
X.Setup_J(); // the others can already exist.
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
pVertex->Adot /= c;
++pVertex;
}
Graph[4].bDisplayTimestamp = true;
Graph[0].DrawSurface("Ez[statV/cm]",
DATA_HEIGHT, (real *)(&(X.X[0].E.z)),
FLAG_SEGUE_COLOUR, (real *)(&(X.X[0].E.z)),
false, // ??
GRAPH_EZ, &X);
Graph[1].DrawSurface("Az",
DATA_HEIGHT, (real *)(&(X.X[0].A.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].A.z)),
true,
GRAPH_AZ, &X);
Graph[2].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.X[0].Temp.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].Temp.z)),
false, // no inner mesh display.
GRAPH_JZ, &X);
Graph[3].DrawSurface("Bxy[Gs]",
VELOCITY_HEIGHT, (real *)(&(X.X[0].B)),
VELOCITY_COLOUR, (real *)(&(X.X[0].B)),
true, // no inner mesh display: ??
GRAPH_BXY, &X);
Graph[4].DrawSurface("Adotz/c [statV/cm]",
DATA_HEIGHT, (real *)(&(X.X[0].Adot.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].Adot.z)),
true,
GRAPH_AZ, &X);
Graph[5].colourmax = Graph[2].colourmax;
Graph[5].DrawSurface("Elec n",
DATA_HEIGHT, (real *)(&(X.X[0].n)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].Temp.z)),
false, // no inner mesh display
GRAPH_ELEC_N, &X);
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
pVertex->Adot *= c;
++pVertex;
}
break;
case SPECIES_ELECTRON2:
X.Reset_vertex_nvT(SPECIES_ELEC);
Graph[0].DrawSurface("Elec n [/cc]",
DATA_HEIGHT, (real *)(&(X.X[0].n)),
VELOCITY_COLOUR, (real *)(&(X.X[0].v)),
false, // no inner mesh display
GRAPH_ELEC_N, &X);
Graph[1].DrawSurface("v_e_xy[cm/s]",
VELOCITY_HEIGHT, (real *)(&(X.X[0].v)),
VELOCITY_COLOUR, (real *)(&(X.X[0].v)),
false, // no inner mesh display
GRAPH_ELEC_V, &X);
Graph[3].DrawSurface("v_e_z[cm/s]",
DATA_HEIGHT, (real *)(&(X.X[0].v.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].v.z)),
false, // no inner mesh display.
GRAPH_VEZ, &X);
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pVertex->temp2.x = (pVertex->Ion.mass - pVertex->Elec.mass) / pVertex->AreaCell;
}
else {
pVertex->temp2.x = 0.0;
};
++pVertex;
}
Graph[2].bDisplayTimestamp = false;
Graph[2].DrawSurface("n_i-n_e",
DATA_HEIGHT, (real *)(&(X.X[0].temp2.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].temp2.x)),
false, // no inner mesh display.
GRAPH_NINE, &X);
Graph[5].TickRescaling = 1.0 / kB;
Graph[5].DrawSurface("Elec T [eV]",
DATA_HEIGHT, (real *)(&(X.X[0].T)),
SEGUE_COLOUR, (real *)(&(X.X[0].T)),
false, // no inner mesh display
GRAPH_ELEC_T, &X);
Graph[5].TickRescaling = 1.0;
offset_v = (real *)(&(X.X[0].v)) - (real *)(&(X.X[0]));
offset_T = (real *)(&(X.X[0].T)) - (real *)(&(X.X[0]));
Graph[4].SetEyePlan(GlobalPlanEye);
Graph[4].boolDisplayMeshWireframe = true;
Graph[4].boolClearZBufferBeforeWireframe = true;
Graph[4].boolDisplayMainMesh = true;
Graph[4].boolDisplayInnerMesh = false;
Graph[4].boolDisplayScales = false;
Graph[4].boolDisplayShadow = false;
Graph[4].mhTech = Graph[4].mFX->GetTechniqueByName("VelociTech");
Graph[4].colourmax = Graph[0].colourmax; // match colours
Graph[4].SetDataWithColour(X, FLAG_VELOCITY_COLOUR, FLAG_FLAT_MESH, offset_v, offset_v,
GRAPH_FLAT_WIRE_MESH);
Graph[4].Render(buff, GlobalRenderLabels, &X);
break;
*/
case IONIZEGRAPH:
printf("\n\nRefreshGraphs: IONIZEGRAPHS\n\n");
// When we come to speed up graphs, make it so we can
// just pass an array of f64. !!!!
// Investigate graphs half an hour: what's up with the rest?
// Move table, start running.
// Can we bring back cutaway any how?
// Wanted acceleration graphs.
// Want to do a big run.
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_graphdata1_host[iVertex];
pdata->temp.y = p_graphdata2_host[iVertex]; // dn/dt /n
++pVertex;
++pdata;
}
Graph[0].DrawSurface("dn/dt",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_DNDT, &X);
Graph[1].DrawSurface("dn/dt / n",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_DNDT_OVER_n, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_graphdata3_host[iVertex]; // log10 n
++pVertex;
++pdata;
}
Graph[3].DrawSurface("log10(n)",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_LOG10N, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_graphdata4_host[iVertex]; // dTe/dt
pdata->temp.y = p_graphdata6_host[iVertex]; // n/nn
++pVertex;
++pdata;
}
Graph[2].DrawSurface("dTe/dt[ionization]",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_DTEDT, &X);
Graph[4].DrawSurface("n_e / n_total",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
IONISE_COLOUR, (real *)(&(X.pData[0].temp.y)),
false,
GRAPH_FRACTION, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_graphdata5_host[iVertex]; // dvez/dt
//if (pdata->vez != 0.0f) {
// pdata->temp.y = pdata->temp.x / (pdata->vez);
//} else {
// pdata->temp.y = 0.0;
//}
++pVertex;
++pdata;
}
printf("got to here 1");
Graph[5].DrawSurface("accel ez[ionization]",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].vez)),
false,
GRAPH_AEZ1, &X);
// Do we need another shader? Or can we reset limits?
// see what scale is like.
printf("got to here 2");
break;
case OVERALL:
printf("\n\nRefreshGraphs: OVERALL\n\n");
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = pdata->n + pdata->n_n;
pdata->temp.y = pdata->n / (1.0 + pdata->temp.x);
} else {
pdata->temp.x = 0.0;
pdata->temp.y = 0.0;
}
++pVertex;
++pdata;
}
Graph[0].DrawSurface("n_n + n_ion",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
IONISE_COLOUR, (real *)(&(X.pData[0].temp.y)),
false,
GRAPH_TOTAL_N, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = (m_neutral_*pdata->n_n*pdata->v_n.x
+ (m_ion_ + m_e_) * pdata->n*pdata->vxy.x) /
(m_neutral_*pdata->n_n + (m_ion_ + m_e_)*pdata->n);
pdata->temp.y = (m_neutral_*pdata->n_n*pdata->v_n.y
+ (m_ion_ + m_e_) * pdata->n*pdata->vxy.y) /
(m_neutral_*pdata->n_n + (m_ion_ + m_e_)*pdata->n);
} else {
pdata->temp.x = 0.0; pdata->temp.y = 0.0;
}
++pVertex;
++pdata;
}
Graph[1].DrawSurface("sum[n_s v_s m_s]/sum[n_s m_s]",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].temp.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display
GRAPH_TOTAL_V, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = (pdata->n_n*pdata->Tn
+ pdata->n*(pdata->Ti + pdata->Te)) /
(pdata->n_n + pdata->n + pdata->n);
} else {
pdata->temp.x = 0.0; pdata->temp.y = 0.0;
}
++pVertex;
++pdata;
}
Graph[3].TickRescaling = 1.0 / kB_;
Graph[3].DrawSurface("sum[n_s T_s]/sum[n_s]",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_TOTAL_T, &X);
Graph[3].TickRescaling = 1.0;
Graph[2].DrawSurface("Neutral n",
DATA_HEIGHT, (real *)(&(X.pData[0].n_n)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].v_n)),
false, // no inner mesh display
GRAPH_NEUT_N, &X);
Graph[4].DrawSurface("Neutral v",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].v_n)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].v_n)),
false, // no inner mesh display
GRAPH_NEUT_V, &X);
Graph[5].TickRescaling = 1.0 / kB_;
Graph[5].DrawSurface("Neutral T",
DATA_HEIGHT, (real *)(&(X.pData[0].Tn)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Tn)),
false, // no inner mesh display
GRAPH_NEUT_T, &X);
Graph[5].TickRescaling = 1.0;
break;
case SPECIES_ION:
printf("\n\nRefreshGraphs: SPECIES_ION\n\n");
Graph[3].TickRescaling = 1.0 / kB_;
Graph[3].DrawSurface("Ion T",
DATA_HEIGHT, (real *)(&(X.pData[0].Ti)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Ti)),
false, // no inner mesh display
GRAPH_ION_T, &X);
Graph[3].TickRescaling = 1.0;
// labels only appear on first 1 called.
Graph[0].DrawSurface("Ion n",
DATA_HEIGHT, (real *)(&(X.pData[0].n)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].vxy)),
false, // no inner mesh display
GRAPH_ION_N, &X);
Graph[1].DrawSurface("Ion v",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].vxy)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].vxy)),
false, // no inner mesh display
GRAPH_ION_V, &X);
// These are same so double up with elec.
Graph[5].TickRescaling = 1.0 / kB_;
Graph[5].DrawSurface("Elec T",
DATA_HEIGHT, (real *)(&(X.pData[0].Te)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false, // no inner mesh display
GRAPH_ELEC_T, &X);
Graph[5].TickRescaling = 1.0;
break;
/* case SPECIES_ELEC:
Graph[0].DrawSurface("Elec n",
DATA_HEIGHT, (real *)(&(X.pData[0].n)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].vxy)),
false, // no inner mesh display
GRAPH_ELEC_T, &X);
// colours == 0.0 ... because v = 0
// First........... let's understand why surface normals come out unpredictable.
// Then............ let's go and see what it does with y values (in Render and .fx)
Graph[1].DrawSurface("Elec v",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].vxy)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].vxy)),
false, // no inner mesh display
GRAPH_ELEC_V, &X);
break;
// In other cases, (and even for the above),
// here is a good place to call the
// setup routines for temp variables.
*/
case OHMSLAW:
printf("\n\nRefreshGraphs: OHMSLAW\n\n");
// 0. q/ m_e nu_sum
// 1. qn / m_e nu_sum
// 2. nu_sum
// 3. prediction of Jz from uniform Ez
// 4. prediction of Jz from actual Ez
// 5. Actual Jz
// Let temphost1 = nu_en + nu_ei_effective
// Let temphost2 = nu_en/temphost1
// Cannot explain why, that comes out black and this doesn't.
// Oh because colourmax has been set to 1 or not?
// Yet the following crashes it. Bizarre? Maybe dividing by 0?
overc = 1.0 / c_;
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = q_ / (m_e_ * (1.0 + p_temphost1[iVertex + BEGINNING_OF_CENTRAL]));
pdata->temp.y = p_temphost2[iVertex + BEGINNING_OF_CENTRAL]; // colour
} else {
pdata->temp.x = 0.0;
pdata->temp.y = 0.0;
}
++pdata;
++pVertex;
};
Graph[0].DrawSurface("q over m nu_effective",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
PPN_COLOUR, (real *)(&(X.pData[0].temp.y)),
false, // no inner mesh display.
GRAPH_VRESPONSEOHMS, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n /
(m_e_ * (1.0 + p_temphost1[iVertex + BEGINNING_OF_CENTRAL]));
pdata->temp.y = p_temphost2[iVertex + BEGINNING_OF_CENTRAL]; // colour
} else {
pdata->temp.x = 0.0;
pdata->temp.y = 0.0;
};
++pdata;
++pVertex;
};
Graph[1].DrawSurface("qn / m nu_effective",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
PPN_COLOUR, (real *)(&(X.pData[0].temp.y)),
false, // no inner mesh display.
GRAPH_CONDUCTIVITYOHMS, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = p_temphost1[iVertex + BEGINNING_OF_CENTRAL];
pdata->temp.y = p_temphost2[iVertex + BEGINNING_OF_CENTRAL]; // colour
};
++pVertex;
++pdata;
};
Graph[2].DrawSurface("nu_effective (blue=neut dominates)",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
PPN_COLOUR, (real *)(&(X.pData[0].temp.y)),
false, // no inner mesh display.
GRAPH_NU_EFFECTIVE, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = EzStrength_*q_*q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n /
(m_e_ * (1.0 + p_temphost1[iVertex + BEGINNING_OF_CENTRAL]));
};
++pdata;
};
Graph[3].DrawSurface("predict Jz (uniform Ez)",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_JZ, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = (EzStrength_
- X.pData[iVertex + BEGINNING_OF_CENTRAL].Azdot*overc
)*q_*q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n /
(m_e_ * (1.0 + p_temphost1[iVertex + BEGINNING_OF_CENTRAL]));
};
++pdata;
};
Graph[4].DrawSurface("predict Jz (Ez)",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_JZ, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n*
(X.pData[iVertex + BEGINNING_OF_CENTRAL].viz - X.pData[iVertex + BEGINNING_OF_CENTRAL].vez);
};
++pdata;
};
Graph[5].DrawSurface("actual Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_JZ, &X);
break;
case JZAZBXYEZ:
printf("\n\nRefreshGraphs: JZAZBXYEZ\n\n");
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = q_ * pdata->n*(pdata->viz - pdata->vez);
++pdata;
};
Graph[3].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_JZ, &X);
// create graph data for Ez : add Ez_strength*Ezshape to -Azdot/c
overc = 1.0 / c_;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
X.pData[iVertex + BEGINNING_OF_CENTRAL].temp.y =
-X.pData[iVertex + BEGINNING_OF_CENTRAL].Azdot*overc
+ GetEzShape__(X.pData[iVertex + BEGINNING_OF_CENTRAL].pos.modulus())*EzStrength_;
}
Graph[2].DrawSurface("Ez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)), // use Jz's colour
false,
GRAPH_EZ, &X);
Graph[0].DrawSurface("Az",
DATA_HEIGHT, (real *)(&(X.pData[0].Az)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Az)),
true, GRAPH_AZ, &X);
// for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
// {
// X.pData[iVertex + BEGINNING_OF_CENTRAL].temp.x = p_B_host[iVertex + BEGINNING_OF_CENTRAL].x;
// X.pData[iVertex + BEGINNING_OF_CENTRAL].temp.y = p_B_host[iVertex + BEGINNING_OF_CENTRAL].y;
// }
Graph[1].DrawSurface("Bxy",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].B.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].B.x)),
false,
GRAPH_BXY, &X);
Graph[5].DrawSurface("vez",
DATA_HEIGHT, (real *)(&(X.pData[0].vez)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)), // colour is for Jz?
false, GRAPH_VEZ, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = -p_temphost3[iVertex + BEGINNING_OF_CENTRAL]/c_;
++pVertex;
++pdata;
}
Graph[4].DrawSurface("-Azdot/c",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_AZDOT, &X);
// pdata = X.pData + BEGINNING_OF_CENTRAL;
// for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
// {
// pdata->temp.x = temp_array_host[iVertex + BEGINNING_OF_CENTRAL];
// ++pdata;
// };
// Graph[4].DrawSurface("Lap Az",
// DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
// AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
// true, GRAPH_LAPAZ, &X);
break;
case VIZVEZJZAZDOT:
printf("\n\nRefreshGraphs: VIZVEZJZAZDOT\n\n");
// Set Jz:
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = q_ * pdata->n*(pdata->viz - pdata->vez);
++pdata;
};
Graph[0].DrawSurface("viz",
DATA_HEIGHT, (real *)(&(X.pData[0].viz)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, GRAPH_VIZ, &X);
Graph[1].DrawSurface("vez",
DATA_HEIGHT, (real *)(&(X.pData[0].vez)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, GRAPH_VEZ, &X);
Graph[2].DrawSurface("Azdot",
DATA_HEIGHT, (real *)(&(X.pData[0].Azdot)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Azdot)),
true, GRAPH_AZDOT, &X);
Graph[3].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, GRAPH_JZ, &X);
break;
/*
case NEWSTUFF:
// Too bad substep is not stated. We should divide by substep to give anything meaningful
// in these graphs.
// Let temphost3 = vez0
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_temphost3[iVertex + BEGINNING_OF_CENTRAL];
++pdata;
};
Graph[0].DrawSurface("vez0 : vez = vez0 + sigma Ez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_VEZ0, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_OhmsCoeffs_host[iVertex + BEGINNING_OF_CENTRAL].sigma_e_zz;
++pdata;
};
Graph[1].DrawSurface("sigma : vez = vez0 + sigma Ez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_RESPONSE, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n*
(p_OhmsCoeffs_host[iVertex + BEGINNING_OF_CENTRAL].sigma_i_zz
- p_OhmsCoeffs_host[iVertex + BEGINNING_OF_CENTRAL].sigma_e_zz);
// Will show something not very useful ---- in a brief instant there
// isn't much time for second-order (frictional) effects.
++pdata;
};
Graph[2].DrawSurface("Ez=0 v addition: vez0-vez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_DECEL, &X);
// Too bad substep is not stated. We should divide by substep to give anything meaningful
// in these graphs.
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n*
(p_OhmsCoeffs_host[iVertex + BEGINNING_OF_CENTRAL].sigma_i_zz
- p_OhmsCoeffs_host[iVertex + BEGINNING_OF_CENTRAL].sigma_e_zz);
// Will show something not very useful ---- in a brief instant there
// isn't much time for second-order (frictional) effects.
++pdata;
};
Graph[3].DrawSurface("dynamic conductivity q n sigma : vez = vez0 + sigma Ez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_DYNCONDUCTIVITY, &X);
// create graph data for Ez : add Ez_strength*Ezshape to -Azdot/c
overc = 1.0 / c_;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
X.pData[iVertex + BEGINNING_OF_CENTRAL].temp.y =
-X.pData[iVertex + BEGINNING_OF_CENTRAL].Azdot*overc
+ GetEzShape__(X.pData[iVertex + BEGINNING_OF_CENTRAL].pos.modulus())*EzStrength_;
}
Graph[4].DrawSurface("Ez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)), // use Jz's colour
false,
GRAPH_EZ, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = q_ * pdata->n*(pdata->viz - pdata->vez);
++pdata;
};
Graph[5].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_JZ, &X);
break;*/
case LAPAZ_AZ:
printf("\n\nRefreshGraphs: LAPAZ_AZ\n\n");
// Assume temp.x contains Lap Az
Graph[0].DrawSurface("Lap Az",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true, GRAPH_LAPAZ, &X);
Graph[1].DrawSurface("Az",
DATA_HEIGHT, (real *)(&(X.pData[0].Az)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Az)),
true, GRAPH_AZ, &X);
Graph[2].DrawSurface("Azdot",
DATA_HEIGHT, (real *)(&(X.pData[0].Azdot)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Azdot)),
true, GRAPH_AZDOT, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.y = q_ * pdata->n*(pdata->viz - pdata->vez);
++pdata;
};
Graph[3].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.y)),
false, GRAPH_JZ, &X);
break;
case EXYCOMPONENTS:
/*
X.Setup_J(); // the others can already exist.
Graph[0].DrawSurface("Adotxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Adot.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Adot.x)),
true,
GRAPH_ADOTXY, &X);
Graph[1].DrawSurface("Grad phi",
VELOCITY_HEIGHT, (real *)(&(X.X[0].GradTe)),
VELOCITY_COLOUR, (real *)(&(X.X[0].GradTe)),
true, // no inner mesh display: ??
GRAPH_GRADPHI, &X);
Graph[2].DrawSurface("Exy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
true,
GRAPH_EXY, &X);
Graph[3].DrawSurface("Jxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Temp.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Temp.x)),
false, // no inner mesh display.
GRAPH_JXY, &X);
*/
// Set GradTe to grad phi
break;
case JXYAXYBZEXY:
/*
X.Setup_J(); // the others can already exist.
Graph[0].DrawSurface("Axy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].A.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].A.x)),
true,
GRAPH_AXY, &X);
Graph[1].DrawSurface("Bz",
DATA_HEIGHT, (real *)(&(X.X[0].B.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].B.z)),
true, // no inner mesh display: ??
GRAPH_BZ, &X);
Graph[2].DrawSurface("Exy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
true,
GRAPH_EXY, &X);
Graph[3].DrawSurface("Jxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Temp.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Temp.x)),
false, // no inner mesh display.
GRAPH_JXY, &X);
*/
break;
case EXY_RHO_PHI_PHIDOT:
/*
// For this one do n_i-n_e
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pVertex->temp2.x = (pVertex->Ion.mass - pVertex->Elec.mass) / pVertex->AreaCell;
}
else {
pVertex->temp2.x = 0.0;
};
++pVertex;
}
Graph[0].DrawSurface("phi",
DATA_HEIGHT, (real *)(&(X.X[0].phi)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].phi)),
true,
GRAPH_PHI, &X);
Graph[1].DrawSurface("phidot",
DATA_HEIGHT, (real *)(&(X.X[0].phidot)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].phidot)),
true,
GRAPH_PHIDOT, &X);
Graph[2].DrawSurface("Exy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
true,
GRAPH_EXY, &X);
Graph[3].DrawSurface("n_i-n_e",
DATA_HEIGHT, (real *)(&(X.X[0].temp2.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].temp2.x)),
false, // no inner mesh display.
GRAPH_NINE, &X);
*/
break;
case EXY_RHO_PHI_JXY:
// create rho on pVertex->temp2.x ...
/*
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pVertex->temp2.x = q * (pVertex->Ion.mass - pVertex->Elec.mass) / pVertex->AreaCell;
}
else {
pVertex->temp2.x = 0.0;
};
++pVertex;
}
X.Setup_J();
Graph[0].DrawSurface("phi",
DATA_HEIGHT, (real *)(&(X.X[0].phi)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].phi)),
false,
GRAPH_PHI, &X);
Graph[1].DrawSurface("Jxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Temp)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Temp)),
false, // no inner mesh display: ??
GRAPH_JXY, &X);
Graph[2].DrawSurface("Exy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
false,
GRAPH_EXY, &X);
Graph[3].DrawSurface("rho",
DATA_HEIGHT, (real *)(&(X.X[0].temp2.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].temp2.x)),
false, // no inner mesh display.
GRAPH_RHO, &X);
*/
break;
case EXY_RHO_BZ_JXY:
/*
// create rho on pVertex->temp2.x ...
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pVertex->temp2.x = q * (pVertex->Ion.mass - pVertex->Elec.mass) / pVertex->AreaCell;
}
else {
pVertex->temp2.x = 0.0;
};
++pVertex;
}
X.Setup_J();
Graph[0].DrawSurface("Bz",
DATA_HEIGHT, (real *)(&(X.X[0].B.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].B.z)),
true, // no inner mesh display: ??
GRAPH_BZ, &X);
Graph[1].DrawSurface("Jxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Temp)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Temp)),
false, // no inner mesh display: ??
GRAPH_JXY, &X);
Graph[2].DrawSurface("Exy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
false,
GRAPH_EXY, &X);
Graph[3].DrawSurface("rho",
DATA_HEIGHT, (real *)(&(X.X[0].temp2.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].temp2.x)),
false, // no inner mesh display.
GRAPH_RHO, &X);
*/
break;
//case SIGMA_E_J:
/*
X.Setup_J(); // the others can already exist.
Graph[0].DrawSurface("sigma_e_zz",
DATA_HEIGHT, (real *)(&(X.X[0].sigma_e.zz)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].sigma_e.zz)),
true,
GRAPH_SIGMA_E, &X);
//Graph[1].DrawSurface("v_e_0.z",
// DATA_HEIGHT,(real *)(&(X.X[0].v_e_0.z)),
// AZSEGUE_COLOUR,(real *)(&(X.X[0].v_e_0.z)),
//false, // no inner mesh display: ??
// GRAPH_VE0Z, &X);
Graph[1].DrawSurface("nsigma",
DATA_HEIGHT, (real *)(&(X.X[0].xdotdot.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].xdotdot.x)),
true, GRAPH_SIGMATEMP, &X);
Graph[2].DrawSurface("Ez",
DATA_HEIGHT, (real *)(&(X.X[0].E.z)),
FLAG_AZSEGUE_COLOUR, (real *)(&(X.X[0].E.z)), // how to make SEGUE_COLOUR work?
false, // ??
GRAPH_EZ, &X);
Graph[3].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.X[0].Temp.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].Temp.z)),
false, // no inner mesh display.
GRAPH_JZ, &X);
*/
// break;
case TOTAL:
// In this case we have to create data,
// as we go.
// Best put it here so we can see where
// data is being populated.
/*long iVertex;
Vertex * pVertex = X;
for (iVertex = 0; iVertex < numVertices; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pVertex->n = (pVertex->Neut.mass + pVertex->Ion.mass) / pVertex->AreaCell;
pVertex->v = (m_n*pVertex->Neut.mom + m_ion * pVertex->Ion.mom + m_e * pVertex->Elec.mom) /
(m_n*pVertex->Neut.mass + m_ion * pVertex->Ion.mass + m_e * pVertex->Elec.mass);
pVertex->T = (pVertex->Neut.heat + pVertex->Ion.heat + pVertex->Elec.heat) /
(pVertex->Neut.mass + pVertex->Ion.mass + pVertex->Elec.mass);
pVertex->Temp.x = pVertex->Ion.mass / (pVertex->Neut.mass + pVertex->Ion.mass);
};
++pVertex;
}*/
//X.CalculateTotalGraphingData();
printf("\n\nRefreshGraphs: TOTAL\n\n");
// ought to change this to use variables n,v,T !
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = pdata->n + pdata->n_n;
pdata->temp.y = pdata->n / pdata->temp.x;
}
++pVertex;
++pdata;
}
Graph[0].DrawSurface("n_n + n_ion",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
IONISE_COLOUR, (real *)(&(X.pData[0].temp.y)),
false,
GRAPH_TOTAL_N, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = (m_neutral_*pdata->n_n*pdata->v_n.x
+ (m_ion_ + m_e_) * pdata->n*pdata->vxy.x) /
(m_neutral_*pdata->n_n + (m_ion_ + m_e_)*pdata->n);
pdata->temp.y = (m_neutral_*pdata->n_n*pdata->v_n.y
+ (m_ion_ + m_e_) * pdata->n*pdata->vxy.y) /
(m_neutral_*pdata->n_n + (m_ion_ + m_e_)*pdata->n);
}
++pVertex;
++pdata;
}
Graph[1].DrawSurface("sum[n_s v_s m_s]/sum[n_s m_s]",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].temp.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display
GRAPH_TOTAL_V, &X);
//Graph[2].DrawSurface("n_n+n_ion",
//DATA_HEIGHT, (real *)(&(X.X[0].n)),
//VELOCITY_COLOUR, (real *)(&(X.X[0].v)),
//false,
//GRAPH_TOTAL_N_II, &X); // ok what we did here? we thought we'd colour with velocity .. but we haven't given ourselves room for 3 temp vars so drop this for now.
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = (pdata->n_n*pdata->Tn
+ pdata->n*(pdata->Ti + pdata->Te)) /
(pdata->n_n + pdata->n + pdata->n);
}
++pVertex;
++pdata;
}
Graph[3].TickRescaling = 1.0 / kB_;
Graph[3].DrawSurface("sum[n_s T_s]/sum[n_s]",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_TOTAL_T, &X);
Graph[3].TickRescaling = 1.0;
break;
};
// Graph 2 and 4, in case of species graphs:
switch (iGraphsFlag) {
//case SPECIES_NEUTRAL:
case SPECIES_ION:
//case SPECIES_ELEC:
//case TOTAL:
int offset_v, offset_T;
offset_v = (real *)(&(X.pData[0].vxy)) - (real *)(&(X.pData[0]));
offset_T = (real *)(&(X.pData[0].Te)) - (real *)(&(X.pData[0]));
Graph[2].SetEyePlan(GlobalPlanEye);
Graph[2].boolDisplayMeshWireframe = true;
Graph[2].boolClearZBufferBeforeWireframe = true;
Graph[2].boolDisplayMainMesh = true;
Graph[2].boolDisplayInnerMesh = false;
Graph[2].boolDisplayScales = false;
if (GlobalColoursPlanView == 0)
{
// nothing
Graph[2].mhTech = Graph[2].mFX->GetTechniqueByName("MeshTech");
Graph[2].SetDataWithColour(X, FLAG_COLOUR_MESH, FLAG_FLAT_MESH, 0, 0,
GRAPH_FLAT_WIRE_MESH);
Graph[2].Render(buff, GlobalRenderLabels, &X);
} else {
// Tell SDWC not to mess with colourmax if it's a flat mesh.
if (GlobalColoursPlanView == 1)
{
// velocity
Graph[2].mhTech = Graph[2].mFX->GetTechniqueByName("VelociTech");
Graph[2].colourmax = Graph[0].colourmax; // match colours
Graph[2].SetDataWithColour(X, FLAG_VELOCITY_COLOUR, FLAG_FLAT_MESH, offset_v, offset_v,
GRAPH_FLAT_WIRE_MESH);
Graph[2].Render(buff, GlobalRenderLabels, &X);
};
////else {
//// // temperature
//// Graph[2].mhTech = Graph[2].mFX->GetTechniqueByName("SegueTech");
//// // SegueVS should take maximum as a parameter;
//// // at least for colours we should prefer an absolute scale for T
//// // Is it ever used for anything else? Not so far? eps?
//// Graph[2].SetDataWithColour(X, FLAG_SEGUE_COLOUR, FLAG_FLAT_MESH, offset_T, offset_T,
//// GRAPH_FLAT_WIRE_MESH);
//// Graph[2].Render(buff, GlobalRenderLabels, &X);
////};
};
// =================================================================================
printf("\ngot to here; graph [4]:\n\n");
Graph[4].boolDisplayKeyButton = false; // it's temperature
Graph[4].SetEyePlan(GlobalPlanEye);
Graph[4].boolDisplayMeshWireframe = true;
Graph[4].boolClearZBufferBeforeWireframe = true;
Graph[4].boolDisplayMainMesh = true;
Graph[4].boolDisplayInnerMesh = false;
Graph[4].boolDisplayScales = false;
Graph[4].mhTech = Graph[4].mFX->GetTechniqueByName("SegueTech");
Graph[4].SetDataWithColour(X, FLAG_SEGUE_COLOUR, FLAG_FLAT_MESH, offset_T, offset_T,
GRAPH_FLAT_WIRE_MESH);
Graph[4].Render(buff, GlobalRenderLabels, &X);
break;
}
printf("End of Refreshgraphs\n");
}
// Media Foundation sink writers, one per output MP4 file (created in main
// via InitializeSinkWriter; NUMAVI files are written in parallel).
IMFSinkWriter *pSinkWriter[NUMAVI];
// Stream index returned by InitializeSinkWriter for each sink writer.
DWORD izStream[NUMAVI];
HRESULT hresult;
// Timeslice at which the next frame is placed into the mp4 files
// (reset to 0 in main before the sink writers are created).
LONGLONG rtStart = 0;
// Program entry point.
//
// Sequence: pick a CUDA/HIP device (preferring one with a 384-bit memory
// bus), initialise COM and Media Foundation, open a uniquely-numbered
// functional-output text file, build and populate the TriMesh systems
// (X1..X4), register and create the Win32 window, initialise Direct3D and
// the on-screen graphs, set up a GDI DIB for video frame capture, open one
// MP4 sink writer per output stream, draw the initial graphs, then run the
// Windows message loop until WM_QUIT.
//
// Returns 0 on normal exit, 1 if the final device reset fails, or a Win32
// error code if window creation fails.
int main()
{
    printf("hello\n");
    HINSTANCE hInstance = GetModuleHandle(NULL);
    HWND hwndConsole = GetConsoleWindow();
    WCHAR szInitialFilenameAvi[512];
    MSG msg;
    HDC hdc;
    // HACCEL hAccelTable;
    real x, y, temp;
    int i, j;
    float a1, a2, a3, a4;
    //HWND hwndConsole;
    FILE * fp;
    extern char Functionalfilename[1024];

    // ---- CUDA/HIP device selection --------------------------------------
    // FIX: iWhich is now initialised to 0. Previously it was left
    // indeterminate and was passed to hipSetDevice whenever no enumerated
    // device had a 384-bit memory bus (or no devices were found at all).
    int nDevices, iWhich = 0;
    hipDeviceProp_t prop;
    hipGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        hipGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n",
            prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n",
            prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
            2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
        // Prefer a 384-bit-bus card (e.g. Tesla K40); last such device wins.
        if (prop.memoryBusWidth == 384) iWhich = i;
    }
    printf("Picked %d \n", iWhich);
    getch();
    hipSetDevice(iWhich); // K40?
    hipDeviceReset();
    size_t uFree, uTotal;
    hipMemGetInfo(&uFree, &uTotal);
    printf("Memory on device: uFree %zd uTotal %zd\n", uFree, uTotal);

    // ---- COM + Media Foundation startup (needed for the MP4 sink writers)
    HRESULT hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);
    if (!SUCCEEDED(hr)) {
        printf("CoInitializeEx failed. press p\n");
        while (getch() != 'p');
        exit(23233);
    }
    hr = MFStartup(MF_VERSION);
    if (!SUCCEEDED(hr)) {
        printf("MFStartup failed. press p\n");
        while (getch() != 'p');
        exit(23234);
    }

    // ---- Simulation state initialisation --------------------------------
    h = TIMESTEP;
    evaltime = 0.0; // gets updated before advance
    memset(Historic_powermax, 0, 200 * sizeof(int));
    memset(Historic_powermin, 0, 200 * sizeof(int));
    ZeroMemory(Historic_max, 512 * HISTORY * sizeof(float));
    ZeroMemory(Historic_min, 512 * HISTORY * sizeof(float));
    GlobalStepsCounter = 0; steps_remaining = 0; steps_remaining_CPU = 0;
    SetConsoleTitle("2D 1/16 annulus DPF simulation");
    Sleep(40);
    //hwndConsole = FindWindow(NULL, "2D 1/16 annulus DPF simulation");
    MoveWindow(hwndConsole, 0, 0, SCREEN_WIDTH - VIDEO_WIDTH - 10, SCREEN_HEIGHT - 30, TRUE);
    report_time(0);

    // Find the first unused functional-output filename of the form
    // FUNCTIONALFILE_START"NNN.txt" and write the column-header line.
    int filetag = 0;
    do {
        filetag++;
        sprintf(Functionalfilename, FUNCTIONALFILE_START "%03d.txt", filetag);
    } while ((_access(Functionalfilename, 0)) != -1);
    printf("\n\nopening %s \n", Functionalfilename);
    fp = fopen(Functionalfilename, "w");
    if (fp == 0) {
        printf("error with %s \n", Functionalfilename);
        getch();
    }
    else {
        printf("opened %s \n", Functionalfilename);
    };
    fprintf(fp, "GSC evaltime Area neut.N ion.N elec.N neut.r ion.r elec.r SDneut.r SDion.r SDelec.r "
        " neut.vr neut.vth neut.vz ion.vr ion.vth ion.vz elec.vr elec.vth elec.vz neut.heat ion.heat elec.heat neut.T ion.T elec.T "
        " neut.mnvv/3 ion.mnvv/3 elec.mnvv/3 elec.force(vxB)r within3.6 elec.Bth EE BB Heatings and dT changes - see code \n");
    fclose(fp);

    // ---- Mesh construction ---------------------------------------------
    X1.Initialise(1); // Set evaltime first
    X2.Initialise(2);
    X3.Initialise(3);
    printf("Got to here 1\n");
    {
        X4.Initialise(4);
        printf("Got to here 2\n");
        X4.CreateTilingAndResequence2(&X1);
        X4.CreateTilingAndResequence2(&X2);
        X4.CreateTilingAndResequence2(&X3);
        printf("Got to here 3\n");
        //
        // Dropping it for now so we can pursue solving equations first.
        //
    }
    X1.Recalculate_TriCentroids_VertexCellAreas_And_Centroids();
    X1.EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
    X1.SetupMajorPBCTriArrays();
    X2.Recalculate_TriCentroids_VertexCellAreas_And_Centroids();
    X2.EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
    X2.SetupMajorPBCTriArrays();
    X3.Recalculate_TriCentroids_VertexCellAreas_And_Centroids();
    X3.EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
    X3.SetupMajorPBCTriArrays();
    printf("Got to here 4\n");
    X1.InitialPopulate();
    X2.InitialPopulate();
    X3.InitialPopulate();
    X1.Create4Volleys();
    X2.Create4Volleys();
    X3.Create4Volleys();
    pTriMesh = &X1;
    pX = &X1;
    pXnew = &X2;
    GlobalBothSystemsInUse = 0;
    // NOTE(review): report_time(1) is passed as the printf FORMAT string;
    // any '%' in its return value would be misinterpreted. Safer would be
    // printf("%s", report_time(1)); left unchanged to preserve behaviour.
    printf(report_time(1));
    printf("\n");
    report_time(0);

    // ---- Window class registration and window creation ------------------
    LoadString(hInstance, IDS_APP_TITLE, szTitle, 1024);
    LoadString(hInstance, IDC_F2DVALS, szWindowClass, 1024);
    wcex.cbSize = sizeof(WNDCLASSEX);
    wcex.style = CS_HREDRAW | CS_VREDRAW;
    wcex.lpfnWndProc = WndProc;
    wcex.cbClsExtra = 0;
    wcex.cbWndExtra = 0;
    wcex.hInstance = hInstance;
    wcex.hIcon = LoadIcon(hInstance, MAKEINTRESOURCE(IDI_F2DVALS));
    wcex.hCursor = LoadCursor(NULL, IDC_ARROW);
    wcex.hbrBackground = (HBRUSH)(COLOR_WINDOW + 1);
    wcex.lpszMenuName = MAKEINTRESOURCE(IDR_MENU1);
    wcex.lpszClassName = szWindowClass;
    wcex.hIconSm = LoadIcon(wcex.hInstance, MAKEINTRESOURCE(IDI_SMALL));
    if (RegisterClassEx(&wcex) == 0) {
        char buff[128];
        MessageBox(NULL, "RegisterClassEx failed", itoa(GetLastError(), buff, 10), MB_OK);
    };
    printf("SCREEN_WIDTH %d VIDEO_WIDTH %d VIDEO_HEIGHT %d \n",
        SCREEN_WIDTH, VIDEO_WIDTH, VIDEO_HEIGHT);
    hWnd = CreateWindowEx(NULL, szWindowClass, szTitle, WS_BORDER | WS_POPUP,
        SCREEN_WIDTH - VIDEO_WIDTH - 5, 0, VIDEO_WIDTH + 5, VIDEO_HEIGHT + 20, NULL, NULL, hInstance, NULL);
    if (!hWnd) {
        DWORD dword = GetLastError();
        char buff[128];
        MessageBox(NULL, "CreateWindowEx failed", itoa(dword, buff, 10), MB_OK);
        return dword;
    }
    // This is sending a message to WndProc before any of the following happens.
    ShowWindow(hWnd, SW_SHOWNORMAL);
    UpdateWindow(hWnd);
    hwndGraphics = hWnd;

    // ---- Direct3D + camera setup ----------------------------------------
    xzscale = 2.0 / 0.1; // very zoomed in. Now what?
    DXChk(Direct3D.Initialise(hWnd, hInstance, VIDEO_WIDTH, VIDEO_HEIGHT));
    // With Field Of View = PI/4 used this:
    /*
    GlobalEye.x = 0.0f;
    GlobalEye.y = 12.4f; //7.2f;
    GlobalEye.z = -18.0f + 2.5*xzscale;//DEVICE_RADIUS_INSULATOR_OUTER*xzscale;//-17.8f+
    GlobalLookat.x = 0.4f;
    GlobalLookat.y = 3.0f;
    GlobalLookat.z = DEVICE_RADIUS_INITIAL_FILAMENT_CENTRE * xzscale;
    GlobalPlanEye.x = 0.0f;
    GlobalPlanEye.y = 35.0f;
    GlobalPlanEye.z = (3.44 + 4.1)*0.5*xzscale;
    GlobalPlanEye2.x = -0.1f;
    GlobalPlanEye2.y = 19.5f;
    GlobalPlanEye2.z = 2.8*xzscale;
    GlobalPlanLookat.x = GlobalPlanEye.x;
    GlobalPlanLookat.y = 0.0f;
    GlobalPlanLookat.z = GlobalPlanEye.z + 0.0001;
    GlobalPlanLookat2.x = GlobalPlanEye2.x;
    GlobalPlanLookat2.y = 0.0f;
    GlobalPlanLookat2.z = GlobalPlanEye2.z + 0.0001;*/
    GlobalEye.x = -10.4f;
    GlobalEye.y = 16.4f; //7.2f;
    GlobalEye.z = 44.0f;
    GlobalLookat.x = 1.20f;
    GlobalLookat.y = 3.0f;
    GlobalLookat.z = 72.2f;
    GlobalPlanEye.x = 2.9f;
    GlobalPlanEye.y = 17.97f;
    GlobalPlanEye.z = 71.95f;
    GlobalPlanEye2.x = -0.1f;
    GlobalPlanEye2.y = 19.5f;
    GlobalPlanEye2.z = 2.8*xzscale;
    GlobalPlanLookat.x = GlobalPlanEye.x;
    GlobalPlanLookat.y = 0.0f;
    GlobalPlanLookat.z = GlobalPlanEye.z + 0.0001;
    GlobalPlanLookat2.x = GlobalPlanEye2.x;
    GlobalPlanLookat2.y = 0.0f;
    GlobalPlanLookat2.z = GlobalPlanEye2.z + 0.0001;
    newEye.x = 0.0f;
    newEye.y = 0.1f;
    newEye.z = 40.0f;
    newLookat.x = 0.0f;
    newLookat.y = 0.0f;
    newLookat.z = 72.0f;
    // Add vectors in parallel.
    hipError_t cudaStatus;

    // ---- Graph window initialisation (2x2 grid, plus extras if NUMGRAPHS > 4)
    if (DXChk(Graph[0].InitialiseWithoutBuffers(0, 0, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalEye, GlobalLookat)) +
        DXChk(Graph[0].InitialiseBuffers(X1))
        )
    {
        PostQuitMessage(200);
    };
    if (DXChk(Graph[1].InitialiseWithoutBuffers(0, GRAPH_HEIGHT, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalEye, GlobalLookat)) +
        DXChk(Graph[1].InitialiseBuffers(X1))
        )
    {
        PostQuitMessage(201);
    };
    if (DXChk(Graph[2].InitialiseWithoutBuffers(GRAPH_WIDTH, 0, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalPlanEye, GlobalPlanLookat)) +
        DXChk(Graph[2].InitialiseBuffers(X1))
        )
    {
        PostQuitMessage(202);
    };
    if (DXChk(Graph[3].InitialiseWithoutBuffers(GRAPH_WIDTH, GRAPH_HEIGHT, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalEye, GlobalLookat)) +
        DXChk(Graph[3].InitialiseBuffers(X1))
        )
    {
        PostQuitMessage(203);
    };
    if (NUMGRAPHS > 4) {
        if (DXChk(Graph[4].InitialiseWithoutBuffers(GRAPH_WIDTH * 2, 0, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalPlanEye, GlobalPlanLookat)) +
            DXChk(Graph[4].InitialiseBuffers(X1))
            )
        {
            PostQuitMessage(204);
        };
        if (DXChk(Graph[5].InitialiseWithoutBuffers(GRAPH_WIDTH * 2, GRAPH_HEIGHT, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalEye, GlobalLookat)) +
            DXChk(Graph[5].InitialiseBuffers(X1))
            )
        {
            PostQuitMessage(204);
        };
        if (DXChk(Graph[6].InitialiseWithoutBuffers(0, 0, GRAPH_WIDTH*2, GRAPH_HEIGHT, newEye, GlobalLookat, true)) +
            DXChk(Graph[6].InitialiseBuffers(X1))
            )
        {
            PostQuitMessage(204);
        };
        if (DXChk(Graph[7].InitialiseWithoutBuffers(0, GRAPH_HEIGHT, GRAPH_WIDTH * 2, GRAPH_HEIGHT, newEye, GlobalLookat, true)) +
            DXChk(Graph[7].InitialiseBuffers(X1))
            )
        {
            PostQuitMessage(204);
        };
    };
    Graph[0].bDisplayTimestamp = false;
    Graph[1].bDisplayTimestamp = false;
    Graph[2].bDisplayTimestamp = false;
    Graph[3].bDisplayTimestamp = false;
    Graph[4].bDisplayTimestamp = true;
    Graph[5].bDisplayTimestamp = false;
    Graph[6].bDisplayTimestamp = true;
    Graph[7].bDisplayTimestamp = false;

    // ---- GDI bitmap + DIB setup for grabbing frames off the back buffer -
    Direct3D.pd3dDevice->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &p_backbuffer_surface);
    if (DXChk(p_backbuffer_surface->GetDC(&surfdc), 1000))
        MessageBox(NULL, "GetDC failed", "oh dear", MB_OK);
    surfbit = CreateCompatibleBitmap(surfdc, VIDEO_WIDTH, VIDEO_HEIGHT); // EXTRAHEIGHT = 90
    SelectObject(surfdc, surfbit);
    dibdc = CreateCompatibleDC(surfdc);
    long VideoWidth = VIDEO_WIDTH;
    long VideoHeight = VIDEO_HEIGHT;
    // pasted here just to set up format:
    // NOTE(review): biSize is conventionally sizeof(BITMAPINFOHEADER), not
    // sizeof(BITMAPINFO); and biSizeImage is set to biHeight, where MSDN
    // specifies image bytes (or 0 for BI_RGB). Both look wrong but are left
    // unchanged since GDI evidently tolerates them here — confirm.
    bitmapinfo.bmiHeader.biSize = sizeof(BITMAPINFO);
    bitmapinfo.bmiHeader.biWidth = VideoWidth;
    bitmapinfo.bmiHeader.biHeight = VideoHeight;
    bitmapinfo.bmiHeader.biPlanes = 1;
    bitmapinfo.bmiHeader.biBitCount = 24;
    bitmapinfo.bmiHeader.biCompression = BI_RGB; // uncompressed
    bitmapinfo.bmiHeader.biSizeImage = bitmapinfo.bmiHeader.biHeight;
    bitmapinfo.bmiHeader.biXPelsPerMeter = 3000;
    bitmapinfo.bmiHeader.biYPelsPerMeter = 3000;
    bitmapinfo.bmiHeader.biClrUsed = 0;
    bitmapinfo.bmiHeader.biClrImportant = 0;
    bitmapinfo.bmiColors->rgbBlue = 0;
    bitmapinfo.bmiColors->rgbRed = 0;
    bitmapinfo.bmiColors->rgbGreen = 0;
    bitmapinfo.bmiColors->rgbReserved = 0;
    // dimension DIB and set up pointer to bits
    dib = CreateDIBSection(dibdc, &bitmapinfo, DIB_RGB_COLORS, &lpvBits, NULL, 0);
    SelectObject(dibdc, dib);
    BitBlt(dibdc, 0, 0, VIDEO_WIDTH, VIDEO_HEIGHT, surfdc, 0, 0, SRCCOPY);

    // ---- Open one MP4 sink writer per output stream ---------------------
    rtStart = 0; // timeslice : where to place frames into mp4 files.
    for (i = 0; i < NUMAVI; i++)
    {
        swprintf(szInitialFilenameAvi, L"%s%s_%s", FOLDER, szmp4[i], INITIALMP4);
        pSinkWriter[i] = NULL;
        hr = InitializeSinkWriter(&(pSinkWriter[i]), &(izStream[i]), szInitialFilenameAvi);
        if (!SUCCEEDED(hr)) {
            printf("Failed to create mp4 file %d %ls \n", i, szmp4[i]);
        }
        // hAvi[i] = CreateAvi(szInitialFilenameAvi, AVIFRAMEPERIOD, NULL);
        //if (hAvi[i] == 0) {
        // printf("Failed to create avi file %d", i);
        // getch(); getch(); getch();
        //}
    };
    printf("got to here: Initialized SinkWriters \n");
    getch();
    // 1000/25 = 40
    //ZeroMemory(&opts, sizeof(opts));
    //opts.fccHandler = mmioFOURCC('D', 'I', 'B', ' ');//('d','i','v','x');
    //opts.dwFlags = 8;
    //for (i = 0; i < NUMAVI; i++)
    //{
    // hresult = SetAviVideoCompression(hAvi[i], dib, &opts, false, hWnd); // always run this for every avi file but can
    // // call with false as long as we know opts contains valid information.
    // if (hresult != 0) {
    // printf("error: i = %d, hresult = %d", i, (long)hresult);
    // getch(); getch(); getch();
    // };
    //};
    counter = 0;
    //ReleaseDC(hWnd,surfdc);
    p_backbuffer_surface->ReleaseDC(surfdc);
    GlobalCutaway = true; // dies if true
    RefreshGraphs(*pX, GlobalSpeciesToGraph);
    Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);

    // ---- Main message loop ----------------------------------------------
    memset(&msg, 0, sizeof(MSG));
    while (msg.message != WM_QUIT)
    {
        if (PeekMessage(&msg, NULL, 0U, 0U, PM_REMOVE))
        {
            TranslateMessage(&msg);
            DispatchMessage(&msg);
        } else {
            // No pending message: just re-present the current frame.
            Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
        };
    };
    UnregisterClass(szWindowClass, wcex.hInstance);
    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }
    return 0;
}
/* Auxiliary routine: printing a matrix */
void print_matrix(char* desc, lapack_int m, lapack_int n, double* a, lapack_int lda) {
lapack_int i, j;
printf("\n %s\n", desc);
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) printf(" %2.5E", a[i*lda + j]);
printf("\n");
}
}
/* Auxiliary routine: printing a vector of integers */
void print_int_vector(char* desc, lapack_int n, lapack_int* a) {
lapack_int j;
printf("\n %s\n", desc);
for (j = 0; j < n; j++) printf(" %6i", a[j]);
printf("\n");
}
LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
{
f64 lowest_vez;
long iLow, iMinor;
Triangle * pTri;
Vertex * pVertex;
long izTri[128];
static bool bInvoked_cuSyst = false;
static long GSCCPU = 0;
int iAntiskips;
int wmId, wmEvent;
int i, j, ctr;
PAINTSTRUCT ps;
HDC hdc;
real time_back_for_Adot;
FILE * file, *fp;
int maxeerr, count, iMin;
WCHAR buf1000[1024];
char buf1001[1024];
int attempts;
real store_h;
char ch, o;
int failed;
RECT rect;
real TotalArea, TotalCharge;
long iVertex;
real mass_avg, mass_SD, mass_min, mass_max;
OPENFILENAME ofn; // common dialog box structure
char szFile[260]; // buffer for file name
char szFilter[1000]; // buffer for file filter
char szfilter[256];
char buffer[256];
TriMesh * temp;
static const real XCENTRE2 = DEVICE_RADIUS_INITIAL_FILAMENT_CENTRE * sin(PI / 32.0);
static const real XCENTRE1 = -XCENTRE2;
static const real YCENTRE = DEVICE_RADIUS_INITIAL_FILAMENT_CENTRE * cos(PI / 32.0);
switch (message)
{
case WM_CREATE:
// Don't ever try doing initialisation here;
// That should be done manually from the menus.
break;
case WM_COMMAND:
wmId = LOWORD(wParam);
wmEvent = HIWORD(wParam);
printf("\nWM_COMMAND: wmId %d\n\n", wmId);
// Ensure that display menu items are consecutive IDs.
// Parse the menu selections:
switch (wmId)
{
case ID_DISPLAY_ONE_D:
// printf("\a\n");
// Don't know why resource.h is not working;
// Maybe some #define overwrites it with 40024.
//wmId += 50007 - 40024;
GlobalSpeciesToGraph = ONE_D;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
// int const GraphFlags[NUMAVI] = { SPECIES_ION, OVERALL, JZAZBXYEZ, OHMSLAW, ONE_D, IONIZEGRAPH };
case ID_DISPLAY_ION:
GlobalSpeciesToGraph = SPECIES_ION;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_TOTAL:
GlobalSpeciesToGraph = OVERALL;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_JZAZBXYEZ:
GlobalSpeciesToGraph = JZAZBXYEZ;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_IONIZEGRAPHS:
GlobalSpeciesToGraph = IONIZEGRAPH;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_OHMS:
GlobalSpeciesToGraph = OHMSLAW;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_DTGRAPH:
GlobalSpeciesToGraph = DTGRAPH;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_ACCELGRAPH:
GlobalSpeciesToGraph = ACCELGRAPHS;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_TENSOROHMS:
GlobalSpeciesToGraph = OHMS2;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_ACCELRELZ:
GlobalSpeciesToGraph = ARELZ;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_SIGMAEJ:
i = wmId - ID_DISPLAY_NEUT;
GlobalSpeciesToGraph = i;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_HELP_ABOUT:
DialogBox(hInst, MAKEINTRESOURCE(IDD_ABOUTBOX), hWnd, About);
break;
case ID_FILE_EXIT:
DestroyWindow(hWnd);
break;
case ID_FILE_SAVECAMERA:
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
memcpy(szfilter, "All\0*.*\0cam\0*.CAM\0\0", 19); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_OVERWRITEPROMPT;
ofn.lpstrTitle = NULL;
if (GetSaveFileName(&ofn) == TRUE)
{
printf("\nsaving camera...");
fp = fopen(ofn.lpstrFile, "wt");
if (fp == 0) {
printf("save failed.\n");
}
else {
fprintf(fp, "%f %f %f ", GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
fprintf(fp, "%f %f %f ", GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
fprintf(fp, "%f %f %f ", GlobalEye.x, GlobalEye.y, GlobalEye.z);
fprintf(fp, "%f %f %f ", GlobalPlanLookat.x, GlobalPlanLookat.y, GlobalPlanLookat.z);
fclose(fp);
printf("done\n");
};
}
else {
printf("there was an issue\n");
};
break;
case ID_FILE_LOADCAMERA:
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
memcpy(szfilter, "All\0*.*\0*.cam\0*.Cam\0\0", 21); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0*.Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
{
printf("\nloading camera...");
fp = fopen(ofn.lpstrFile, "rt");
if (fp == 0) {
printf("failed.\n");
}
else {
rewind(fp);
fscanf(fp, "%f %f %f ", &(GlobalPlanEye.x), &(GlobalPlanEye.y), &(GlobalPlanEye.z));
fscanf(fp, "%f %f %f ", &(GlobalLookat.x), &(GlobalLookat.y), &(GlobalLookat.z));
fscanf(fp, "%f %f %f ", &(GlobalEye.x), &(GlobalEye.y), &(GlobalEye.z));
fscanf(fp, "%f %f %f ", &(GlobalPlanLookat.x), &(GlobalPlanLookat.y), &(GlobalPlanLookat.z));
fclose(fp);
};
RefreshGraphs(*pX, GlobalSpeciesToGraph); // sends data to graphs AND renders them
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
}
else {
printf("file error camera\n");
};
break;
case ID_FILE_LOADGPU:
// Initialize OPENFILENAME:
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
//
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
//strcpy(szFilter, "All\0*.*\0Dat\0*.DAT\0\0");
memcpy(szfilter, "All\0*.*\0Dat\0*.DAT\0\0", 19); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
{
if (bInvoked_cuSyst == false) {
bInvoked_cuSyst = true;
pX->EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
pX->Average_n_T_to_tris_and_calc_centroids_and_minorpos();
pX->Create4Volleys(); // THIS SHOULD NOT ALWAYS BE HERE !!
printf("Called Create4Volleys! This should be removed in favour of loaded iVolley.\n");
cuSyst_host.InvokeHost();
cuSyst_host.PopulateFromTriMesh(pX);
cuSyst_host2.InvokeHost();
cuSyst_host2.PopulateFromTriMesh(pX);
// transfer information.
PerformCUDA_Invoke_Populate(
&cuSyst_host,
NUMVERTICES,
pX->InnermostFrillCentroidRadius,
pX->OutermostFrillCentroidRadius,
pX->numStartZCurrentTriangles,
pX->numEndZCurrentTriangles);
};
cuSyst_host.Load(ofn.lpstrFile);
};
printf("Populate *pX\n");
cuSyst_host.PopulateTriMesh(pX);
printf("send to device\n");
cuSyst_host.SendToDevice(cuSyst1);
printf("done\n");
// Debug: redelaun on load:
pX->RefreshVertexNeighboursOfVerticesOrdered();
// pX->Redelaunerize(true, true);
// This isn't actually helpful?
// pX->RefreshVertexNeighboursOfVerticesOrdered();
// pX->X[89450-BEGINNING_OF_CENTRAL].GetTriIndexArray(izTri);
// printf("89450 : %d %d %d %d %d %d \n",
// izTri[0], izTri[1], izTri[2], izTri[3], izTri[4], izTri[5]);
//
pX->EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
// pX->Average_n_T_to_tris_and_calc_centroids_and_minorpos(); // Obviates some of our flip calcs to replace tri n,T
// not sure if needed .. just for calc centroid .. they do soon get wiped out anyway.
cuSyst_host.PopulateFromTriMesh(pX);
cuSyst_host.SendToDevice(cuSyst1); // check this is right
cuSyst2.CopyStructuralDetailsFrom(cuSyst1);
cuSyst3.CopyStructuralDetailsFrom(cuSyst1);
// Let's assume these always carry through during GPU runs.
// It certainly does not work as it stands if you don't populate them all the same, put it that way!!
printf("sent back re-delaunerized system\n");
break;
case ID_FILE_SAVEBINARY:
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
memcpy(szfilter, "All\0*.*\0*.dat\0*.Dat\0\0", 21); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_OVERWRITEPROMPT;
ofn.lpstrTitle = NULL;
// Display the Open dialog box.
if (GetSaveFileName(&ofn) == TRUE)
{
printf("\nsaving system...");
pX->Save(ofn.lpstrFile);
printf("done\n");
}
else {
printf("there was an issue\n");
};
break;
case ID_FILE_SAVETEXT:
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
//
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
//strcpy(szFilter,"All\0*.*\0Text\0*.TXT\0");
memcpy(szfilter, "All\0*.*\0Dat\0*.DAT\0\0", 19); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_OVERWRITEPROMPT;
ofn.lpstrTitle = NULL;
// Display the Open dialog box.
if (GetSaveFileName(&ofn) == TRUE)
{
printf("\nsaving system...");
pX->SaveText(ofn.lpstrFile);
printf("done\n");
}
else {
printf("there was an issue\n");
};
break;
case ID_FILE_LOAD:
// Initialize OPENFILENAME:
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
//
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
//strcpy(szFilter, "All\0*.*\0Dat\0*.DAT\0\0");
memcpy(szfilter, "All\0*.*\0Dat\0*.DAT\0\0", 19); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
{
pX->Load(ofn.lpstrFile);
printf("\ndoing nothing...");
};
break;
case ID_RUN_SIMULATIONSTEPS:
GlobalSwitchBox = 0;
DialogBox(hInst, MAKEINTRESOURCE(IDD_DIALOG1), hWnd, SetupBox);
// that will not return with steps_remaining unset.
if (steps_remaining > 0)
SetTimer(hWnd, 1, 1, NULL); // 1 millisecond delay
break;
case ID_RUN_SIMULATIONSTEPS_CPU:
GlobalSwitchBox = 0;
steps_remaining_CPU = 1;
// that will not return with steps_remaining unset.
if (steps_remaining_CPU > 0)
SetTimer(hWnd, 2, 1, NULL); // 1 millisecond delay
break;
case ID_INITIALISE_ZAPTHEBACK:
Zap_the_back();
printf("done");
RefreshGraphs(*pX, GlobalSpeciesToGraph); // sends data to graphs AND renders them
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_RUN_STOP:
steps_remaining = 0;
steps_remaining_CPU = 0;
break;
case ID_INITIALISE_IONISATIONSTEPS:
break;
default:
return DefWindowProc(hWnd, message, wParam, lParam);
}
break;
case WM_TIMER:
KillTimer(hWnd, wParam);
report_time(0);
if (wParam == 1)
{
if (bInvoked_cuSyst == false) {
bInvoked_cuSyst = true;
pX->EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
pX->Average_n_T_to_tris_and_calc_centroids_and_minorpos();
//
// printf("tri 340: %d %d %d \n%1.14E %1.14E \n%1.14E %1.14E \n%1.14E %1.14E\n",
// pX->T[340].cornerptr[0] - pX->X, pX->T[340].cornerptr[1] - pX->X, pX->T[340].cornerptr[2] - pX->X,
// pX->T[340].cornerptr[0]->pos.x, pX->T[340].cornerptr[0]->pos.y,
// pX->T[340].cornerptr[1]->pos.x, pX->T[340].cornerptr[1]->pos.y,
// pX->T[340].cornerptr[2]->pos.x, pX->T[340].cornerptr[2]->pos.y);
// printf("tri 340 periodic %d \n", pX->T[340].periodic);
// getch();
cuSyst_host.InvokeHost();
cuSyst_host.PopulateFromTriMesh(pX);
cuSyst_host2.InvokeHost();
cuSyst_host2.PopulateFromTriMesh(pX);
// cuSyst_host.Output("n0.txt");
PerformCUDA_Invoke_Populate(
&cuSyst_host,
NUMVERTICES,
pX->InnermostFrillCentroidRadius,
pX->OutermostFrillCentroidRadius,
pX->numStartZCurrentTriangles,
pX->numEndZCurrentTriangles);
}
// Run 1 step:
printf("evaltime %1.9E\n", evaltime);
// PerformCUDA_RunStepsAndReturnSystem_Debug(&cuSyst_host, &cuSyst_host2, pX, &X3, pXnew);
PerformCUDA_RunStepsAndReturnSystem(&cuSyst_host);
// printf("Stamp GPU over CPU y/n:");
// do {
// o = getch();
// } while ((o != 'y') && (o != 'n'));
// printf("%c\n\n", o);
// if (o == 'y')
// Auto-save system:
if (GlobalStepsCounter % DATA_SAVE_FREQUENCY == 0)
{
sprintf(szFile, "auto%d.dat", GlobalStepsCounter);
// SAVE cuSyst:
cuSyst_host.Save(szFile);
}
// even number of steps should lead us back to pX having it
steps_remaining--;
GlobalStepsCounter++;
printf("Done steps: %d || Remaining this run: %d\n\n", GlobalStepsCounter, steps_remaining);
if ((GlobalStepsCounter % GRAPHICS_FREQUENCY == 0) ||
(GlobalStepsCounter % REDELAUN_FREQUENCY == 0) ||
(steps_remaining == 0))
{
cuSyst_host.PopulateTriMesh(pX); // vertex n is populated into the minor array available on CPU
printf("pulled back to host\n");
}
}
else {
pX->Advance(pXnew, &X3);
temp = pX;
pX = pXnew;
pXnew = temp;
steps_remaining_CPU--;
GSCCPU++;
printf("Done steps CPU: %d || Remaining this run: %d\n\n", GSCCPU, steps_remaining_CPU);
sprintf(buf1001, "autosaveCPU%d.dat", GSCCPU);
pX->Save(buf1001);
printf("saved as %s\n", buf1001);
};
printf("%s\n", report_time(1));
if (GlobalStepsCounter % GRAPHICS_FREQUENCY == 0)
{
// make video frames:
for (i = 0; i < NUMAVI; i++)
{
printf("i = %d \n", i);
RefreshGraphs(*pX, GraphFlags[i]); // sends data to graphs AND renders them
// ::PlanViewGraphs1(*pX);
printf(".DISHMOPS.\n");
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
printf("got to here 7\n");
if (DXChk(p_backbuffer_surface->GetDC(&surfdc), 100))
MessageBox(NULL, "GetDC failed", "oh dear", MB_OK);
//SelectObject(surfdc,surfbit);
BitBlt(dibdc, 0, 0, VIDEO_WIDTH, VIDEO_HEIGHT, surfdc, 0, 0, SRCCOPY);
p_backbuffer_surface->ReleaseDC(surfdc);
//GetDIBits(dibdc, dib, 0, VIDEO_HEIGHT, dwBits, &bmi, 0);
// just use lpvBits
// getting hAvi[i] == 0 for the last one.
// But on debug? No such thing? Same.
printf("Adding frame to %d : \n", i);
hresult = WriteFrame(pSinkWriter[i], izStream[i], rtStart);
//hresult = AddAviFrame(hAvi[i], dib);
if (hresult != 0) printf("\n******************************************************* \n"
"hresult = %d\n********************************************** \n", hresult);
};
rtStart += VIDEO_FRAME_DURATION;
// sprintf(szFile, "System_%d", GlobalStepsCounter);
// pX->SaveText(szFile);
};
if (GlobalStepsCounter % (AVI_FILE_PINCHOFF_FREQUENCY * GRAPHICS_FREQUENCY) == 0)
{
for (i = 0; i < NUMAVI; i++)
{
// now have to pinch out avi file and make a new one
pSinkWriter[i]->Finalize();
// CloseAvi(hAvi[i]);
swprintf(buf1000, L"%s%s_%d.mp4", FOLDER, szmp4[i], GlobalStepsCounter);
//hAvi[i] = CreateAvi(buf1000, AVIFRAMEPERIOD, NULL);
SafeRelease(&pSinkWriter[i]);
pSinkWriter[i] = NULL;
rtStart = 0;
if (!SUCCEEDED(InitializeSinkWriter(&(pSinkWriter[i]), &(izStream[i]), szmp4[i])))
{
printf("Failed to create %ls \n", buf1000);
getch();
}
//hresult = SetAviVideoCompression(hAvi[i], dib, &opts, false, hWnd);
//if (hresult != 0) printf("\n******************************************************* \n"
// "SetAviVideoCompression: hresult = %d\n********************************************** \n", hresult);
};
};
RefreshGraphs(*pX,GlobalSpeciesToGraph); // sends data to graphs AND renders them
Direct3D.pd3dDevice->Present( NULL, NULL, NULL, NULL );
if (GlobalStepsCounter % REDELAUN_FREQUENCY == 0)
{
Setup_residual_array(); // We have not specifically checked that cuSyst1 is the
// most up-to-date, but it doesn't matter really.
pX->RefreshVertexNeighboursOfVerticesOrdered();
long iFlips = pX->Redelaunerize(true, true);
// Send back to GPU:
pX->EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
// Appears in lots of places so hard to believe data is not updated.
// There is no wrapping on GPU?... or is there?
// Need to debug inside routine and find out what it is doing for these two triangles.
// pX->Average_n_T_to_tris_and_calc_centroids_and_minorpos(); // Obviates some of our flip calcs to replace tri n,T
// not sure if needed .. just for calc centroid .. they do soon get wiped out anyway.
cuSyst_host.PopulateFromTriMesh(pX);// 1. Does it update lists? --- some had to be updated on CPU first.
// Seems to copy structural information as well as data. n is copied from n_minor on CPU.
//cuSyst1.SendToHost(cuSyst_host2);
//cuSyst_host.ReportDifferencesHost(cuSyst_host2);
cuSyst_host.SendToDevice(cuSyst1);
cuSyst2.CopyStructuralDetailsFrom(cuSyst1);
cuSyst3.CopyStructuralDetailsFrom(cuSyst1);
// Let's assume these always carry through during GPU runs.
// It certainly does not work as it stands if you don't populate them all the same, put it that way!!
// We don't actually know which system is pointed to by pX1 that is the initial system
// for the steps --- so just copy it over all of them
cuSyst_host.SendToDevice(cuSyst2);
cuSyst_host.SendToDevice(cuSyst3);
// There almost certainly is a better way. But this is unimportant for now.
printf("sent back re-delaunerized system\n");
// Now reset A values more carefully in the sent-back system:
if (iFlips == 0) {
printf(" NO DELAUNAY FLIPS");
for (int sj = 0; sj < 10; sj++) printf("-\n");
}
if (iFlips > 0) {
Go_visit_the_other_file();
};
};
if (steps_remaining > 0) {
SetTimer(hWnd, 1, DELAY_MILLISECS, NULL);
printf("Waiting %d milliseconds to allow user input.\n", DELAY_MILLISECS);
};
if (steps_remaining_CPU > 0) {
SetTimer(hWnd, 2, DELAY_MILLISECS, NULL);
printf("Waiting %d milliseconds to allow user input.\n", DELAY_MILLISECS);
};
/*
if (wParam == 1) {
sprintf(buf1000, "autosaveGPU%d.dat", GlobalStepsCounter);
} else {
sprintf(buf1000, "autosaveCPU%d.dat", GSCCPU);
}
pX->Save(buf1000);
printf("saved as %s\n", buf1000);
lowest_vez = 0.0;
iLow = 0;
pTri = pX->T;
for (iMinor = 0; iMinor < BEGINNING_OF_CENTRAL; iMinor++)
{
if ((pTri->u8domain_flag == DOMAIN_TRIANGLE) && (pX->pData[iMinor].vez < lowest_vez)) {
lowest_vez = pX->pData[iMinor].vez;
iLow = iMinor;
}
++pTri;
}
printf("Tris: lowest_vez %1.14E iLow %d \n", lowest_vez, iLow);
iLow = 0;
lowest_vez = 0.0;
pVertex = pX->X;
for (; iMinor < NMINOR; iMinor++)
{
if ((pVertex->flags == DOMAIN_VERTEX) && (pX->pData[iMinor].vez < lowest_vez)) {
lowest_vez = pX->pData[iMinor].vez;
iLow = iMinor;
}
++pVertex;
}
printf("Vertices: lowest_vez %1.14E iLow %d \n\n", lowest_vez, iLow);
printf("save ascii?");
do {
o = getch();
} while ((o != 'y') && (o != 'n'));
printf("%c\n", o);
if (o == 'y') {
sprintf(buf1000, "SaveGPUtext1_trackedAA");
pX->SaveText(buf1000);
printf("Ascii file saved %s.\n",buf1000);
}
*/
printf("steps_remaining GPU: %d CPU: %d\n",steps_remaining, steps_remaining_CPU);
break;
case WM_KEYDOWN:
switch (wParam)
{
case 'W':
GlobalEye.z += 1.0f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'S':
GlobalEye.z -= 1.0f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'A':
GlobalEye.x -= 0.8f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'D':
GlobalEye.x += 0.8f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'E':
GlobalEye.y += 0.8f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'C':
GlobalEye.y -= 0.8f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'V':
GlobalLookat.z -= 0.4f;
printf("GlobalEye %f %f %f GlobalLookat %f %f %f\n",
GlobalEye.x, GlobalEye.y, GlobalEye.z, GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case 'R':
GlobalLookat.z += 0.4f;
printf("GlobalEye %f %f %f GlobalLookat %f %f %f\n",
GlobalEye.x, GlobalEye.y, GlobalEye.z, GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case 'F':
GlobalLookat.x -= 0.4f;
printf("GlobalEye %f %f %f GlobalLookat %f %f %f\n",
GlobalEye.x, GlobalEye.y, GlobalEye.z, GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case 'G':
GlobalLookat.x += 0.4f;
printf("GlobalEye %f %f %f GlobalLookat %f %f %f\n",
GlobalEye.x, GlobalEye.y, GlobalEye.z, GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case 'T':
GlobalLookat.y += 0.4f;
printf("GlobalLookat %f %f %f\n",
GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case 'B':
GlobalLookat.y -= 0.4f;
printf("GlobalLookat %f %f %f\n",
GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case '+':
GlobalCutaway = !GlobalCutaway;
break;
case 'Y':
case '<':
GlobalEye.x = -10.4; GlobalEye.y = 16.4; GlobalEye.z = 44.0;
GlobalLookat.x = -3.6; GlobalLookat.y = 3.0; GlobalLookat.z = 72.2;
printf("GlobalEye %f %f %f GlobalLookat %f %f %f\n",
GlobalEye.x, GlobalEye.y, GlobalEye.z, GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
GlobalPlanEye.x = 7.1; GlobalPlanEye.y = 11.5; GlobalPlanEye.z = 71.35;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case '_':
case '-':
case '>':
GlobalPlanEye.x = 7.0; GlobalPlanEye.y = 14.0; GlobalPlanEye.z = 71.0;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'U':
GlobalPlanEye.z += 0.6f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'J':
GlobalPlanEye.z -= 0.6f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'H':
GlobalPlanEye.x -= 0.6f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'K':
GlobalPlanEye.x += 0.6f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'I':
GlobalPlanEye.y *= 1.25f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'M':
GlobalPlanEye.y *= 0.8f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'N':
GlobalboolDisplayMeshWireframe = !GlobalboolDisplayMeshWireframe;
//Graph1.boolDisplayMeshWireframe = (!(Graph1.boolDisplayMeshWireframe));
break;
case '9':
GlobalRenderLabels = false;
break;
case '5':
GlobalRenderLabels = true;
GlobalWhichLabels = 0;// iTri
break;
case '8':
GlobalRenderLabels = true;
GlobalWhichLabels = 1;//T
break;
case '7':
GlobalRenderLabels = true;
GlobalWhichLabels = 2;//v
break;
case '6':
GlobalRenderLabels = true;
GlobalWhichLabels = 3; //n
break;
case '1':
GlobalColoursPlanView = 1;//v
break;
case '4':
GlobalColoursPlanView = 0;//nothing
break;
case '2':
GlobalColoursPlanView = 2;//T
break;
case '0':
steps_remaining = 0;
break;
case 'Q':
newEye.z += 5.0f;
printf("newEye.z %1.9E\n", newEye.z);
break;
case 'P':
newEye.z -= 5.0f;
printf("newEye.z %1.9E\n", newEye.z);
break;
case 'X':
newEye.y += 5.0f;
printf("newEye.y %1.9E\n", newEye.y);
break;
case 'Z':
newEye.y -= 5.0f;
printf("newEye.y %1.9E\n", newEye.y);
break;
case 'O':
newLookat.z -= 3.0f;
printf("newLookat.z %1.9E\n", newLookat.z);
break;
case ';':
case ':':
newLookat.z += 3.0f;
printf("newLookat.z %1.9E\n", newLookat.z);
break;
default:
return DefWindowProc(hWnd, message, wParam, lParam);
};
//PlanViewGraphs1(*pX);
RefreshGraphs(*pX, GlobalSpeciesToGraph); // sends data to graphs AND renders them
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case WM_PAINT:
// Not sure, do we want to do this?
// RefreshGraphs(*pX,); // sends data to graphs AND renders them
GetUpdateRect(hWnd, &rect, FALSE);
if (Direct3D.pd3dDevice != NULL)
Direct3D.pd3dDevice->Present(&rect, &rect, NULL, NULL);
ValidateRect(hWnd, NULL);
break;
case WM_DESTROY:
DeleteObject(dib);
DeleteDC(dibdc);
for (i = 0; i < NUMAVI; i++)
{
pSinkWriter[i]->Finalize();
SafeRelease(&(pSinkWriter[i]));
}
// CloseAvi(hAvi[i]);
// _controlfp_s(0, cw, _MCW_EM); // Line A
PerformCUDA_Revoke();
MFShutdown();
CoUninitialize();
PostQuitMessage(0);
break;
default:
return DefWindowProc(hWnd, message, wParam, lParam);
}
return 0;
}
// Message handler for about box.
// Dialog procedure for the Help -> About box.
// Accepts the dialog on init, dismisses it on OK or Cancel, and returns
// FALSE for every message it does not handle (default dialog processing).
INT_PTR CALLBACK About(HWND hDlg, UINT message, WPARAM wParam, LPARAM lParam)
{
	UNREFERENCED_PARAMETER(lParam);

	if (message == WM_INITDIALOG)
		return (INT_PTR)TRUE;

	if (message == WM_COMMAND)
	{
		WORD const cmd = LOWORD(wParam);
		if ((cmd == IDOK) || (cmd == IDCANCEL))
		{
			// Close the dialog, reporting which button dismissed it.
			EndDialog(hDlg, cmd);
			return (INT_PTR)TRUE;
		};
	};
	return (INT_PTR)FALSE;
}
// Dialog procedure for the run-steps / timestep dialog (IDD_DIALOG1).
// GlobalSwitchBox == 0 : the edit box sets steps_remaining (nonnegative integer).
// GlobalSwitchBox != 0 : the edit box sets a new timestep h (positive real).
// The dialog stays open until a valid value is entered or it is cancelled.
INT_PTR CALLBACK SetupBox(HWND hDlg, UINT message, WPARAM wParam, LPARAM lParam)
{
	UNREFERENCED_PARAMETER(lParam);
	char buffer[2048];
	char string[1024];
	real newh;
	switch (message)
	{
	case WM_INITDIALOG:
		// When asking for a new h, show the current value in the prompt.
		sprintf(buffer, "New h? (present = %1.10E)", h);
		if (GlobalSwitchBox)
			SetDlgItemText(hDlg, IDC_STATIC, buffer);
		return (INT_PTR)TRUE;
	case WM_COMMAND:
		if (LOWORD(wParam) == IDOK)
		{
			// try to read data from edit control:
			GetDlgItemText(hDlg, IDC_EDIT1, buffer, 2048);
			if (GlobalSwitchBox == 0)
			{
				steps_remaining = atoi(buffer);
				if (steps_remaining >= 0)
				{
					EndDialog(hDlg, LOWORD(wParam));
				}
				else {
					// BUGFIX: MessageBox takes (hWnd, text, caption, type) --
					// the instruction is the body text, not the title.
					MessageBox(NULL, "Enter a nonnegative integer.", "Incorrect value", MB_OK);
				};
			}
			else {
				newh = atof(buffer);
				// atof returns 0.0 on parse failure, so this branch also
				// rejects unparseable input, not only negative h.
				if (newh > 0.0)
				{
					EndDialog(hDlg, LOWORD(wParam));
					sprintf(string, "h = %1.10E\n", newh);
					h = newh;
					MessageBox(NULL, string, "New value of h", MB_OK);
				}
				else {
					MessageBox(NULL, "h must be a positive number.", "Incorrect value", MB_OK);
				};
			};
			return (INT_PTR)TRUE;
		}
		break;
	}
	return (INT_PTR)FALSE;
}
| 3e6755d6f0214de3b87a0cab5b2d32c63c38a039.cu | #define real double
#define f64 double
#define HISTORY 4
#include <stdlib.h>
#include <stdio.h>
#include "lapacke.h"
// we must find out what causes graphics crash during SPECIES_ION
/* Auxiliary routines prototypes */
extern void print_matrix(char* desc, lapack_int m, lapack_int n, double* a, lapack_int lda);
extern void print_int_vector(char* desc, lapack_int n, lapack_int* a);
extern void Go_visit_the_other_file();
extern void Setup_residual_array();
#include "headers.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <stdio.h>
#include <windows.h>
#include "resource.h"
#include "flags.h"
#include "FFxtubes.h"
//#include "cppconst.h"
#include "cuda_struct.h"
#include "constant.h"
#include "d3d.h"
#include <d3dx9.h>
#include <dxerr.h>
#include <commdlg.h> // probably used by avi_utils
#include "surfacegraph_tri.h"
//#include "avi_utils.cpp" // for making .avi
#include "kernel.h"
#include <mfapi.h>
#include <mfidl.h>
#include <Mfreadwrite.h>
#include <mferror.h>
#include <iostream>
#include <shlwapi.h>
#include <combaseapi.h>
#pragma comment(lib, "mfreadwrite")
#pragma comment(lib, "mfplat")
#pragma comment(lib, "mf")
#pragma comment(lib, "mfuuid")
// Release a COM-style interface pointer (if non-null) and null out the
// caller's pointer so it cannot be double-Released or used after free.
template <class T>
void SafeRelease(T **ppT)
{
	T *p = *ppT;
	if (p != NULL)
	{
		p->Release();
		*ppT = NULL;
	}
}
//=======================================================
// Declarations of functions:
void RefreshGraphs(TriMesh & X, const int iGraphsFlag);
LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);
INT_PTR CALLBACK About(HWND, UINT, WPARAM, LPARAM);
INT_PTR CALLBACK SetupBox(HWND, UINT, WPARAM, LPARAM);
extern f64 GetEzShape__(f64 r);
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
extern void Zap_the_back();
extern f64 * temp_array_host;
extern OhmsCoeffs * p_OhmsCoeffs_host;
extern f64 * p_graphdata1_host,* p_graphdata2_host,* p_graphdata3_host, *p_graphdata4_host, *p_graphdata5_host, *p_graphdata6_host;
extern f64 * p_Tgraph_host[9];
extern f64 * p_accelgraph_host[12];
extern f64 * p_Ohmsgraph_host[20];
extern f64 * p_arelz_graph_host[12];
// Global variables:
// =================
//extern f64_vec3 * p_B_host;
extern f64 EzStrength_;
extern cuSyst cuSyst1, cuSyst2, cuSyst3;
extern D3D Direct3D;
extern f64 * p_temphost1, *p_temphost2,
*p_temphost3, *p_temphost4, *p_temphost5, *p_temphost6;
extern __device__ f64 * p_LapCoeffself;
extern __device__ f64 * p_temp1;
extern __device__ long * p_longtemp;
extern __device__ f64 * p_Az, *p_LapAz;
float xzscale;
bool bCullNone = false;
bool bGlobalsave = false;
int GlobalSwitchBox = 0;
int iGlobalScratch;
real GlobalHeightScale;
int GlobalSpeciesToGraph = SPECIES_ION;
int GlobalWhichLabels = 0;
bool GlobalRenderLabels = false;
int GlobalColoursPlanView = 0;
bool GlobalBothSystemsInUse;
bool GlobalCutaway = true;
unsigned int cw; // control word for floating point hardware exception hiding
TriMesh * pX, *pXnew;
TriMesh X1, X2, X3, X4;
cuSyst cuSyst_host, cuSyst_host2;
D3DXVECTOR3 GlobalEye, GlobalLookat, GlobalPlanEye, GlobalPlanEye2, GlobalPlanLookat,
GlobalPlanLookat2, GlobalEye2, GlobalLookat2;
D3DXVECTOR3 newEye;
D3DXVECTOR3 newLookat;
IDirect3DSurface9* p_backbuffer_surface;
long steps_remaining, GlobalStepsCounter, steps_remaining_CPU;
real evaltime, h;
extern real GlobalIzElasticity;
FILE * massfile, *maxfile;
// Global Variables:
HINSTANCE hInst; // current instance
// window vars:
HWND hWnd, hwndGraphics;
WNDCLASSEX wcex;
TCHAR szTitle[1024]; // The title bar text
TCHAR szWindowClass[1024]; // the main window class name
char Functionalfilename[1024];
int GlobalGraphSetting[8];
surfacegraph Graph[8]; // why was it 5? // 5th one can be whole thing.
float Historic_max[512][HISTORY]; // if max is falling, use historic maximum for graph.
float Historic_min[512][HISTORY];
int Historic_powermax[512];
int Historic_powermin[512]; // just store previous value only.
bool flaglist[NMINOR];
bool boolGlobalHistory, GlobalboolDisplayMeshWireframe;
// avi file -oriented variables
int const NUMAVI = 9;
//HAVI hAvi[NUMAVI + 1]; // does it work without OHMSLAW? // OHMSLAW,
int const GraphFlags[NUMAVI] = { SPECIES_ION, OVERALL, JZAZBXYEZ, ONE_D, IONIZEGRAPH,
DTGRAPH, ACCELGRAPHS, OHMS2, ARELZ};
WCHAR szmp4[NUMAVI][128] = { L"Elec",L"Total",L"JzAzBxy",L"Test",
L"Ionize", L"dT", L"Accel", L"Ohms", L"arelz"};
//AVICOMPRESSOPTIONS opts;
int counter;
HBITMAP surfbit, dib;
HDC surfdc, dibdc;
LPVOID lpvBits;
BITMAPINFO bitmapinfo;
DWORD dwBits[VIDEO_HEIGHT*VIDEO_WIDTH];
f64 graphdata[20][10000];
f64 graph_r[10000];
int numgraphs = 4;
int num_graph_data_points = 10000;
f64 maximum[20];
f64 truemax[20];
extern TriMesh * pTriMesh;
// Simple stopwatch built on clock().
// action == 0 : reset the timer; the returned string is not refreshed.
// action != 0 : format the elapsed time since the last reset as "x.xxxx sec.".
// Returns a pointer to a static buffer, so the result is overwritten by the
// next reporting call and the function is not thread-safe.
char * report_time(int action)
{
	// Zero-initialized so a caller that reads the string before the first
	// report still gets a valid (empty) C string, not garbage.
	static char timebuffer[255] = "";
	static clock_t start;
	double timenow;

	if (action == 0)
	{
		start = clock();
	}
	else
	{
		timenow = ((double)(clock() - start) / (double)CLOCKS_PER_SEC);
		/* create a null-terminated string */
		sprintf(timebuffer, "%6.4f sec.", timenow);
	};
	return &(timebuffer[0]);
};
// Area of the triangle (pos0, pos1, pos2) via the trapezoid (shoelace)
// formula; the absolute value is taken so vertex winding order does not
// matter. Same term order as before, so results are bit-identical.
f64 GetTriangleArea(f64_vec2 pos0, f64_vec2 pos1, f64_vec2 pos2)
{
	f64 term01 = (pos0.x + pos1.x)*(pos1.y - pos0.y);
	f64 term12 = (pos1.x + pos2.x)*(pos2.y - pos1.y);
	f64 term20 = (pos0.x + pos2.x)*(pos0.y - pos2.y);
	f64 signedArea = 0.5*(term01 + term12 + term20);
	return fabs(signedArea);
}
// Format constants
//const UINT32 VIDEO_WIDTH = 640;
//const UINT32 VIDEO_HEIGHT = 480;
const UINT32 VIDEO_FPS = 5;
const UINT64 VIDEO_FRAME_DURATION = 10 * 1000 * 1000 / VIDEO_FPS; // ?
const UINT32 VIDEO_BIT_RATE = 1048768;
const UINT32 VIDEO_PELS = VIDEO_WIDTH * VIDEO_HEIGHT;
const UINT32 VIDEO_FRAME_COUNT = 5;
// with 50 frames per nanosecond and 30 nanoseconds in file, it's 1500
// But to begin let's say 5?
const GUID VIDEO_INPUT_FORMAT = MFVideoFormat_RGB24;
// Create a Media Foundation sink writer that encodes uncompressed RGB24
// frames (VIDEO_WIDTH x VIDEO_HEIGHT at VIDEO_FPS) into an H.264 stream
// at VIDEO_BIT_RATE, writing to the file named by szFilename.
// On success *ppWriter holds an AddRef'd writer already placed in the
// BeginWriting state and *pStreamIndex the video stream index to pass to
// WriteFrame; the caller must Finalize() and Release() the writer when done.
// On failure both outputs are left cleared and the first failing HRESULT
// is returned. Assumes MFStartup has already been called.
HRESULT InitializeSinkWriter(
	IMFSinkWriter **ppWriter,
	DWORD *pStreamIndex,
	LPCWSTR szFilename)
{
	*ppWriter = NULL;
	*pStreamIndex = NULL;  // NB: NULL used as 0 for a DWORD out-parameter
	IMFSinkWriter *pSinkWriter = NULL;
	IMFMediaType *pMediaTypeOut = NULL;
	IMFMediaType *pMediaTypeIn = NULL;
	DWORD streamIndex;
	HRESULT hr = MFCreateSinkWriterFromURL(szFilename, NULL, NULL, &pSinkWriter);
	// Set the output media type (what gets written to the file: H.264).
	// Each step runs only if every previous step succeeded, so hr always
	// holds the first failure.
	if (SUCCEEDED(hr))
		hr = MFCreateMediaType(&pMediaTypeOut);
	if (SUCCEEDED(hr))
		hr = pMediaTypeOut->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
	if (SUCCEEDED(hr))
		hr = pMediaTypeOut->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
	// whereas webcam capture sample says WMMEDIASUBTYPE_I420
	if (SUCCEEDED(hr))
		hr = pMediaTypeOut->SetUINT32(MF_MT_AVG_BITRATE, VIDEO_BIT_RATE);
	if (SUCCEEDED(hr))
		hr = pMediaTypeOut->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
	if (SUCCEEDED(hr))
		hr = MFSetAttributeSize(pMediaTypeOut, MF_MT_FRAME_SIZE, VIDEO_WIDTH, VIDEO_HEIGHT);
	if (SUCCEEDED(hr))
		hr = MFSetAttributeRatio(pMediaTypeOut, MF_MT_FRAME_RATE, VIDEO_FPS, 1);
	if (SUCCEEDED(hr))
		hr = MFSetAttributeRatio(pMediaTypeOut, MF_MT_PIXEL_ASPECT_RATIO, 1, 1);
	if (SUCCEEDED(hr))
		hr = pSinkWriter->AddStream(pMediaTypeOut, &streamIndex);
	// Set the input media type (what we feed in: raw RGB24 frames).
	if (SUCCEEDED(hr))
		hr = MFCreateMediaType(&pMediaTypeIn);
	if (SUCCEEDED(hr))
		hr = pMediaTypeIn->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
	if (SUCCEEDED(hr))
		hr = pMediaTypeIn->SetGUID(MF_MT_SUBTYPE, VIDEO_INPUT_FORMAT);
	if (SUCCEEDED(hr))
		hr = pMediaTypeIn->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
	// should that be 0 ?
	if (SUCCEEDED(hr))
		hr = MFSetAttributeSize(pMediaTypeIn, MF_MT_FRAME_SIZE, VIDEO_WIDTH, VIDEO_HEIGHT);
	if (SUCCEEDED(hr))
		hr = MFSetAttributeRatio(pMediaTypeIn, MF_MT_FRAME_RATE, VIDEO_FPS, 1);
	if (SUCCEEDED(hr))
		hr = MFSetAttributeRatio(pMediaTypeIn, MF_MT_PIXEL_ASPECT_RATIO, 1, 1);
	if (SUCCEEDED(hr))
		hr = pSinkWriter->SetInputMediaType(streamIndex, pMediaTypeIn, NULL);
	// Tell the sink writer to start accepting data.
	if (SUCCEEDED(hr)) {
		hr = pSinkWriter->BeginWriting();
	}
	// Return the pointer to the caller.
	// AddRef here so the caller's reference survives the blanket
	// SafeRelease cleanup below; on failure everything is released
	// and the out-parameters stay cleared.
	if (SUCCEEDED(hr)) {
		*ppWriter = pSinkWriter;
		(*ppWriter)->AddRef();
		*pStreamIndex = streamIndex;
	}
	SafeRelease(&pSinkWriter);
	SafeRelease(&pMediaTypeOut);
	SafeRelease(&pMediaTypeIn);
	return hr;
}
// Encode one video frame: wraps the pixel data currently in the global
// lpvBits buffer (VIDEO_WIDTH x VIDEO_HEIGHT at 3 bytes per pixel, RGB24)
// in a media sample stamped at rtStart with duration VIDEO_FRAME_DURATION,
// and hands it to stream streamIndex of the given sink writer.
// Returns the first failing HRESULT (S_OK on success).
HRESULT WriteFrame(
	IMFSinkWriter *pWriter,
	DWORD streamIndex,
	const LONGLONG& rtStart // Time stamp.
)
{
	IMFSample *pSample = NULL;
	IMFMediaBuffer *pBuffer = NULL;
	// SHOULD THIS BE 3 * ?
	const LONG cbWidth = 3 * VIDEO_WIDTH; // 4 bytes --- why?
	// so cbWidth is width in bytes (3 bytes/pixel matches VIDEO_INPUT_FORMAT = RGB24)
	const DWORD cbBuffer = cbWidth * VIDEO_HEIGHT;
	BYTE *pData = NULL;
	// Create a new memory buffer.
	HRESULT hr = MFCreateMemoryBuffer(cbBuffer, &pBuffer);
	// Lock the buffer and copy the video frame to the buffer.
	if (SUCCEEDED(hr))
		hr = pBuffer->Lock(&pData, NULL, NULL);
	if (SUCCEEDED(hr))
		hr = MFCopyImage(
			pData, // Destination buffer.
			cbWidth, // Destination stride.
			(BYTE *)lpvBits,//(BYTE*)videoFrameBuffer, // First row in source image.
			cbWidth, // Source stride.
			cbWidth, // Image width in bytes.
			//I added x 3
			VIDEO_HEIGHT // Image height in pixels.
		);
	// Unlock even if the copy failed; Lock succeeded if pData was obtained.
	if (pBuffer) pBuffer->Unlock();
	// Set the data length of the buffer.
	if (SUCCEEDED(hr))
		hr = pBuffer->SetCurrentLength(cbBuffer);
	// Create a media sample and add the buffer to the sample.
	if (SUCCEEDED(hr))
		hr = MFCreateSample(&pSample);
	if (SUCCEEDED(hr))
		hr = pSample->AddBuffer(pBuffer);
	// Set the time stamp and the duration.
	if (SUCCEEDED(hr))
		hr = pSample->SetSampleTime(rtStart);
	if (SUCCEEDED(hr))
		hr = pSample->SetSampleDuration(VIDEO_FRAME_DURATION);
	// Send the sample to the Sink Writer.
	if (SUCCEEDED(hr))
		hr = pWriter->WriteSample(streamIndex, pSample);
	// Drop our local references; the writer holds its own while encoding.
	SafeRelease(&pSample);
	SafeRelease(&pBuffer);
	return hr;
}
// Currently a deliberate no-op. The disabled code below computed, per
// domain/outermost vertex: bulk number density n, mass-weighted mean
// velocity v, mixture temperature T, and the ion mass fraction (stashed in
// Temp.x). Retained commented-out for reference.
void TriMesh::CalculateTotalGraphingData()
{
/*long iVertex;
Vertex * pVertex = X;
for (iVertex = 0; iVertex < numVertices; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pVertex->n = (pVertex->Neut.mass + pVertex->Ion.mass) / pVertex->AreaCell;
pVertex->v = (m_n*pVertex->Neut.mom + m_ion * pVertex->Ion.mom + m_e * pVertex->Elec.mom) /
(m_n*pVertex->Neut.mass + m_ion * pVertex->Ion.mass + m_e * pVertex->Elec.mass);
pVertex->T = (pVertex->Neut.heat + pVertex->Ion.heat + pVertex->Elec.heat) /
(pVertex->Neut.mass + pVertex->Ion.mass + pVertex->Elec.mass);
pVertex->Temp.x = pVertex->Ion.mass / (pVertex->Neut.mass + pVertex->Ion.mass);
};
++pVertex;
}*/
}
// Currently a deliberate no-op. The disabled code below computed, per
// domain/outermost vertex, the current density J = q*(ion mom - elec mom)
// / cell area into Temp, zeroing Temp elsewhere. Retained commented-out
// for reference.
void TriMesh::Setup_J()
{
/*long iVertex;
Vertex * pVertex = X;
for (iVertex = 0; iVertex < numVertices; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pVertex->Temp = q * (pVertex->Ion.mom - pVertex->Elec.mom) / pVertex->AreaCell;
}
else {
memset(&(pVertex->Temp), 0, sizeof(Vector3));
}
++pVertex;
}*/
}
// Draws this surface graph for mesh pX:
//  - heightflag / var_ptr_0 select the field used for surface height;
//  - colourflag / var_ptr_c select the field and shader technique for colour;
//  - bDisplayInner toggles rendering of the inner mesh region;
//  - code is an opaque graph id forwarded to SetDataWithColour / Render.
// var_ptr_0 and var_ptr_c must point inside pX->pData: they are converted
// to real-offsets from the start of the plasma_data array and passed on.
void surfacegraph::DrawSurface(const char * szname,
const int heightflag,
const real * var_ptr_0,
const int colourflag,
const real * var_ptr_c,
const bool bDisplayInner,
const int code, // graph code, to pass to called routines - sometimes useful
const TriMesh * pX // for passing to SetDataWithColour and Render
// and for working out offsets
)
{
// replaced CreateSurfaceGraphs.
// I think this is about the right balance.
char buff[256];
// Express the data pointers as element offsets into pX->pData so that
// SetDataWithColour can re-derive the fields for every vertex.
real * temprealptr = (real *)(pX->pData);
long offset = var_ptr_0 - temprealptr;
long offset_c = var_ptr_c - temprealptr;
// Pick the effect technique to match the colour scheme.
// NOTE(review): there is no default case -- an unrecognised colourflag
// (e.g. a plain colour-mesh value) leaves mhTech at whatever the previous
// draw selected; confirm that is intended.
switch (colourflag) {
case VELOCITY_COLOUR:
this->mhTech = mFX->GetTechniqueByName("VelociTech");
break;
case SEGUE_COLOUR:
this->mhTech = mFX->GetTechniqueByName("SegueTech");
break;
case CURRENT_COLOUR:
this->mhTech = mFX->GetTechniqueByName("XYZTech");
break;
case AZSEGUE_COLOUR:
mhTech = mFX->GetTechniqueByName("AzSegueTech");
break;
case IONISE_COLOUR:
mhTech = mFX->GetTechniqueByName("IoniseTech");
break;
case PPN_COLOUR:
mhTech = mFX->GetTechniqueByName("ProportionTech"); // 1 = blue
break;
};
// Usual settings:
this->boolDisplayShadow = true;
this->boolDisplayMainMesh = true;
this->boolDisplayMeshWireframe = GlobalboolDisplayMeshWireframe;
this->boolClearZBufferBeforeWireframe = false;
// Or try setting true and CULL_CCW to see if this stops it showing "the back of the wireframe"
this->SetEyeAndLookat(GlobalEye, GlobalLookat);
this->boolDisplayScales = true;
this->boolDisplayInnerMesh = bDisplayInner;
// work out whether to display key button:
// NOTE(review): the switch above tests VELOCITY_COLOUR / CURRENT_COLOUR
// while this tests FLAG_VELOCITY_COLOUR / FLAG_CURRENT_COLOUR -- confirm
// each pair shares the same value, otherwise the key button can never
// correspond to the technique actually selected.
if (((colourflag == FLAG_VELOCITY_COLOUR) || (colourflag == FLAG_CURRENT_COLOUR))
&& (bDisplayInner == 0))
{
this->boolDisplayKeyButton = true;
}
else {
this->boolDisplayKeyButton = false;
};
//int const FLAG_COLOUR_MESH = 0;
//int const FLAG_SEGUE_COLOUR = 1;
//int const FLAG_VELOCITY_COLOUR = 2;
//int const FLAG_CURRENT_COLOUR = 3;
//int const FLAG_AZSEGUE_COLOUR = 4;
//int const FLAG_IONISE_COLOUR = 5;
// Hand the data over, then render (with a timestamp caption if enabled).
this->SetDataWithColour(*pX,
colourflag, heightflag, // apparently it's that way round
offset, offset_c,
code);
printf("DrawSurface code %d : calling Render:\n", code);
if (this->bDisplayTimestamp) {
sprintf(buff, "%6.2f ns", evaltime*1.0e9);
this->Render(szname, false, pX, buff);
}
else {
this->Render(szname, false, pX);
};
}
// Renders a panel of 1-D cutaway line graphs into viewport Graph[iWhichGraph].
// 'flag' (ONE_D / DTGRAPH / ACCELGRAPHS / ARELZ / OHMS2 / OHMSLAW) selects
// which family of curves is drawn from the global graphdata[][] arrays,
// scaled per-curve by the global maximum[] (truemax[] is additionally shown
// in the legend for the Ohm's-law families).
// Assumes Create1DGraphingData() has already filled graphdata, graph_r,
// num_graph_data_points and the maxima.
void Draw1Dgraph(int iWhichGraph, int flag)
{
    float const MAXX = 11.0f;
    float const MAXY = 6.0f;
    float const YADJUST = -2.8f;
    char graphname[4][128] = { "Azdot","Azdotdot","Lap Az","-4pi/c Jz" };
    char Tgraphname[9][128] = { "conduction","ionization","viscosity","frictional","interspecies","dTe/dt total","compressive" ,
        "DnT","undefined" };
    char accelgraphname[9][128] = { "dvy/dt total", "v x B", "pressure", "neutral soak","viscosity", "ionization", "advection","grad_y Az" };
    char Ohmsgraphname[20][128] = { "elastic effective fric coeff", "ionization effective fric coeff",
        "thermal pressure y", "electromotive aez-aiz", "thermal force aiz-aez", "v-response T_zy", "v-response T_zz",
        "T_zy * thermal pressure y", "T_zz * electromotive", "T_zz * thermal force", "Predicted vez-viz",
        "Difference: prediction-vrelzk","vrelzk progress",
        "viscous aez-aiz","Predicted Jz","Conductivity sigma_zy","Conductivity sigma_zz",
        "sigma_zz * -electromotive", "Difference: Jz prediction-Jz","$$$" };
    char arelzgraphname[12][128] = { "arelz", "MAR_ion contribution", "MAR_elec contribution",
        "Ez_ext effect", "dAz/dt effect","v x B effect", "thermal force effect", "friction to neutrals",
        "friction_ei", "sum of effects", "difference (error)"};
    char buffer[256];
    float x, z;
    D3DXMATRIXA16 matWorld;
    vertex1 linedata[10000];
    int iGraph;
    D3DCOLOR colourlist[20];
    char namelist[20][256];
    bool bAlternating[20];
    int numgraphs = 0; // FIX: was read uninitialized when 'flag' matched no branch below
    memset(bAlternating, 0, sizeof(bool) * 20);

    // Per-family setup: legend text, curve colours, and which curves draw
    // with alternating white dashes (bAlternating).
    if (flag == ONE_D) {
        numgraphs = 4;
        for (int i = 0; i < numgraphs; i++)
            sprintf(namelist[i],"%s : graph max&min = +- %1.3E",
                graphname[i], maximum[i]);
        colourlist[0] = 0xff000000;
        colourlist[1] = 0xff0022ff;
        colourlist[2] = 0xffff0055;
        colourlist[3] = 0xff22ff00;
    };
    if (flag == DTGRAPH) {
        numgraphs = 8;
        for (int i = 0; i < numgraphs; i++)
            sprintf(namelist[i], "%s : graph max&min = +- %1.3E",
                Tgraphname[i], maximum[i]);
        colourlist[0] = 0xffffaa00; // conduction: orange red
        colourlist[1] = 0xff0000ff; // ionization: royal blue
        colourlist[2] = 0xff009999; // viscosity: aqua
        colourlist[3] = 0xffd500ff; // resistive: heliotrope
        colourlist[4] = 0xff00ff00; // soak: green
        colourlist[5] = 0xff000000; // total
        colourlist[6] = 0xff906545; // compressive: brown
        colourlist[7] = 0xff888888; // DnT: grey. FIX: was uninitialized (numgraphs is 8)
    };
    if (flag == ACCELGRAPHS)
    {
        numgraphs = 8;
        for (int i = 0; i < numgraphs; i++)
            sprintf(namelist[i], "%s : graph max&min = +- %1.3E",
                accelgraphname[i], maximum[i]);
        colourlist[0] = 0xff000000; // total:
        colourlist[1] = 0xffd500ff; // vxB: heliotrope
        colourlist[2] = 0xffff2200; // pressure: red
        colourlist[3] = 0xff00ff33; // soak: use green
        colourlist[4] = 0xff009999; // viscosity: aqua
        colourlist[5] = 0xff0000ff; // ionization : royal blue
        colourlist[6] = 0xff906545; // advection : brown
        colourlist[7] = 0xffeecd00; // grady_Az : olive?
    }
    if (flag == ARELZ)
    {
        numgraphs = 11;
        for (int i = 0; i < numgraphs; i++)
            sprintf(namelist[i], "%s : graph max&min = +- %1.3E",
                arelzgraphname[i], maximum[i]);
        colourlist[0] = 0xff000000; // total:
        colourlist[1] = 0xff009999; // ion visc : aqua
        colourlist[2] = 0xffeecd00; // elec visc : olive
        colourlist[3] = 0xffdada66; // electromotive
        colourlist[4] = 0xff4400ff; // inductive electromotive: indigo
        colourlist[5] = 0xffd500ff; // vxB: heliotrope
        colourlist[6] = 0xffff7700; // "thermal force effect"
        colourlist[7] = 0xff00ff33; // neutral soak :green
        colourlist[8] = 0xff00aa00; // dkgreen e-i friction
        colourlist[9] = 0xffff2299; // pink : sum
        colourlist[10] = 0xff000011; // navy
        bAlternating[10] = true;
    }
    if (flag == OHMS2) {
        numgraphs = 11;
        for (int i = 0; i < numgraphs; i++)
            sprintf(namelist[i], "%s :grmax+- %1.3E own|max| %1.3E",
                Ohmsgraphname[i], maximum[i], truemax[i]);
        colourlist[0] = 0xffcc0033; // elastic fric coeff: maroon
        colourlist[1] = 0xff00aa00; // dkgreen ionization fric coeff
        colourlist[2] = 0xffff0000; // pressure: red
        colourlist[3] = 0xffda00ff; // electromotive: violet
        colourlist[4] = 0xffff7700; // thermal force: orange
        colourlist[5] = 0xff00aadd; bAlternating[5] = true; // Tzy
        colourlist[6] = 0xff0000ff; // Tzz
        colourlist[7] = 0xffff55aa; bAlternating[7] = true;
        colourlist[8] = 0xffda00ff; bAlternating[8] = true;
        colourlist[9] = 0xffffaa00; bAlternating[9] = true;
        colourlist[10] = 0xff000000;
    };
    if (flag == OHMSLAW) {
        numgraphs = 9;
        for (int i = 0; i < numgraphs; i++)
            sprintf(namelist[i], "%s :grmax+- %1.3E own|max| %1.3E",
                Ohmsgraphname[i+10], maximum[i+10], truemax[i+10]);
        colourlist[0] = 0xff000000;
        colourlist[1] = 0xffff3333; // red: difference
        colourlist[2] = 0xffaadd00; // lime yellow: progress
        colourlist[3] = 0xff009999; // viscosity: aqua
        colourlist[4] = 0xffd500ff; // prediction Jz
        colourlist[5] = 0xff00bb33; // green conductivity
        colourlist[6] = 0xff0022ff; // blue conductivity
        colourlist[7] = 0xffda00ff; bAlternating[7] = true; // sigma_zz Ez
        colourlist[8] = 0xff666666; // difference of prediction
    }

    // Radial extent of the horizontal axis.
    f64 rmax = GRAPH1D_MAXR;
    if (flag == ONE_D) rmax = DOMAIN_OUTER_RADIUS;
    f64 rmin = DEVICE_RADIUS_INSULATOR_OUTER - 0.01;
    if (flag == ONE_D) rmin = INNER_A_BOUNDARY;

    Graph[iWhichGraph].SetEyeAndLookat(newEye, newLookat); // sets matView not matProj
    printf("Eye %f %f %f\n", newEye.x, newEye.y, newEye.z);
    Direct3D.pd3dDevice->SetViewport(&(Graph[iWhichGraph].vp));
    D3DXMatrixIdentity(&matWorld);
    Direct3D.pd3dDevice->SetTransform(D3DTS_WORLD, &matWorld);
    Direct3D.pd3dDevice->SetTransform(D3DTS_VIEW, &(Graph[iWhichGraph].matView));
    Direct3D.pd3dDevice->SetTransform(D3DTS_PROJECTION, &(Graph[iWhichGraph].matProj));
    Direct3D.pd3dDevice->Clear(0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER,
        D3DCOLOR_XRGB(250, 255, 250), 1.0f, 0);
    if (SUCCEEDED(Direct3D.pd3dDevice->BeginScene()))
    {
        Direct3D.pd3dDevice->SetFVF(point_fvf);
        real r = 3.44;

        // Grey zero line across the full axis width.
        linedata[0].x = -MAXX;
        linedata[0].z = 3.44*xzscale;
        linedata[0].y = YADJUST;
        linedata[0].colour = 0xff888888; // grey
        linedata[1].x = -linedata[0].x;
        linedata[1].y = YADJUST;
        linedata[1].z = linedata[0].z;
        linedata[1].colour = linedata[0].colour;
        Direct3D.pd3dDevice->DrawPrimitiveUP(D3DPT_LINESTRIP, 1, linedata, sizeof(vertex1));

        // OHMSLAW reads curves 10..18 of graphdata/maximum; the other
        // families read from index 0. (Previously two duplicated loops.)
        const int ofs = (flag == OHMSLAW) ? 10 : 0;
        for (iGraph = 0; iGraph < numgraphs; iGraph++)
        {
            // Legend swatch (two short segments) plus label: first six
            // entries down the left edge, the rest in a second column.
            if (iGraph < 6) {
                linedata[0].x = -MAXX;
                linedata[0].z = 3.44*xzscale;
                linedata[0].y = MAXY + 4.0f - 0.9f*(float)iGraph;
            } else {
                linedata[0].x = 0.8f;
                linedata[0].z = 3.44*xzscale;
                linedata[0].y = MAXY + 4.0f - 0.9f*(float)(iGraph-6);
            }
            linedata[1].x = linedata[0].x + 0.5f;
            linedata[1].y = linedata[0].y;
            linedata[1].z = linedata[0].z;
            linedata[2].x = linedata[0].x + 1.0f;
            linedata[2].y = linedata[0].y;
            linedata[2].z = linedata[0].z;
            linedata[0].colour = colourlist[iGraph];
            linedata[1].colour = linedata[0].colour;
            linedata[2].colour = linedata[0].colour;
            if (bAlternating[iGraph]) linedata[1].colour = 0xffffffff;
            Direct3D.pd3dDevice->DrawPrimitiveUP(D3DPT_LINESTRIP, 2, linedata, sizeof(vertex1));
            Graph[iWhichGraph].RenderLabel2(namelist[iGraph], linedata[2].x + 0.1f, linedata[1].y - 0.3f, linedata[1].z, 0,0xff000000, true);

            // The curve itself: map radius into x on [-MAXX, MAXX] and the
            // data value into y so that +-maximum[] maps to +-MAXY.
            int asdf;
            for (asdf = 0; asdf < num_graph_data_points; asdf++)
            {
                linedata[asdf].x = (float)(MAXX - 2.0*MAXX*((graph_r[asdf] - rmin) /
                    (rmax - rmin)));
                linedata[asdf].y = YADJUST + (float)(MAXY*graphdata[iGraph + ofs][asdf] / maximum[iGraph + ofs]);
                linedata[asdf].z = 3.44f*xzscale;
                linedata[asdf].colour = colourlist[iGraph];
                if ((bAlternating[iGraph]) && (asdf % 3 == 1)) linedata[asdf].colour = 0xffffffff;
            };
            Direct3D.pd3dDevice->DrawPrimitiveUP(D3DPT_LINESTRIP, num_graph_data_points - 1, linedata, sizeof(vertex1));
        };

        // Vertical gridlines at 9 evenly spaced radii, labelled below the axis.
        for (int i = 0; i < 9; i++)
        {
            x = 0.16*(-r*xzscale + 2.0*r*xzscale*(((real)i) / 8.0));
            z = 3.44*xzscale;// (float)(cos(HALFANGLE)*DEVICE_RADIUS_INSULATOR_OUTER)*xzscale;
            linedata[0].x = x; linedata[0].z = z;
            linedata[1].x = x; linedata[1].z = z;
            linedata[0].colour = 0xff220011;
            linedata[1].colour = 0xff220011;
            linedata[0].y = -6.8f + YADJUST;
            linedata[1].y = YADJUST + (((i == 0) || (i == 8)) ? 6.0f : 0.0f); // tall posts at both ends
            Direct3D.pd3dDevice->DrawPrimitiveUP(D3DPT_LINESTRIP, 1, linedata, sizeof(vertex1));
            sprintf(buffer, "%5.2f", rmin + (1.0 - ((real)i) / 8.0)*(rmax - rmin));
            Graph[iWhichGraph].RenderLabel2(buffer, // text
                linedata[0].x,
                YADJUST - 7.6f,
                linedata[0].z, 0);
        };

        // Black axis line spanning the gridline band.
        linedata[0].x = -0.16*r*xzscale;
        linedata[0].y = YADJUST;
        linedata[0].z = 3.44*xzscale;
        linedata[0].colour = 0xff000000;
        linedata[1].x = 0.16*r*xzscale;
        linedata[1].y = YADJUST;
        linedata[1].z = linedata[0].z;
        linedata[1].colour = linedata[0].colour;
        Direct3D.pd3dDevice->DrawPrimitiveUP(D3DPT_LINESTRIP, 1, linedata, sizeof(vertex1));
        Direct3D.pd3dDevice->EndScene();
    }
    else {
        printf("BeginScene failed!\n\n");
        getch();
    }
}
// Builds the 1-D cutaway graph data consumed by Draw1Dgraph.
// Fills the globals graphdata[][], graph_r[], num_graph_data_points,
// maximum[] (and truemax[] for Ohm's data) by sampling host-side arrays
// (p_temphost3..6 / p_Tgraph_host / p_accelgraph_host / p_Ohmsgraph_host /
// p_arelz_graph_host) at each vertex along the cutaway line, interpolating
// over the corners of a triangle that straddles the cutaway direction with
// inverse-distance weights.
// At most one of bTdata / bAcceldata / bOhmsData / b_arelz_data should be
// true; all false selects the plain p_temphost3..6 quartet.
void Create1DGraphingData(TriMesh * pX, bool bTdata = false, bool bAcceldata = false,
bool bOhmsData = false, bool b_arelz_data = false)
{
// Takes p_temphost3,4,5,6 and turns them into graphdata[iGraph=0,1,2,3][]
Vertex * pVertex, * pVert2;
f64_vec2 pos, pos0, pos1, pos2;
f64 dist0, dist1, dist2, wt0, wt1, wt2, wttotal, y0, y1, y2;
int iGraph, asdf, iWhich, iCorner, tri_len, i;
bool has_more, has_less, has_grad;
Triangle * pTri;
long izTri[MAXNEIGH];
long VertexIndexArray[10000];
// Collect the (sorted) vertices lying right of the cutaway line; graph_r
// receives their radii.
num_graph_data_points = pX->GetVertsRightOfCutawayLine_Sorted(VertexIndexArray, graph_r, true);
printf("Xebeques furious\n Number of points %d\n", num_graph_data_points);
memset(maximum, 0, sizeof(f64) * 20);
// Method used in Render routine looks quite reasonable: find tri that crosses cutaway,
// use some kind of interp on tri. But we need to use values from p_temphost array not a graph position.
for (asdf = 0; asdf < num_graph_data_points; asdf++)
{
pVertex = pX->X + VertexIndexArray[asdf];
// We want the tri directly to the left of it, through which (-1,0) passes.
// 1.Get these vertex indices
// which tri contains a point which is further and a point less far?
real rr = pVertex->pos.x*pVertex->pos.x + pVertex->pos.y*pVertex->pos.y;
iWhich = -1;
tri_len = pVertex->GetTriIndexArray(izTri);
// Scan this vertex's triangles for one whose other corners straddle the
// vertex radially (has_more && has_less) and reach across the cutaway
// angle (has_grad).
for (i = 0; i < tri_len; i++)
{
pTri = pX->T + izTri[i];
has_more = false; has_less = false; has_grad = false;
for (iCorner = 0; iCorner < 3; iCorner++)
{
pVert2 = pTri->cornerptr[iCorner];
if (pVert2 != pVertex)
{
if (pVert2->pos.x*pVert2->pos.x + pVert2->pos.y*pVert2->pos.y > rr)
{
has_more = true;
}
else {
has_less = true;
};
};
// NOTE(review): this angle test is outside the pVert2 != pVertex guard,
// so the vertex itself also participates -- confirm intended.
if (pVert2->pos.x / pVert2->pos.y < pVertex->pos.x / pVertex->pos.y)
has_grad = true;
};
if (has_more && has_less && has_grad)
{
iWhich = i;
}
};
if (iWhich == -1) {// give up, do nothing}
// No straddling triangle found: emit zeros for this sample point.
printf("gave up. %d \n", VertexIndexArray[asdf]);
graphdata[0][asdf] = 0.0;
graphdata[1][asdf] = 0.0;
graphdata[2][asdf] = 0.0;
graphdata[3][asdf] = 0.0;
} else {
pTri = pX->T + izTri[iWhich];
// Walk back down the triangle list looking for a DOMAIN_TRIANGLE.
// NOTE(review): the first iteration re-reads the same izTri[iWhich],
// and if iWhich reaches -1 the loop exits with a non-domain triangle
// still selected -- confirm this fallback is intended.
while ((pTri->u8domain_flag != DOMAIN_TRIANGLE) && (iWhich >= 0)) {
pTri = pX->T + izTri[iWhich];
iWhich--;
};
iWhich++;
// we are needing to adjust graph_r and interp graphdata
// Project the vertex onto the cutaway direction; interpolate the data
// from the triangle corners with inverse-distance weights.
pos.y = pVertex->pos.y;
pos.x = pVertex->pos.y*CUTAWAYANGLE; // can leave graph_r undisturbed
pos0 = pTri->cornerptr[0]->pos;
pos1 = pTri->cornerptr[1]->pos;
pos2 = pTri->cornerptr[2]->pos;
// if one sits at the CUTAWAYANGLE then we can get dist == 0.
dist0 = sqrt((pos0 - pos).dot(pos0 - pos));
dist1 = sqrt((pos1 - pos).dot(pos1 - pos));
dist2 = sqrt((pos2 - pos).dot(pos2 - pos));
// Degenerate cases: a corner exactly at the sample point takes all the
// weight; otherwise normalized inverse-distance weights.
if (dist0 == 0.0) {
wt0 = 1.0; wt1 = 0.0; wt2 = 0.0;
}
else {
if (dist1 == 0.0) {
wt0 = 0.0; wt1 = 1.0; wt2 = 0.0;
}
else {
if (dist2 == 0.0) {
wt0 = 0.0; wt1 = 0.0; wt2 = 1.0;
} else {
wt0 = 1.0f / dist0;
wt1 = 1.0f / dist1;
wt2 = 1.0f / dist2;
wttotal = wt0 + wt1 + wt2;
wt0 /= wttotal;
wt1 /= wttotal;
wt2 /= wttotal;
// Not a great way it has to be said.
}
}
}
if ((bTdata == false) && (bAcceldata == false) && (bOhmsData == false)
&& (b_arelz_data == false)) {
// Plain quartet: p_temphost3..6, indexed per-vertex (central offset).
// NOTE(review): 'numgraphs' here is the file-scope global, presumed set
// by the caller before this runs -- verify.
y0 = p_temphost3[(pTri->cornerptr[0] - pX->X) + BEGINNING_OF_CENTRAL];
y1 = p_temphost3[(pTri->cornerptr[1] - pX->X) + BEGINNING_OF_CENTRAL];
y2 = p_temphost3[(pTri->cornerptr[2] - pX->X) + BEGINNING_OF_CENTRAL];
graphdata[0][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[0][asdf]) > maximum[0]) maximum[0] = fabs(graphdata[0][asdf]);
if (numgraphs > 1) {
y0 = p_temphost4[(pTri->cornerptr[0] - pX->X) + BEGINNING_OF_CENTRAL];
y1 = p_temphost4[(pTri->cornerptr[1] - pX->X) + BEGINNING_OF_CENTRAL];
y2 = p_temphost4[(pTri->cornerptr[2] - pX->X) + BEGINNING_OF_CENTRAL];
graphdata[1][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[1][asdf]) > maximum[1]) maximum[1] = fabs(graphdata[1][asdf]);
};
if (numgraphs > 2) {
y0 = p_temphost5[(pTri->cornerptr[0] - pX->X) + BEGINNING_OF_CENTRAL];
y1 = p_temphost5[(pTri->cornerptr[1] - pX->X) + BEGINNING_OF_CENTRAL];
y2 = p_temphost5[(pTri->cornerptr[2] - pX->X) + BEGINNING_OF_CENTRAL];
graphdata[2][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[2][asdf]) > maximum[2]) maximum[2] = fabs(graphdata[2][asdf]);
};
if (numgraphs > 3) {
y0 = p_temphost6[(pTri->cornerptr[0] - pX->X) + BEGINNING_OF_CENTRAL];
y1 = p_temphost6[(pTri->cornerptr[1] - pX->X) + BEGINNING_OF_CENTRAL];
y2 = p_temphost6[(pTri->cornerptr[2] - pX->X) + BEGINNING_OF_CENTRAL];
graphdata[3][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[3][asdf]) > maximum[3]) maximum[3] = fabs(graphdata[3][asdf]);
}
} else {
// go through from 0 = conduction to 5 = dTe/dt itself
// we have missed out compressive...
if (bTdata) {
// Temperature (dT/dt) family: 8 curves straight from p_Tgraph_host.
for (int j = 0; j < 8; j++)
{
y0 = p_Tgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_Tgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_Tgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[j][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[j][asdf]) > maximum[j]) maximum[j] = fabs(graphdata[j][asdf]);
}
} else {
if (bAcceldata) {
// Acceleration family: hand-picked source rows of p_accelgraph_host
// mapped to curves 0..7.
int j;
j = 1; // total
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[0][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 3; // vxB
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[1][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 5; // pressure
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[2][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 6; // neutral soak
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[3][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 8; // viscosity
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[4][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 9; // ionization
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[5][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
j = 10; // advection
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[6][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
// Curves 0..6 share the overall maximum stored in maximum[0]; curve 7
// (grad_y Az) keeps its own maximum[7].
for (int j = 0; j < 7; j++)
{
if (fabs(graphdata[j][asdf]) > maximum[0]) {
maximum[0] = fabs(graphdata[j][asdf]);
}
}
j = 11; // grad_y Az
y0 = p_accelgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_accelgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_accelgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[7][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if (fabs(graphdata[7][asdf]) > maximum[7]) maximum[7] = fabs(graphdata[7][asdf]);
} else {
if (bOhmsData) {
// Ohm's-law family: 19 curves; maxima only accumulated over the
// radial band 3.44 < y < 4.6.
int j;
for (j = 0; j < 19; j++) {
y0 = p_Ohmsgraph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_Ohmsgraph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_Ohmsgraph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[j][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if ((pos.y < 4.6) && (pos.y > 3.44) && (fabs(graphdata[j][asdf]) > maximum[j])) maximum[j] = fabs(graphdata[j][asdf]);
};
} else {
// arelz family: 12 curves; maxima over the band 3.44 < y < 4.8.
int j;
for (j = 0; j < 12; j++) {
y0 = p_arelz_graph_host[j][(pTri->cornerptr[0] - pX->X)];
y1 = p_arelz_graph_host[j][(pTri->cornerptr[1] - pX->X)];
y2 = p_arelz_graph_host[j][(pTri->cornerptr[2] - pX->X)];
graphdata[j][asdf] = wt0*y0 + wt1*y1 + wt2*y2;
if ((pos.y < 4.8) && (pos.y > 3.44) && (fabs(graphdata[j][asdf]) > maximum[j])) maximum[j] = fabs(graphdata[j][asdf]);
};
};
};
};
}; // found triangle
}; // asdf
// Post-process the maxima so that related curves share an axis scale.
if ((bTdata == false) && (bAcceldata == false) && (bOhmsData == false) && (b_arelz_data == false)) {
maximum[3] = max(maximum[3], maximum[2]);
maximum[2] = maximum[3];
} else {
// for dT graphs, let maximum be overall
if (bTdata) {
for (int j = 1; j <= 6; j++)
maximum[j] = max(maximum[j], maximum[j - 1]);
for (int j = 5; j >= 0; j--)
maximum[j] = maximum[j + 1];
} else {
if (bAcceldata) {
for (int j = 1; j < 7; j++)
maximum[j] = maximum[0];
} else {
if (bOhmsData) {
// Keep the per-curve maxima for legend display, then pool related
// curve groups onto shared scales.
memcpy(truemax, maximum, sizeof(f64) * 20);
// use max 0 and 1 combined:
f64 temp = max(maximum[0], maximum[1]);
maximum[0] = temp;
maximum[1] = temp;
temp = max(max(maximum[2], maximum[3]), max(maximum[4], maximum[13]));
maximum[2] = temp;
maximum[3] = temp;
maximum[4] = temp; // thermal force
maximum[13] = temp; // viscous
temp = max(maximum[5], maximum[6]);
maximum[5] = temp;
maximum[6] = temp;
temp = max(max(maximum[7], maximum[8]), max(maximum[9], maximum[10]));
maximum[7] = temp;
maximum[8] = temp;
maximum[9] = temp;
maximum[10] = temp;
// temp = max(maximum[11], maximum[12]); // difference, progress
// maximum[11] = temp;
// maximum[12] = temp;
temp = max(maximum[14], maximum[17]);
maximum[14] = temp;
maximum[17] = temp;
temp = max(maximum[15], maximum[16]);
maximum[15] = temp;
maximum[16] = temp;
} else {
// All same scale except for "difference" = element 10
// ... and the arelz itself?
int j;
f64 temp = maximum[1];
for (j = 2; j < 9; j++)
temp = max(temp, maximum[j]);
for (j = 1; j < 9; j++)
maximum[j] = temp;
temp = max(maximum[0], maximum[9]);
maximum[0] = temp;
maximum[9] = temp; // actual vs sum
}
}
}
}
}
void RefreshGraphs(TriMesh & X, // only not const because of such as Reset_vertex_nvT
const int iGraphsFlag)
{
D3DXMATRIXA16 matWorld;
Vertex * pVertex;
long iVertex;
plasma_data * pdata;
int offset_v, offset_T;
char buff[256];
sprintf(buff, "%5.2f ns", evaltime*1.0e9);
f64 overc;
char buffer[256];
overc = 1.0 / c_;
float x, y, z;
float zeroplane = 0.0f;
int i;
int iGraph;
float const MAXX = 11.0f;
float const MAXY = 6.0f;
long iMinor;
switch (iGraphsFlag) {
case ONE_D:
// We are going to have to think about using LineTo the way it is done in RenderGraphs
// let's start by rendering in the x-y plane and we can let the present camera look at it
printf("\n\nGot to here: ONE_D\n\n");
// Create data:
Create1DGraphingData(&X);
Draw1Dgraph(6, ONE_D);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_temphost3[iVertex+BEGINNING_OF_CENTRAL];
++pVertex;
++pdata;
}
Graph[4].DrawSurface("Azdot",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_AZDOT, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_temphost4[iVertex + BEGINNING_OF_CENTRAL];
++pVertex;
++pdata;
}
Graph[1].DrawSurface("Azdotdot",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_AZDOT, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_temphost5[iVertex + BEGINNING_OF_CENTRAL];
++pdata;
}
Graph[3].DrawSurface("Lap Az",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_LAPAZ, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = p_temphost6[iVertex + BEGINNING_OF_CENTRAL];
}
else {
pdata->temp.x = 0.0;
}
++pdata;
}
Graph[5].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_JZ, &X);
break;
case AZSOLVERGRAPHS:
pdata = X.pData;
// Bear in mind: iMinor won't actually get displayed
for (iMinor = 0; iMinor < NMINOR; iMinor++)
{
pdata->temp.x = p_temphost1[iMinor]; // epsilon
pdata->Azdot = p_temphost2[iMinor]; // Azdot0
pdata->temp.y = p_temphost3[iMinor]; // gamma
pdata->Az = p_temphost4[iMinor]; // Az
++pdata;
}
Graph[0].DrawSurface("epsilon",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_EPSILON, &X);
Graph[2].DrawSurface("Azdot0",
DATA_HEIGHT, (real *)(&(X.pData[0].Azdot)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Azdot)),
true,
GRAPH_AZDOT, &X);
Graph[3].DrawSurface("regressorn",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.y)),
true,
GRAPH_OPTI, &X);
Graph[4].DrawSurface("Az",
DATA_HEIGHT, (real *)(&(X.pData[0].Az)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Az)),
true,
GRAPH_AZ, &X);
pdata = X.pData;
for (iMinor = 0; iMinor < NMINOR; iMinor++)
{
pdata->temp.x = p_temphost5[iMinor]; // epsilon
pdata->temp.y = p_temphost6[iMinor]; // Azdot0
++pdata;
}
Graph[1].DrawSurface("regressori",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_LAPAZ, &X);
Graph[5].DrawSurface("Jacobi",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.y)),
true,
GRAPH_REGRESSOR, &X);
break;
case DTGRAPH:
// We are going to have to think about using LineTo the way it is done in RenderGraphs
// let's start by rendering in the x-y plane and we can let the present camera look at it
printf("\n\nRefreshGraphs: DTGRAPHS\n\n");
// Create data:
Create1DGraphingData(&X, true);
Draw1Dgraph(6, DTGRAPH);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_Tgraph_host[5][iVertex];
++pVertex;
++pdata;
}
Graph[4].DrawSurface("dTe/dt",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_DTE, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_Tgraph_host[7][iVertex];
++pVertex;
++pdata;
}
Graph[1].DrawSurface("d/dt nTe",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_DNT, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_Tgraph_host[5][iVertex];
++pdata;
}
Graph[3].DrawSurface("n",
DATA_HEIGHT, (real *)(&(X.pData[0].n)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_ION_N, &X);
Graph[5].DrawSurface("Te",
DATA_HEIGHT, (real *)(&(X.pData[0].Te)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_ELEC_T, &X);
break;
case OHMS2:
// We are going to have to think about using LineTo the way it is done in RenderGraphs
// let's start by rendering in the x-y plane and we can let the present camera look at it
printf("\n\nRefreshGraphs: OHMS2\n\n");
// Create data:
Create1DGraphingData(&X, false, false, true);
Draw1Dgraph(6, OHMS2);
Draw1Dgraph(7, OHMSLAW);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_Ohmsgraph_host[14][iVertex];
++pVertex;
++pdata;
}
Graph[4].DrawSurface("Jz prediction",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_JZ, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_Ohmsgraph_host[17][iVertex];
++pVertex;
++pdata;
}
Graph[5].DrawSurface("electromotive-only prediction",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_VE0Z, &X);
break;
case ACCELGRAPHS:
// We are going to have to think about using LineTo the way it is done in RenderGraphs
// let's start by rendering in the x-y plane and we can let the present camera look at it
printf("\n\nRefreshGraphs: ACCELGRAPHS\n\n");
// Create data:
Create1DGraphingData(&X, false, true);
Draw1Dgraph(6, ACCELGRAPHS);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_accelgraph_host[0][iVertex];
pdata->temp.y = p_accelgraph_host[1][iVertex];
++pVertex;
++pdata;
}
Graph[4].DrawSurface("dvxy/dt",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].temp.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_AXY, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_accelgraph_host[2][iVertex];
pdata->temp.y = p_accelgraph_host[3][iVertex];
++pVertex;
++pdata;
}
Graph[1].DrawSurface("axy : v x B",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].temp.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_AXY2, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_accelgraph_host[4][iVertex];
pdata->temp.y = p_accelgraph_host[5][iVertex];
++pdata;
}
Graph[3].DrawSurface("axy : pressure",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].temp.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_AXY3, &X);
Graph[5].DrawSurface("vxy",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].vxy)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].vxy)),
false,
GRAPH_ION_V, &X);
break;
case ARELZ:
// We are going to have to think about using LineTo the way it is done in RenderGraphs
// let's start by rendering in the x-y plane and we can let the present camera look at it
printf("\n\nRefreshGraphs: ARELZ\n\n");
// Create data:
Create1DGraphingData(&X, false, false, false, true);
Draw1Dgraph(6, ARELZ);
// Graphs:
// .. arelz
// .. electromotive
// .. v x B
// .. error
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_arelz_graph_host[0][iVertex];
pdata->temp.y = p_arelz_graph_host[0][iVertex];
++pVertex;
++pdata;
}
Graph[4].DrawSurface("arelz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_ARELZ, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_arelz_graph_host[3][iVertex] +
p_arelz_graph_host[4][iVertex];
++pVertex;
++pdata;
}
Graph[1].DrawSurface("-e/m Ez_total",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_ELECTROMOTIVE, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_arelz_graph_host[5][iVertex];
++pdata;
}
Graph[3].DrawSurface("arelz : v x B",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_VXBARELZ, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_arelz_graph_host[10][iVertex];
if (pdata->temp.x > 1.0e13) {
printf("%d %1.9E | ", iVertex, pdata->temp.x);
}
++pdata;
}
Graph[5].DrawSurface("error in sum",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_ERROR, &X);
// Cannot explain why maximum on graph is 1e13 not 1e5 as reported on 1D graph.
break;
/*
case JXY_RHO_EXY_GRADPHI_AXYDOTOC_AXY:
X.Setup_J(); // the others can already exist.
Graph[4].bDisplayTimestamp = true;
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pVertex->temp2.x = q * (pVertex->Ion.mass - pVertex->Elec.mass) / pVertex->AreaCell;
}
else {
pVertex->temp2.x = 0.0;
};
pVertex->Adot /= c;
++pVertex;
}
Graph[0].DrawSurface("Exy[statV/cm]",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
true,
GRAPH_EXY, &X);
Graph[1].DrawSurface("Adotxy/c[statV/cm]",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Adot.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Adot.x)),
true,
GRAPH_ADOTXY, &X);
Graph[2].DrawSurface("Jxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Temp.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Temp.x)),
false, // no inner mesh display.
GRAPH_JXY, &X);
Graph[3].DrawSurface("phidot[statV/s]",
DATA_HEIGHT, (real *)(&(X.X[0].phidot)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].phidot)),
true,
GRAPH_PHIDOT, &X);
Graph[4].DrawSurface("rho",
DATA_HEIGHT, (real *)(&(X.X[0].temp2.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].temp2.x)),
false, // no inner mesh display.
GRAPH_RHO, &X);
Graph[5].DrawSurface("phi[statV]",
DATA_HEIGHT, (real *)(&(X.X[0].phi)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].phi)),
true,
GRAPH_PHI, &X);
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
pVertex->Adot *= c;
++pVertex;
}
break;
case JZ_AZ_BXY_EZ_ADOTZOC_NVZ:
X.Reset_vertex_nvT(SPECIES_ELEC);
X.Setup_J(); // the others can already exist.
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
pVertex->Adot /= c;
++pVertex;
}
Graph[4].bDisplayTimestamp = true;
Graph[0].DrawSurface("Ez[statV/cm]",
DATA_HEIGHT, (real *)(&(X.X[0].E.z)),
FLAG_SEGUE_COLOUR, (real *)(&(X.X[0].E.z)),
false, // ??
GRAPH_EZ, &X);
Graph[1].DrawSurface("Az",
DATA_HEIGHT, (real *)(&(X.X[0].A.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].A.z)),
true,
GRAPH_AZ, &X);
Graph[2].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.X[0].Temp.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].Temp.z)),
false, // no inner mesh display.
GRAPH_JZ, &X);
Graph[3].DrawSurface("Bxy[Gs]",
VELOCITY_HEIGHT, (real *)(&(X.X[0].B)),
VELOCITY_COLOUR, (real *)(&(X.X[0].B)),
true, // no inner mesh display: ??
GRAPH_BXY, &X);
Graph[4].DrawSurface("Adotz/c [statV/cm]",
DATA_HEIGHT, (real *)(&(X.X[0].Adot.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].Adot.z)),
true,
GRAPH_AZ, &X);
Graph[5].colourmax = Graph[2].colourmax;
Graph[5].DrawSurface("Elec n",
DATA_HEIGHT, (real *)(&(X.X[0].n)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].Temp.z)),
false, // no inner mesh display
GRAPH_ELEC_N, &X);
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
pVertex->Adot *= c;
++pVertex;
}
break;
case SPECIES_ELECTRON2:
X.Reset_vertex_nvT(SPECIES_ELEC);
Graph[0].DrawSurface("Elec n [/cc]",
DATA_HEIGHT, (real *)(&(X.X[0].n)),
VELOCITY_COLOUR, (real *)(&(X.X[0].v)),
false, // no inner mesh display
GRAPH_ELEC_N, &X);
Graph[1].DrawSurface("v_e_xy[cm/s]",
VELOCITY_HEIGHT, (real *)(&(X.X[0].v)),
VELOCITY_COLOUR, (real *)(&(X.X[0].v)),
false, // no inner mesh display
GRAPH_ELEC_V, &X);
Graph[3].DrawSurface("v_e_z[cm/s]",
DATA_HEIGHT, (real *)(&(X.X[0].v.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].v.z)),
false, // no inner mesh display.
GRAPH_VEZ, &X);
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pVertex->temp2.x = (pVertex->Ion.mass - pVertex->Elec.mass) / pVertex->AreaCell;
}
else {
pVertex->temp2.x = 0.0;
};
++pVertex;
}
Graph[2].bDisplayTimestamp = false;
Graph[2].DrawSurface("n_i-n_e",
DATA_HEIGHT, (real *)(&(X.X[0].temp2.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].temp2.x)),
false, // no inner mesh display.
GRAPH_NINE, &X);
Graph[5].TickRescaling = 1.0 / kB;
Graph[5].DrawSurface("Elec T [eV]",
DATA_HEIGHT, (real *)(&(X.X[0].T)),
SEGUE_COLOUR, (real *)(&(X.X[0].T)),
false, // no inner mesh display
GRAPH_ELEC_T, &X);
Graph[5].TickRescaling = 1.0;
offset_v = (real *)(&(X.X[0].v)) - (real *)(&(X.X[0]));
offset_T = (real *)(&(X.X[0].T)) - (real *)(&(X.X[0]));
Graph[4].SetEyePlan(GlobalPlanEye);
Graph[4].boolDisplayMeshWireframe = true;
Graph[4].boolClearZBufferBeforeWireframe = true;
Graph[4].boolDisplayMainMesh = true;
Graph[4].boolDisplayInnerMesh = false;
Graph[4].boolDisplayScales = false;
Graph[4].boolDisplayShadow = false;
Graph[4].mhTech = Graph[4].mFX->GetTechniqueByName("VelociTech");
Graph[4].colourmax = Graph[0].colourmax; // match colours
Graph[4].SetDataWithColour(X, FLAG_VELOCITY_COLOUR, FLAG_FLAT_MESH, offset_v, offset_v,
GRAPH_FLAT_WIRE_MESH);
Graph[4].Render(buff, GlobalRenderLabels, &X);
break;
*/
case IONIZEGRAPH:
printf("\n\nRefreshGraphs: IONIZEGRAPHS\n\n");
// When we come to speed up graphs, make it so we can
// just pass an array of f64. !!!!
// Investigate graphs half an hour: what's up with the rest?
// Move table, start running.
// Can we bring back cutaway any how?
// Wanted acceleration graphs.
// Want to do a big run.
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_graphdata1_host[iVertex];
pdata->temp.y = p_graphdata2_host[iVertex]; // dn/dt /n
++pVertex;
++pdata;
}
Graph[0].DrawSurface("dn/dt",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_DNDT, &X);
Graph[1].DrawSurface("dn/dt / n",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_DNDT_OVER_n, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_graphdata3_host[iVertex]; // log10 n
++pVertex;
++pdata;
}
Graph[3].DrawSurface("log10(n)",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_LOG10N, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_graphdata4_host[iVertex]; // dTe/dt
pdata->temp.y = p_graphdata6_host[iVertex]; // n/nn
++pVertex;
++pdata;
}
Graph[2].DrawSurface("dTe/dt[ionization]",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false,
GRAPH_DTEDT, &X);
Graph[4].DrawSurface("n_e / n_total",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
IONISE_COLOUR, (real *)(&(X.pData[0].temp.y)),
false,
GRAPH_FRACTION, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_graphdata5_host[iVertex]; // dvez/dt
//if (pdata->vez != 0.0f) {
// pdata->temp.y = pdata->temp.x / (pdata->vez);
//} else {
// pdata->temp.y = 0.0;
//}
++pVertex;
++pdata;
}
printf("got to here 1");
Graph[5].DrawSurface("accel ez[ionization]",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].vez)),
false,
GRAPH_AEZ1, &X);
// Do we need another shader? Or can we reset limits?
// see what scale is like.
printf("got to here 2");
break;
case OVERALL:
printf("\n\nRefreshGraphs: OVERALL\n\n");
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = pdata->n + pdata->n_n;
pdata->temp.y = pdata->n / (1.0 + pdata->temp.x);
} else {
pdata->temp.x = 0.0;
pdata->temp.y = 0.0;
}
++pVertex;
++pdata;
}
Graph[0].DrawSurface("n_n + n_ion",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
IONISE_COLOUR, (real *)(&(X.pData[0].temp.y)),
false,
GRAPH_TOTAL_N, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = (m_neutral_*pdata->n_n*pdata->v_n.x
+ (m_ion_ + m_e_) * pdata->n*pdata->vxy.x) /
(m_neutral_*pdata->n_n + (m_ion_ + m_e_)*pdata->n);
pdata->temp.y = (m_neutral_*pdata->n_n*pdata->v_n.y
+ (m_ion_ + m_e_) * pdata->n*pdata->vxy.y) /
(m_neutral_*pdata->n_n + (m_ion_ + m_e_)*pdata->n);
} else {
pdata->temp.x = 0.0; pdata->temp.y = 0.0;
}
++pVertex;
++pdata;
}
Graph[1].DrawSurface("sum[n_s v_s m_s]/sum[n_s m_s]",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].temp.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display
GRAPH_TOTAL_V, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = (pdata->n_n*pdata->Tn
+ pdata->n*(pdata->Ti + pdata->Te)) /
(pdata->n_n + pdata->n + pdata->n);
} else {
pdata->temp.x = 0.0; pdata->temp.y = 0.0;
}
++pVertex;
++pdata;
}
Graph[3].TickRescaling = 1.0 / kB_;
Graph[3].DrawSurface("sum[n_s T_s]/sum[n_s]",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_TOTAL_T, &X);
Graph[3].TickRescaling = 1.0;
Graph[2].DrawSurface("Neutral n",
DATA_HEIGHT, (real *)(&(X.pData[0].n_n)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].v_n)),
false, // no inner mesh display
GRAPH_NEUT_N, &X);
Graph[4].DrawSurface("Neutral v",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].v_n)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].v_n)),
false, // no inner mesh display
GRAPH_NEUT_V, &X);
Graph[5].TickRescaling = 1.0 / kB_;
Graph[5].DrawSurface("Neutral T",
DATA_HEIGHT, (real *)(&(X.pData[0].Tn)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Tn)),
false, // no inner mesh display
GRAPH_NEUT_T, &X);
Graph[5].TickRescaling = 1.0;
break;
case SPECIES_ION:
printf("\n\nRefreshGraphs: SPECIES_ION\n\n");
Graph[3].TickRescaling = 1.0 / kB_;
Graph[3].DrawSurface("Ion T",
DATA_HEIGHT, (real *)(&(X.pData[0].Ti)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Ti)),
false, // no inner mesh display
GRAPH_ION_T, &X);
Graph[3].TickRescaling = 1.0;
// labels only appear on first 1 called.
Graph[0].DrawSurface("Ion n",
DATA_HEIGHT, (real *)(&(X.pData[0].n)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].vxy)),
false, // no inner mesh display
GRAPH_ION_N, &X);
Graph[1].DrawSurface("Ion v",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].vxy)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].vxy)),
false, // no inner mesh display
GRAPH_ION_V, &X);
// These are same so double up with elec.
Graph[5].TickRescaling = 1.0 / kB_;
Graph[5].DrawSurface("Elec T",
DATA_HEIGHT, (real *)(&(X.pData[0].Te)),
SEGUE_COLOUR, (real *)(&(X.pData[0].Te)),
false, // no inner mesh display
GRAPH_ELEC_T, &X);
Graph[5].TickRescaling = 1.0;
break;
/* case SPECIES_ELEC:
Graph[0].DrawSurface("Elec n",
DATA_HEIGHT, (real *)(&(X.pData[0].n)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].vxy)),
false, // no inner mesh display
GRAPH_ELEC_T, &X);
// colours == 0.0 ... because v = 0
// First........... let's understand why surface normals come out unpredictable.
// Then............ let's go and see what it does with y values (in Render and .fx)
Graph[1].DrawSurface("Elec v",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].vxy)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].vxy)),
false, // no inner mesh display
GRAPH_ELEC_V, &X);
break;
// In other cases, (and even for the above),
// here is a good place to call the
// setup routines for temp variables.
*/
case OHMSLAW:
printf("\n\nRefreshGraphs: OHMSLAW\n\n");
// 0. q/ m_e nu_sum
// 1. qn / m_e nu_sum
// 2. nu_sum
// 3. prediction of Jz from uniform Ez
// 4. prediction of Jz from actual Ez
// 5. Actual Jz
// Let temphost1 = nu_en + nu_ei_effective
// Let temphost2 = nu_en/temphost1
// Cannot explain why, that comes out black and this doesn't.
// Oh because colourmax has been set to 1 or not?
// Yet the following crashes it. Bizarre? Maybe dividing by 0?
overc = 1.0 / c_;
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = q_ / (m_e_ * (1.0 + p_temphost1[iVertex + BEGINNING_OF_CENTRAL]));
pdata->temp.y = p_temphost2[iVertex + BEGINNING_OF_CENTRAL]; // colour
} else {
pdata->temp.x = 0.0;
pdata->temp.y = 0.0;
}
++pdata;
++pVertex;
};
Graph[0].DrawSurface("q over m nu_effective",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
PPN_COLOUR, (real *)(&(X.pData[0].temp.y)),
false, // no inner mesh display.
GRAPH_VRESPONSEOHMS, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n /
(m_e_ * (1.0 + p_temphost1[iVertex + BEGINNING_OF_CENTRAL]));
pdata->temp.y = p_temphost2[iVertex + BEGINNING_OF_CENTRAL]; // colour
} else {
pdata->temp.x = 0.0;
pdata->temp.y = 0.0;
};
++pdata;
++pVertex;
};
Graph[1].DrawSurface("qn / m nu_effective",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
PPN_COLOUR, (real *)(&(X.pData[0].temp.y)),
false, // no inner mesh display.
GRAPH_CONDUCTIVITYOHMS, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = p_temphost1[iVertex + BEGINNING_OF_CENTRAL];
pdata->temp.y = p_temphost2[iVertex + BEGINNING_OF_CENTRAL]; // colour
};
++pVertex;
++pdata;
};
Graph[2].DrawSurface("nu_effective (blue=neut dominates)",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
PPN_COLOUR, (real *)(&(X.pData[0].temp.y)),
false, // no inner mesh display.
GRAPH_NU_EFFECTIVE, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = EzStrength_*q_*q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n /
(m_e_ * (1.0 + p_temphost1[iVertex + BEGINNING_OF_CENTRAL]));
};
++pdata;
};
Graph[3].DrawSurface("predict Jz (uniform Ez)",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_JZ, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = (EzStrength_
- X.pData[iVertex + BEGINNING_OF_CENTRAL].Azdot*overc
)*q_*q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n /
(m_e_ * (1.0 + p_temphost1[iVertex + BEGINNING_OF_CENTRAL]));
};
++pdata;
};
Graph[4].DrawSurface("predict Jz (Ez)",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_JZ, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
pVertex = X.X;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pdata->temp.x = q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n*
(X.pData[iVertex + BEGINNING_OF_CENTRAL].viz - X.pData[iVertex + BEGINNING_OF_CENTRAL].vez);
};
++pdata;
};
Graph[5].DrawSurface("actual Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_JZ, &X);
break;
case JZAZBXYEZ:
printf("\n\nRefreshGraphs: JZAZBXYEZ\n\n");
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = q_ * pdata->n*(pdata->viz - pdata->vez);
++pdata;
};
Graph[3].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_JZ, &X);
// create graph data for Ez : add Ez_strength*Ezshape to -Azdot/c
overc = 1.0 / c_;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
X.pData[iVertex + BEGINNING_OF_CENTRAL].temp.y =
-X.pData[iVertex + BEGINNING_OF_CENTRAL].Azdot*overc
+ GetEzShape__(X.pData[iVertex + BEGINNING_OF_CENTRAL].pos.modulus())*EzStrength_;
}
Graph[2].DrawSurface("Ez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)), // use Jz's colour
false,
GRAPH_EZ, &X);
Graph[0].DrawSurface("Az",
DATA_HEIGHT, (real *)(&(X.pData[0].Az)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Az)),
true, GRAPH_AZ, &X);
// for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
// {
// X.pData[iVertex + BEGINNING_OF_CENTRAL].temp.x = p_B_host[iVertex + BEGINNING_OF_CENTRAL].x;
// X.pData[iVertex + BEGINNING_OF_CENTRAL].temp.y = p_B_host[iVertex + BEGINNING_OF_CENTRAL].y;
// }
Graph[1].DrawSurface("Bxy",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].B.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].B.x)),
false,
GRAPH_BXY, &X);
Graph[5].DrawSurface("vez",
DATA_HEIGHT, (real *)(&(X.pData[0].vez)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)), // colour is for Jz?
false, GRAPH_VEZ, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = -p_temphost3[iVertex + BEGINNING_OF_CENTRAL]/c_;
++pVertex;
++pdata;
}
Graph[4].DrawSurface("-Azdot/c",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true,
GRAPH_AZDOT, &X);
// pdata = X.pData + BEGINNING_OF_CENTRAL;
// for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
// {
// pdata->temp.x = temp_array_host[iVertex + BEGINNING_OF_CENTRAL];
// ++pdata;
// };
// Graph[4].DrawSurface("Lap Az",
// DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
// AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
// true, GRAPH_LAPAZ, &X);
break;
case VIZVEZJZAZDOT:
printf("\n\nRefreshGraphs: VIZVEZJZAZDOT\n\n");
// Set Jz:
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = q_ * pdata->n*(pdata->viz - pdata->vez);
++pdata;
};
Graph[0].DrawSurface("viz",
DATA_HEIGHT, (real *)(&(X.pData[0].viz)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, GRAPH_VIZ, &X);
Graph[1].DrawSurface("vez",
DATA_HEIGHT, (real *)(&(X.pData[0].vez)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, GRAPH_VEZ, &X);
Graph[2].DrawSurface("Azdot",
DATA_HEIGHT, (real *)(&(X.pData[0].Azdot)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Azdot)),
true, GRAPH_AZDOT, &X);
Graph[3].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, GRAPH_JZ, &X);
break;
/*
case NEWSTUFF:
// Too bad substep is not stated. We should divide by substep to give anything meaningful
// in these graphs.
// Let temphost3 = vez0
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_temphost3[iVertex + BEGINNING_OF_CENTRAL];
++pdata;
};
Graph[0].DrawSurface("vez0 : vez = vez0 + sigma Ez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_VEZ0, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = p_OhmsCoeffs_host[iVertex + BEGINNING_OF_CENTRAL].sigma_e_zz;
++pdata;
};
Graph[1].DrawSurface("sigma : vez = vez0 + sigma Ez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_RESPONSE, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n*
(p_OhmsCoeffs_host[iVertex + BEGINNING_OF_CENTRAL].sigma_i_zz
- p_OhmsCoeffs_host[iVertex + BEGINNING_OF_CENTRAL].sigma_e_zz);
// Will show something not very useful ---- in a brief instant there
// isn't much time for second-order (frictional) effects.
++pdata;
};
Graph[2].DrawSurface("Ez=0 v addition: vez0-vez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_DECEL, &X);
// Too bad substep is not stated. We should divide by substep to give anything meaningful
// in these graphs.
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = q_*X.pData[iVertex + BEGINNING_OF_CENTRAL].n*
(p_OhmsCoeffs_host[iVertex + BEGINNING_OF_CENTRAL].sigma_i_zz
- p_OhmsCoeffs_host[iVertex + BEGINNING_OF_CENTRAL].sigma_e_zz);
// Will show something not very useful ---- in a brief instant there
// isn't much time for second-order (frictional) effects.
++pdata;
};
Graph[3].DrawSurface("dynamic conductivity q n sigma : vez = vez0 + sigma Ez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_DYNCONDUCTIVITY, &X);
// create graph data for Ez : add Ez_strength*Ezshape to -Azdot/c
overc = 1.0 / c_;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
X.pData[iVertex + BEGINNING_OF_CENTRAL].temp.y =
-X.pData[iVertex + BEGINNING_OF_CENTRAL].Azdot*overc
+ GetEzShape__(X.pData[iVertex + BEGINNING_OF_CENTRAL].pos.modulus())*EzStrength_;
}
Graph[4].DrawSurface("Ez",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)), // use Jz's colour
false,
GRAPH_EZ, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.x = q_ * pdata->n*(pdata->viz - pdata->vez);
++pdata;
};
Graph[5].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display.
GRAPH_JZ, &X);
break;*/
case LAPAZ_AZ:
printf("\n\nRefreshGraphs: LAPAZ_AZ\n\n");
// Assume temp.x contains Lap Az
Graph[0].DrawSurface("Lap Az",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
true, GRAPH_LAPAZ, &X);
Graph[1].DrawSurface("Az",
DATA_HEIGHT, (real *)(&(X.pData[0].Az)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Az)),
true, GRAPH_AZ, &X);
Graph[2].DrawSurface("Azdot",
DATA_HEIGHT, (real *)(&(X.pData[0].Azdot)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].Azdot)),
true, GRAPH_AZDOT, &X);
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
pdata->temp.y = q_ * pdata->n*(pdata->viz - pdata->vez);
++pdata;
};
Graph[3].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.y)),
AZSEGUE_COLOUR, (real *)(&(X.pData[0].temp.y)),
false, GRAPH_JZ, &X);
break;
case EXYCOMPONENTS:
/*
X.Setup_J(); // the others can already exist.
Graph[0].DrawSurface("Adotxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Adot.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Adot.x)),
true,
GRAPH_ADOTXY, &X);
Graph[1].DrawSurface("Grad phi",
VELOCITY_HEIGHT, (real *)(&(X.X[0].GradTe)),
VELOCITY_COLOUR, (real *)(&(X.X[0].GradTe)),
true, // no inner mesh display: ??
GRAPH_GRADPHI, &X);
Graph[2].DrawSurface("Exy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
true,
GRAPH_EXY, &X);
Graph[3].DrawSurface("Jxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Temp.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Temp.x)),
false, // no inner mesh display.
GRAPH_JXY, &X);
*/
// Set GradTe to grad phi
break;
case JXYAXYBZEXY:
/*
X.Setup_J(); // the others can already exist.
Graph[0].DrawSurface("Axy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].A.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].A.x)),
true,
GRAPH_AXY, &X);
Graph[1].DrawSurface("Bz",
DATA_HEIGHT, (real *)(&(X.X[0].B.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].B.z)),
true, // no inner mesh display: ??
GRAPH_BZ, &X);
Graph[2].DrawSurface("Exy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
true,
GRAPH_EXY, &X);
Graph[3].DrawSurface("Jxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Temp.x)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Temp.x)),
false, // no inner mesh display.
GRAPH_JXY, &X);
*/
break;
case EXY_RHO_PHI_PHIDOT:
/*
// For this one do n_i-n_e
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pVertex->temp2.x = (pVertex->Ion.mass - pVertex->Elec.mass) / pVertex->AreaCell;
}
else {
pVertex->temp2.x = 0.0;
};
++pVertex;
}
Graph[0].DrawSurface("phi",
DATA_HEIGHT, (real *)(&(X.X[0].phi)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].phi)),
true,
GRAPH_PHI, &X);
Graph[1].DrawSurface("phidot",
DATA_HEIGHT, (real *)(&(X.X[0].phidot)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].phidot)),
true,
GRAPH_PHIDOT, &X);
Graph[2].DrawSurface("Exy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
true,
GRAPH_EXY, &X);
Graph[3].DrawSurface("n_i-n_e",
DATA_HEIGHT, (real *)(&(X.X[0].temp2.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].temp2.x)),
false, // no inner mesh display.
GRAPH_NINE, &X);
*/
break;
case EXY_RHO_PHI_JXY:
// create rho on pVertex->temp2.x ...
/*
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pVertex->temp2.x = q * (pVertex->Ion.mass - pVertex->Elec.mass) / pVertex->AreaCell;
}
else {
pVertex->temp2.x = 0.0;
};
++pVertex;
}
X.Setup_J();
Graph[0].DrawSurface("phi",
DATA_HEIGHT, (real *)(&(X.X[0].phi)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].phi)),
false,
GRAPH_PHI, &X);
Graph[1].DrawSurface("Jxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Temp)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Temp)),
false, // no inner mesh display: ??
GRAPH_JXY, &X);
Graph[2].DrawSurface("Exy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
false,
GRAPH_EXY, &X);
Graph[3].DrawSurface("rho",
DATA_HEIGHT, (real *)(&(X.X[0].temp2.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].temp2.x)),
false, // no inner mesh display.
GRAPH_RHO, &X);
*/
break;
case EXY_RHO_BZ_JXY:
/*
// create rho on pVertex->temp2.x ...
pVertex = pX->X;
for (iVertex = 0; iVertex < pX->numVertices; iVertex++)
{
if (pVertex->flags == DOMAIN_VERTEX) {
pVertex->temp2.x = q * (pVertex->Ion.mass - pVertex->Elec.mass) / pVertex->AreaCell;
}
else {
pVertex->temp2.x = 0.0;
};
++pVertex;
}
X.Setup_J();
Graph[0].DrawSurface("Bz",
DATA_HEIGHT, (real *)(&(X.X[0].B.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].B.z)),
true, // no inner mesh display: ??
GRAPH_BZ, &X);
Graph[1].DrawSurface("Jxy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].Temp)),
VELOCITY_COLOUR, (real *)(&(X.X[0].Temp)),
false, // no inner mesh display: ??
GRAPH_JXY, &X);
Graph[2].DrawSurface("Exy",
VELOCITY_HEIGHT, (real *)(&(X.X[0].E)),
VELOCITY_COLOUR, (real *)(&(X.X[0].E)),
false,
GRAPH_EXY, &X);
Graph[3].DrawSurface("rho",
DATA_HEIGHT, (real *)(&(X.X[0].temp2.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].temp2.x)),
false, // no inner mesh display.
GRAPH_RHO, &X);
*/
break;
//case SIGMA_E_J:
/*
X.Setup_J(); // the others can already exist.
Graph[0].DrawSurface("sigma_e_zz",
DATA_HEIGHT, (real *)(&(X.X[0].sigma_e.zz)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].sigma_e.zz)),
true,
GRAPH_SIGMA_E, &X);
//Graph[1].DrawSurface("v_e_0.z",
// DATA_HEIGHT,(real *)(&(X.X[0].v_e_0.z)),
// AZSEGUE_COLOUR,(real *)(&(X.X[0].v_e_0.z)),
//false, // no inner mesh display: ??
// GRAPH_VE0Z, &X);
Graph[1].DrawSurface("nsigma",
DATA_HEIGHT, (real *)(&(X.X[0].xdotdot.x)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].xdotdot.x)),
true, GRAPH_SIGMATEMP, &X);
Graph[2].DrawSurface("Ez",
DATA_HEIGHT, (real *)(&(X.X[0].E.z)),
FLAG_AZSEGUE_COLOUR, (real *)(&(X.X[0].E.z)), // how to make SEGUE_COLOUR work?
false, // ??
GRAPH_EZ, &X);
Graph[3].DrawSurface("Jz",
DATA_HEIGHT, (real *)(&(X.X[0].Temp.z)),
AZSEGUE_COLOUR, (real *)(&(X.X[0].Temp.z)),
false, // no inner mesh display.
GRAPH_JZ, &X);
*/
// break;
case TOTAL:
// In this case we have to create data,
// as we go.
// Best put it here so we can see where
// data is being populated.
/*long iVertex;
Vertex * pVertex = X;
for (iVertex = 0; iVertex < numVertices; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pVertex->n = (pVertex->Neut.mass + pVertex->Ion.mass) / pVertex->AreaCell;
pVertex->v = (m_n*pVertex->Neut.mom + m_ion * pVertex->Ion.mom + m_e * pVertex->Elec.mom) /
(m_n*pVertex->Neut.mass + m_ion * pVertex->Ion.mass + m_e * pVertex->Elec.mass);
pVertex->T = (pVertex->Neut.heat + pVertex->Ion.heat + pVertex->Elec.heat) /
(pVertex->Neut.mass + pVertex->Ion.mass + pVertex->Elec.mass);
pVertex->Temp.x = pVertex->Ion.mass / (pVertex->Neut.mass + pVertex->Ion.mass);
};
++pVertex;
}*/
//X.CalculateTotalGraphingData();
printf("\n\nRefreshGraphs: TOTAL\n\n");
// ought to change this to use variables n,v,T !
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = pdata->n + pdata->n_n;
pdata->temp.y = pdata->n / pdata->temp.x;
}
++pVertex;
++pdata;
}
Graph[0].DrawSurface("n_n + n_ion",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
IONISE_COLOUR, (real *)(&(X.pData[0].temp.y)),
false,
GRAPH_TOTAL_N, &X);
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = (m_neutral_*pdata->n_n*pdata->v_n.x
+ (m_ion_ + m_e_) * pdata->n*pdata->vxy.x) /
(m_neutral_*pdata->n_n + (m_ion_ + m_e_)*pdata->n);
pdata->temp.y = (m_neutral_*pdata->n_n*pdata->v_n.y
+ (m_ion_ + m_e_) * pdata->n*pdata->vxy.y) /
(m_neutral_*pdata->n_n + (m_ion_ + m_e_)*pdata->n);
}
++pVertex;
++pdata;
}
Graph[1].DrawSurface("sum[n_s v_s m_s]/sum[n_s m_s]",
VELOCITY_HEIGHT, (real *)(&(X.pData[0].temp.x)),
VELOCITY_COLOUR, (real *)(&(X.pData[0].temp.x)),
false, // no inner mesh display
GRAPH_TOTAL_V, &X);
//Graph[2].DrawSurface("n_n+n_ion",
//DATA_HEIGHT, (real *)(&(X.X[0].n)),
//VELOCITY_COLOUR, (real *)(&(X.X[0].v)),
//false,
//GRAPH_TOTAL_N_II, &X); // ok what we did here? we thought we'd colour with velocity .. but we haven't given ourselves room for 3 temp vars so drop this for now.
pVertex = X.X;
pdata = X.pData + BEGINNING_OF_CENTRAL;
for (iVertex = 0; iVertex < NUMVERTICES; iVertex++)
{
if ((pVertex->flags == DOMAIN_VERTEX) || (pVertex->flags == OUTERMOST))
{
pdata->temp.x = (pdata->n_n*pdata->Tn
+ pdata->n*(pdata->Ti + pdata->Te)) /
(pdata->n_n + pdata->n + pdata->n);
}
++pVertex;
++pdata;
}
Graph[3].TickRescaling = 1.0 / kB_;
Graph[3].DrawSurface("sum[n_s T_s]/sum[n_s]",
DATA_HEIGHT, (real *)(&(X.pData[0].temp.x)),
SEGUE_COLOUR, (real *)(&(X.pData[0].temp.x)),
false,
GRAPH_TOTAL_T, &X);
Graph[3].TickRescaling = 1.0;
break;
};
// Graph 2 and 4, in case of species graphs:
switch (iGraphsFlag) {
//case SPECIES_NEUTRAL:
case SPECIES_ION:
//case SPECIES_ELEC:
//case TOTAL:
int offset_v, offset_T;
offset_v = (real *)(&(X.pData[0].vxy)) - (real *)(&(X.pData[0]));
offset_T = (real *)(&(X.pData[0].Te)) - (real *)(&(X.pData[0]));
Graph[2].SetEyePlan(GlobalPlanEye);
Graph[2].boolDisplayMeshWireframe = true;
Graph[2].boolClearZBufferBeforeWireframe = true;
Graph[2].boolDisplayMainMesh = true;
Graph[2].boolDisplayInnerMesh = false;
Graph[2].boolDisplayScales = false;
if (GlobalColoursPlanView == 0)
{
// nothing
Graph[2].mhTech = Graph[2].mFX->GetTechniqueByName("MeshTech");
Graph[2].SetDataWithColour(X, FLAG_COLOUR_MESH, FLAG_FLAT_MESH, 0, 0,
GRAPH_FLAT_WIRE_MESH);
Graph[2].Render(buff, GlobalRenderLabels, &X);
} else {
// Tell SDWC not to mess with colourmax if it's a flat mesh.
if (GlobalColoursPlanView == 1)
{
// velocity
Graph[2].mhTech = Graph[2].mFX->GetTechniqueByName("VelociTech");
Graph[2].colourmax = Graph[0].colourmax; // match colours
Graph[2].SetDataWithColour(X, FLAG_VELOCITY_COLOUR, FLAG_FLAT_MESH, offset_v, offset_v,
GRAPH_FLAT_WIRE_MESH);
Graph[2].Render(buff, GlobalRenderLabels, &X);
};
////else {
//// // temperature
//// Graph[2].mhTech = Graph[2].mFX->GetTechniqueByName("SegueTech");
//// // SegueVS should take maximum as a parameter;
//// // at least for colours we should prefer an absolute scale for T
//// // Is it ever used for anything else? Not so far? eps?
//// Graph[2].SetDataWithColour(X, FLAG_SEGUE_COLOUR, FLAG_FLAT_MESH, offset_T, offset_T,
//// GRAPH_FLAT_WIRE_MESH);
//// Graph[2].Render(buff, GlobalRenderLabels, &X);
////};
};
// =================================================================================
printf("\ngot to here; graph [4]:\n\n");
Graph[4].boolDisplayKeyButton = false; // it's temperature
Graph[4].SetEyePlan(GlobalPlanEye);
Graph[4].boolDisplayMeshWireframe = true;
Graph[4].boolClearZBufferBeforeWireframe = true;
Graph[4].boolDisplayMainMesh = true;
Graph[4].boolDisplayInnerMesh = false;
Graph[4].boolDisplayScales = false;
Graph[4].mhTech = Graph[4].mFX->GetTechniqueByName("SegueTech");
Graph[4].SetDataWithColour(X, FLAG_SEGUE_COLOUR, FLAG_FLAT_MESH, offset_T, offset_T,
GRAPH_FLAT_WIRE_MESH);
Graph[4].Render(buff, GlobalRenderLabels, &X);
break;
}
printf("End of Refreshgraphs\n");
}
// One Media Foundation sink writer (mp4 encoder) per output video stream.
IMFSinkWriter *pSinkWriter[NUMAVI];
// Stream index returned by InitializeSinkWriter for each sink writer.
DWORD izStream[NUMAVI];
// Scratch HRESULT used when writing video frames (see WM_TIMER handler).
HRESULT hresult;
// Presentation timestamp of the next frame appended to the mp4 files
// (reset to 0 whenever the files are pinched off and recreated).
LONGLONG rtStart = 0;
int main()
{
printf("hello\n");
HINSTANCE hInstance = GetModuleHandle(NULL);
HWND hwndConsole = GetConsoleWindow();
WCHAR szInitialFilenameAvi[512];
MSG msg;
HDC hdc;
// HACCEL hAccelTable;
real x, y, temp;
int i, j;
float a1, a2, a3, a4;
//HWND hwndConsole;
FILE * fp;
extern char Functionalfilename[1024];
int nDevices, iWhich;
cudaDeviceProp prop;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
if (prop.memoryBusWidth == 384) iWhich = i;
}
printf("Picked %d \n", iWhich);
getch();
cudaSetDevice(iWhich); // K40?
cudaDeviceReset();
size_t uFree, uTotal;
cudaMemGetInfo(&uFree, &uTotal);
printf("Memory on device: uFree %zd uTotal %zd\n", uFree, uTotal);
HRESULT hr = CoInitializeEx(NULL, COINIT_APARTMENTTHREADED);
if (!SUCCEEDED(hr)) {
printf("CoInitializeEx failed. press p\n");
while (getch() != 'p');
exit(23233);
}
hr = MFStartup(MF_VERSION);
if (!SUCCEEDED(hr)) {
printf("MFStartup failed. press p\n");
while (getch() != 'p');
exit(23234);
}
h = TIMESTEP;
evaltime = 0.0; // gets updated before advance
memset(Historic_powermax, 0, 200 * sizeof(int));
memset(Historic_powermin, 0, 200 * sizeof(int));
ZeroMemory(Historic_max, 512 * HISTORY * sizeof(float));
ZeroMemory(Historic_min, 512 * HISTORY * sizeof(float));
GlobalStepsCounter = 0; steps_remaining = 0; steps_remaining_CPU = 0;
SetConsoleTitle("2D 1/16 annulus DPF simulation");
Sleep(40);
//hwndConsole = FindWindow(NULL, "2D 1/16 annulus DPF simulation");
MoveWindow(hwndConsole, 0, 0, SCREEN_WIDTH - VIDEO_WIDTH - 10, SCREEN_HEIGHT - 30, TRUE);
report_time(0);
int filetag = 0;
do {
filetag++;
sprintf(Functionalfilename, FUNCTIONALFILE_START "%03d.txt", filetag);
} while ((_access(Functionalfilename, 0)) != -1);
printf("\n\nopening %s \n", Functionalfilename);
fp = fopen(Functionalfilename, "w");
if (fp == 0) {
printf("error with %s \n", Functionalfilename);
getch();
}
else {
printf("opened %s \n", Functionalfilename);
};
fprintf(fp, "GSC evaltime Area neut.N ion.N elec.N neut.r ion.r elec.r SDneut.r SDion.r SDelec.r "
" neut.vr neut.vth neut.vz ion.vr ion.vth ion.vz elec.vr elec.vth elec.vz neut.heat ion.heat elec.heat neut.T ion.T elec.T "
" neut.mnvv/3 ion.mnvv/3 elec.mnvv/3 elec.force(vxB)r within3.6 elec.Bth EE BB Heatings and dT changes - see code \n");
fclose(fp);
X1.Initialise(1); // Set evaltime first
X2.Initialise(2);
X3.Initialise(3);
printf("Got to here 1\n");
{
X4.Initialise(4);
printf("Got to here 2\n");
X4.CreateTilingAndResequence2(&X1);
X4.CreateTilingAndResequence2(&X2);
X4.CreateTilingAndResequence2(&X3);
printf("Got to here 3\n");
//
// Dropping it for now so we can pursue solving equations first.
//
}
X1.Recalculate_TriCentroids_VertexCellAreas_And_Centroids();
X1.EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
X1.SetupMajorPBCTriArrays();
X2.Recalculate_TriCentroids_VertexCellAreas_And_Centroids();
X2.EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
X2.SetupMajorPBCTriArrays();
X3.Recalculate_TriCentroids_VertexCellAreas_And_Centroids();
X3.EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
X3.SetupMajorPBCTriArrays();
printf("Got to here 4\n");
X1.InitialPopulate();
X2.InitialPopulate();
X3.InitialPopulate();
X1.Create4Volleys();
X2.Create4Volleys();
X3.Create4Volleys();
pTriMesh = &X1;
pX = &X1;
pXnew = &X2;
GlobalBothSystemsInUse = 0;
printf(report_time(1));
printf("\n");
report_time(0);
// Window setup
LoadString(hInstance, IDS_APP_TITLE, szTitle, 1024);
LoadString(hInstance, IDC_F2DVALS, szWindowClass, 1024);
wcex.cbSize = sizeof(WNDCLASSEX);
wcex.style = CS_HREDRAW | CS_VREDRAW;
wcex.lpfnWndProc = WndProc;
wcex.cbClsExtra = 0;
wcex.cbWndExtra = 0;
wcex.hInstance = hInstance;
wcex.hIcon = LoadIcon(hInstance, MAKEINTRESOURCE(IDI_F2DVALS));
wcex.hCursor = LoadCursor(NULL, IDC_ARROW);
wcex.hbrBackground = (HBRUSH)(COLOR_WINDOW + 1);
wcex.lpszMenuName = MAKEINTRESOURCE(IDR_MENU1);
wcex.lpszClassName = szWindowClass;
wcex.hIconSm = LoadIcon(wcex.hInstance, MAKEINTRESOURCE(IDI_SMALL));
if (RegisterClassEx(&wcex) == 0) {
char buff[128];
MessageBox(NULL, "RegisterClassEx failed", itoa(GetLastError(), buff, 10), MB_OK);
};
printf("SCREEN_WIDTH %d VIDEO_WIDTH %d VIDEO_HEIGHT %d \n",
SCREEN_WIDTH, VIDEO_WIDTH, VIDEO_HEIGHT);
hWnd = CreateWindowEx(NULL, szWindowClass, szTitle, WS_BORDER | WS_POPUP,
SCREEN_WIDTH - VIDEO_WIDTH - 5, 0, VIDEO_WIDTH + 5, VIDEO_HEIGHT + 20, NULL, NULL, hInstance, NULL);
if (!hWnd) {
DWORD dword = GetLastError();
char buff[128];
MessageBox(NULL, "CreateWindowEx failed", itoa(dword, buff, 10), MB_OK);
return dword;
}
// This is sending a message to WndProc before any of the following happens.
ShowWindow(hWnd, SW_SHOWNORMAL);
UpdateWindow(hWnd);
hwndGraphics = hWnd;
xzscale = 2.0 / 0.1; // very zoomed in. Now what?
DXChk(Direct3D.Initialise(hWnd, hInstance, VIDEO_WIDTH, VIDEO_HEIGHT));
// With Field Of View = PI/4 used this:
/*
GlobalEye.x = 0.0f;
GlobalEye.y = 12.4f; //7.2f;
GlobalEye.z = -18.0f + 2.5*xzscale;//DEVICE_RADIUS_INSULATOR_OUTER*xzscale;//-17.8f+
GlobalLookat.x = 0.4f;
GlobalLookat.y = 3.0f;
GlobalLookat.z = DEVICE_RADIUS_INITIAL_FILAMENT_CENTRE * xzscale;
GlobalPlanEye.x = 0.0f;
GlobalPlanEye.y = 35.0f;
GlobalPlanEye.z = (3.44 + 4.1)*0.5*xzscale;
GlobalPlanEye2.x = -0.1f;
GlobalPlanEye2.y = 19.5f;
GlobalPlanEye2.z = 2.8*xzscale;
GlobalPlanLookat.x = GlobalPlanEye.x;
GlobalPlanLookat.y = 0.0f;
GlobalPlanLookat.z = GlobalPlanEye.z + 0.0001;
GlobalPlanLookat2.x = GlobalPlanEye2.x;
GlobalPlanLookat2.y = 0.0f;
GlobalPlanLookat2.z = GlobalPlanEye2.z + 0.0001;*/
GlobalEye.x = -10.4f;
GlobalEye.y = 16.4f; //7.2f;
GlobalEye.z = 44.0f;
GlobalLookat.x = 1.20f;
GlobalLookat.y = 3.0f;
GlobalLookat.z = 72.2f;
GlobalPlanEye.x = 2.9f;
GlobalPlanEye.y = 17.97f;
GlobalPlanEye.z = 71.95f;
GlobalPlanEye2.x = -0.1f;
GlobalPlanEye2.y = 19.5f;
GlobalPlanEye2.z = 2.8*xzscale;
GlobalPlanLookat.x = GlobalPlanEye.x;
GlobalPlanLookat.y = 0.0f;
GlobalPlanLookat.z = GlobalPlanEye.z + 0.0001;
GlobalPlanLookat2.x = GlobalPlanEye2.x;
GlobalPlanLookat2.y = 0.0f;
GlobalPlanLookat2.z = GlobalPlanEye2.z + 0.0001;
newEye.x = 0.0f;
newEye.y = 0.1f;
newEye.z = 40.0f;
newLookat.x = 0.0f;
newLookat.y = 0.0f;
newLookat.z = 72.0f;
// Add vectors in parallel.
cudaError_t cudaStatus;
if (DXChk(Graph[0].InitialiseWithoutBuffers(0, 0, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalEye, GlobalLookat)) +
DXChk(Graph[0].InitialiseBuffers(X1))
)
{
PostQuitMessage(200);
};
if (DXChk(Graph[1].InitialiseWithoutBuffers(0, GRAPH_HEIGHT, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalEye, GlobalLookat)) +
DXChk(Graph[1].InitialiseBuffers(X1))
)
{
PostQuitMessage(201);
};
if (DXChk(Graph[2].InitialiseWithoutBuffers(GRAPH_WIDTH, 0, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalPlanEye, GlobalPlanLookat)) +
DXChk(Graph[2].InitialiseBuffers(X1))
)
{
PostQuitMessage(202);
};
if (DXChk(Graph[3].InitialiseWithoutBuffers(GRAPH_WIDTH, GRAPH_HEIGHT, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalEye, GlobalLookat)) +
DXChk(Graph[3].InitialiseBuffers(X1))
)
{
PostQuitMessage(203);
};
if (NUMGRAPHS > 4) {
if (DXChk(Graph[4].InitialiseWithoutBuffers(GRAPH_WIDTH * 2, 0, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalPlanEye, GlobalPlanLookat)) +
DXChk(Graph[4].InitialiseBuffers(X1))
)
{
PostQuitMessage(204);
};
if (DXChk(Graph[5].InitialiseWithoutBuffers(GRAPH_WIDTH * 2, GRAPH_HEIGHT, GRAPH_WIDTH, GRAPH_HEIGHT, GlobalEye, GlobalLookat)) +
DXChk(Graph[5].InitialiseBuffers(X1))
)
{
PostQuitMessage(204);
};
if (DXChk(Graph[6].InitialiseWithoutBuffers(0, 0, GRAPH_WIDTH*2, GRAPH_HEIGHT, newEye, GlobalLookat, true)) +
DXChk(Graph[6].InitialiseBuffers(X1))
)
{
PostQuitMessage(204);
};
if (DXChk(Graph[7].InitialiseWithoutBuffers(0, GRAPH_HEIGHT, GRAPH_WIDTH * 2, GRAPH_HEIGHT, newEye, GlobalLookat, true)) +
DXChk(Graph[7].InitialiseBuffers(X1))
)
{
PostQuitMessage(204);
};
};
Graph[0].bDisplayTimestamp = false;
Graph[1].bDisplayTimestamp = false;
Graph[2].bDisplayTimestamp = false;
Graph[3].bDisplayTimestamp = false;
Graph[4].bDisplayTimestamp = true;
Graph[5].bDisplayTimestamp = false;
Graph[6].bDisplayTimestamp = true;
Graph[7].bDisplayTimestamp = false;
Direct3D.pd3dDevice->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &p_backbuffer_surface);
if (DXChk(p_backbuffer_surface->GetDC(&surfdc), 1000))
MessageBox(NULL, "GetDC failed", "oh dear", MB_OK);
surfbit = CreateCompatibleBitmap(surfdc, VIDEO_WIDTH, VIDEO_HEIGHT); // EXTRAHEIGHT = 90
SelectObject(surfdc, surfbit);
dibdc = CreateCompatibleDC(surfdc);
long VideoWidth = VIDEO_WIDTH;
long VideoHeight = VIDEO_HEIGHT;
// pasted here just to set up format:
bitmapinfo.bmiHeader.biSize = sizeof(BITMAPINFO);
bitmapinfo.bmiHeader.biWidth = VideoWidth;
bitmapinfo.bmiHeader.biHeight = VideoHeight;
bitmapinfo.bmiHeader.biPlanes = 1;
bitmapinfo.bmiHeader.biBitCount = 24;
bitmapinfo.bmiHeader.biCompression = BI_RGB; // uncompressed
bitmapinfo.bmiHeader.biSizeImage = bitmapinfo.bmiHeader.biHeight;
bitmapinfo.bmiHeader.biXPelsPerMeter = 3000;
bitmapinfo.bmiHeader.biYPelsPerMeter = 3000;
bitmapinfo.bmiHeader.biClrUsed = 0;
bitmapinfo.bmiHeader.biClrImportant = 0;
bitmapinfo.bmiColors->rgbBlue = 0;
bitmapinfo.bmiColors->rgbRed = 0;
bitmapinfo.bmiColors->rgbGreen = 0;
bitmapinfo.bmiColors->rgbReserved = 0;
// dimension DIB and set up pointer to bits
dib = CreateDIBSection(dibdc, &bitmapinfo, DIB_RGB_COLORS, &lpvBits, NULL, 0);
SelectObject(dibdc, dib);
BitBlt(dibdc, 0, 0, VIDEO_WIDTH, VIDEO_HEIGHT, surfdc, 0, 0, SRCCOPY);
rtStart = 0; // timeslice : where to place frames into mp4 files.
for (i = 0; i < NUMAVI; i++)
{
swprintf(szInitialFilenameAvi, L"%s%s_%s", FOLDER, szmp4[i], INITIALMP4);
pSinkWriter[i] = NULL;
hr = InitializeSinkWriter(&(pSinkWriter[i]), &(izStream[i]), szInitialFilenameAvi);
if (!SUCCEEDED(hr)) {
printf("Failed to create mp4 file %d %ls \n", i, szmp4[i]);
}
// hAvi[i] = CreateAvi(szInitialFilenameAvi, AVIFRAMEPERIOD, NULL);
//if (hAvi[i] == 0) {
// printf("Failed to create avi file %d", i);
// getch(); getch(); getch();
//}
};
printf("got to here: Initialized SinkWriters \n");
getch();
// 1000/25 = 40
//ZeroMemory(&opts, sizeof(opts));
//opts.fccHandler = mmioFOURCC('D', 'I', 'B', ' ');//('d','i','v','x');
//opts.dwFlags = 8;
//for (i = 0; i < NUMAVI; i++)
//{
// hresult = SetAviVideoCompression(hAvi[i], dib, &opts, false, hWnd); // always run this for every avi file but can
// // call with false as long as we know opts contains valid information.
// if (hresult != 0) {
// printf("error: i = %d, hresult = %d", i, (long)hresult);
// getch(); getch(); getch();
// };
//};
counter = 0;
//ReleaseDC(hWnd,surfdc);
p_backbuffer_surface->ReleaseDC(surfdc);
GlobalCutaway = true; // dies if true
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
// Main message loop:
memset(&msg, 0, sizeof(MSG));
while (msg.message != WM_QUIT)
{
if (PeekMessage(&msg, NULL, 0U, 0U, PM_REMOVE))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
} else {
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
};
};
UnregisterClass(szWindowClass, wcex.hInstance);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
/* Auxiliary routine: printing a matrix */
void print_matrix(char* desc, lapack_int m, lapack_int n, double* a, lapack_int lda) {
lapack_int i, j;
printf("\n %s\n", desc);
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) printf(" %2.5E", a[i*lda + j]);
printf("\n");
}
}
/* Auxiliary routine: printing a vector of integers */
void print_int_vector(char* desc, lapack_int n, lapack_int* a) {
lapack_int j;
printf("\n %s\n", desc);
for (j = 0; j < n; j++) printf(" %6i", a[j]);
printf("\n");
}
LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam)
{
f64 lowest_vez;
long iLow, iMinor;
Triangle * pTri;
Vertex * pVertex;
long izTri[128];
static bool bInvoked_cuSyst = false;
static long GSCCPU = 0;
int iAntiskips;
int wmId, wmEvent;
int i, j, ctr;
PAINTSTRUCT ps;
HDC hdc;
real time_back_for_Adot;
FILE * file, *fp;
int maxeerr, count, iMin;
WCHAR buf1000[1024];
char buf1001[1024];
int attempts;
real store_h;
char ch, o;
int failed;
RECT rect;
real TotalArea, TotalCharge;
long iVertex;
real mass_avg, mass_SD, mass_min, mass_max;
OPENFILENAME ofn; // common dialog box structure
char szFile[260]; // buffer for file name
char szFilter[1000]; // buffer for file filter
char szfilter[256];
char buffer[256];
TriMesh * temp;
static const real XCENTRE2 = DEVICE_RADIUS_INITIAL_FILAMENT_CENTRE * sin(PI / 32.0);
static const real XCENTRE1 = -XCENTRE2;
static const real YCENTRE = DEVICE_RADIUS_INITIAL_FILAMENT_CENTRE * cos(PI / 32.0);
switch (message)
{
case WM_CREATE:
// Don't ever try doing initialisation here;
// That should be done manually from the menus.
break;
case WM_COMMAND:
wmId = LOWORD(wParam);
wmEvent = HIWORD(wParam);
printf("\nWM_COMMAND: wmId %d\n\n", wmId);
// Ensure that display menu items are consecutive IDs.
// Parse the menu selections:
switch (wmId)
{
case ID_DISPLAY_ONE_D:
// printf("\a\n");
// Don't know why resource.h is not working;
// Maybe some #define overwrites it with 40024.
//wmId += 50007 - 40024;
GlobalSpeciesToGraph = ONE_D;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
// int const GraphFlags[NUMAVI] = { SPECIES_ION, OVERALL, JZAZBXYEZ, OHMSLAW, ONE_D, IONIZEGRAPH };
case ID_DISPLAY_ION:
GlobalSpeciesToGraph = SPECIES_ION;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_TOTAL:
GlobalSpeciesToGraph = OVERALL;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_JZAZBXYEZ:
GlobalSpeciesToGraph = JZAZBXYEZ;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_IONIZEGRAPHS:
GlobalSpeciesToGraph = IONIZEGRAPH;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_OHMS:
GlobalSpeciesToGraph = OHMSLAW;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_DTGRAPH:
GlobalSpeciesToGraph = DTGRAPH;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_ACCELGRAPH:
GlobalSpeciesToGraph = ACCELGRAPHS;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_TENSOROHMS:
GlobalSpeciesToGraph = OHMS2;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_ACCELRELZ:
GlobalSpeciesToGraph = ARELZ;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_DISPLAY_SIGMAEJ:
i = wmId - ID_DISPLAY_NEUT;
GlobalSpeciesToGraph = i;
printf("\nGlobalSpeciesToGraph = %d \n", GlobalSpeciesToGraph);
RefreshGraphs(*pX, GlobalSpeciesToGraph);
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_HELP_ABOUT:
DialogBox(hInst, MAKEINTRESOURCE(IDD_ABOUTBOX), hWnd, About);
break;
case ID_FILE_EXIT:
DestroyWindow(hWnd);
break;
case ID_FILE_SAVECAMERA:
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
memcpy(szfilter, "All\0*.*\0cam\0*.CAM\0\0", 19); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_OVERWRITEPROMPT;
ofn.lpstrTitle = NULL;
if (GetSaveFileName(&ofn) == TRUE)
{
printf("\nsaving camera...");
fp = fopen(ofn.lpstrFile, "wt");
if (fp == 0) {
printf("save failed.\n");
}
else {
fprintf(fp, "%f %f %f ", GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
fprintf(fp, "%f %f %f ", GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
fprintf(fp, "%f %f %f ", GlobalEye.x, GlobalEye.y, GlobalEye.z);
fprintf(fp, "%f %f %f ", GlobalPlanLookat.x, GlobalPlanLookat.y, GlobalPlanLookat.z);
fclose(fp);
printf("done\n");
};
}
else {
printf("there was an issue\n");
};
break;
case ID_FILE_LOADCAMERA:
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
memcpy(szfilter, "All\0*.*\0*.cam\0*.Cam\0\0", 21); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0*.Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
{
printf("\nloading camera...");
fp = fopen(ofn.lpstrFile, "rt");
if (fp == 0) {
printf("failed.\n");
}
else {
rewind(fp);
fscanf(fp, "%f %f %f ", &(GlobalPlanEye.x), &(GlobalPlanEye.y), &(GlobalPlanEye.z));
fscanf(fp, "%f %f %f ", &(GlobalLookat.x), &(GlobalLookat.y), &(GlobalLookat.z));
fscanf(fp, "%f %f %f ", &(GlobalEye.x), &(GlobalEye.y), &(GlobalEye.z));
fscanf(fp, "%f %f %f ", &(GlobalPlanLookat.x), &(GlobalPlanLookat.y), &(GlobalPlanLookat.z));
fclose(fp);
};
RefreshGraphs(*pX, GlobalSpeciesToGraph); // sends data to graphs AND renders them
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
}
else {
printf("file error camera\n");
};
break;
case ID_FILE_LOADGPU:
// Initialize OPENFILENAME:
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
//
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
//strcpy(szFilter, "All\0*.*\0Dat\0*.DAT\0\0");
memcpy(szfilter, "All\0*.*\0Dat\0*.DAT\0\0", 19); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
{
if (bInvoked_cuSyst == false) {
bInvoked_cuSyst = true;
pX->EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
pX->Average_n_T_to_tris_and_calc_centroids_and_minorpos();
pX->Create4Volleys(); // THIS SHOULD NOT ALWAYS BE HERE !!
printf("Called Create4Volleys! This should be removed in favour of loaded iVolley.\n");
cuSyst_host.InvokeHost();
cuSyst_host.PopulateFromTriMesh(pX);
cuSyst_host2.InvokeHost();
cuSyst_host2.PopulateFromTriMesh(pX);
// transfer information.
PerformCUDA_Invoke_Populate(
&cuSyst_host,
NUMVERTICES,
pX->InnermostFrillCentroidRadius,
pX->OutermostFrillCentroidRadius,
pX->numStartZCurrentTriangles,
pX->numEndZCurrentTriangles);
};
cuSyst_host.Load(ofn.lpstrFile);
};
printf("Populate *pX\n");
cuSyst_host.PopulateTriMesh(pX);
printf("send to device\n");
cuSyst_host.SendToDevice(cuSyst1);
printf("done\n");
// Debug: redelaun on load:
pX->RefreshVertexNeighboursOfVerticesOrdered();
// pX->Redelaunerize(true, true);
// This isn't actually helpful?
// pX->RefreshVertexNeighboursOfVerticesOrdered();
// pX->X[89450-BEGINNING_OF_CENTRAL].GetTriIndexArray(izTri);
// printf("89450 : %d %d %d %d %d %d \n",
// izTri[0], izTri[1], izTri[2], izTri[3], izTri[4], izTri[5]);
//
pX->EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
// pX->Average_n_T_to_tris_and_calc_centroids_and_minorpos(); // Obviates some of our flip calcs to replace tri n,T
// not sure if needed .. just for calc centroid .. they do soon get wiped out anyway.
cuSyst_host.PopulateFromTriMesh(pX);
cuSyst_host.SendToDevice(cuSyst1); // check this is right
cuSyst2.CopyStructuralDetailsFrom(cuSyst1);
cuSyst3.CopyStructuralDetailsFrom(cuSyst1);
// Let's assume these always carry through during GPU runs.
// It certainly does not work as it stands if you don't populate them all the same, put it that way!!
printf("sent back re-delaunerized system\n");
break;
case ID_FILE_SAVEBINARY:
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
memcpy(szfilter, "All\0*.*\0*.dat\0*.Dat\0\0", 21); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_OVERWRITEPROMPT;
ofn.lpstrTitle = NULL;
// Display the Open dialog box.
if (GetSaveFileName(&ofn) == TRUE)
{
printf("\nsaving system...");
pX->Save(ofn.lpstrFile);
printf("done\n");
}
else {
printf("there was an issue\n");
};
break;
case ID_FILE_SAVETEXT:
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
//
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
//strcpy(szFilter,"All\0*.*\0Text\0*.TXT\0");
memcpy(szfilter, "All\0*.*\0Dat\0*.DAT\0\0", 19); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_OVERWRITEPROMPT;
ofn.lpstrTitle = NULL;
// Display the Open dialog box.
if (GetSaveFileName(&ofn) == TRUE)
{
printf("\nsaving system...");
pX->SaveText(ofn.lpstrFile);
printf("done\n");
}
else {
printf("there was an issue\n");
};
break;
case ID_FILE_LOAD:
// Initialize OPENFILENAME:
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hWnd;
ofn.lpstrFile = szFile;
//
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
//strcpy(szFilter, "All\0*.*\0Dat\0*.DAT\0\0");
memcpy(szfilter, "All\0*.*\0Dat\0*.DAT\0\0", 19); // strcpy stops at first null !!
ofn.lpstrFilter = szfilter; //"All\0*.*\0Dat\0*.DAT\0\0"; // summat weird about that example code
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
{
pX->Load(ofn.lpstrFile);
printf("\ndoing nothing...");
};
break;
case ID_RUN_SIMULATIONSTEPS:
GlobalSwitchBox = 0;
DialogBox(hInst, MAKEINTRESOURCE(IDD_DIALOG1), hWnd, SetupBox);
// that will not return with steps_remaining unset.
if (steps_remaining > 0)
SetTimer(hWnd, 1, 1, NULL); // 1 millisecond delay
break;
case ID_RUN_SIMULATIONSTEPS_CPU:
GlobalSwitchBox = 0;
steps_remaining_CPU = 1;
// that will not return with steps_remaining unset.
if (steps_remaining_CPU > 0)
SetTimer(hWnd, 2, 1, NULL); // 1 millisecond delay
break;
case ID_INITIALISE_ZAPTHEBACK:
Zap_the_back();
printf("done");
RefreshGraphs(*pX, GlobalSpeciesToGraph); // sends data to graphs AND renders them
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case ID_RUN_STOP:
steps_remaining = 0;
steps_remaining_CPU = 0;
break;
case ID_INITIALISE_IONISATIONSTEPS:
break;
default:
return DefWindowProc(hWnd, message, wParam, lParam);
}
break;
case WM_TIMER:
KillTimer(hWnd, wParam);
report_time(0);
if (wParam == 1)
{
if (bInvoked_cuSyst == false) {
bInvoked_cuSyst = true;
pX->EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
pX->Average_n_T_to_tris_and_calc_centroids_and_minorpos();
//
// printf("tri 340: %d %d %d \n%1.14E %1.14E \n%1.14E %1.14E \n%1.14E %1.14E\n",
// pX->T[340].cornerptr[0] - pX->X, pX->T[340].cornerptr[1] - pX->X, pX->T[340].cornerptr[2] - pX->X,
// pX->T[340].cornerptr[0]->pos.x, pX->T[340].cornerptr[0]->pos.y,
// pX->T[340].cornerptr[1]->pos.x, pX->T[340].cornerptr[1]->pos.y,
// pX->T[340].cornerptr[2]->pos.x, pX->T[340].cornerptr[2]->pos.y);
// printf("tri 340 periodic %d \n", pX->T[340].periodic);
// getch();
cuSyst_host.InvokeHost();
cuSyst_host.PopulateFromTriMesh(pX);
cuSyst_host2.InvokeHost();
cuSyst_host2.PopulateFromTriMesh(pX);
// cuSyst_host.Output("n0.txt");
PerformCUDA_Invoke_Populate(
&cuSyst_host,
NUMVERTICES,
pX->InnermostFrillCentroidRadius,
pX->OutermostFrillCentroidRadius,
pX->numStartZCurrentTriangles,
pX->numEndZCurrentTriangles);
}
// Run 1 step:
printf("evaltime %1.9E\n", evaltime);
// PerformCUDA_RunStepsAndReturnSystem_Debug(&cuSyst_host, &cuSyst_host2, pX, &X3, pXnew);
PerformCUDA_RunStepsAndReturnSystem(&cuSyst_host);
// printf("Stamp GPU over CPU y/n:");
// do {
// o = getch();
// } while ((o != 'y') && (o != 'n'));
// printf("%c\n\n", o);
// if (o == 'y')
// Auto-save system:
if (GlobalStepsCounter % DATA_SAVE_FREQUENCY == 0)
{
sprintf(szFile, "auto%d.dat", GlobalStepsCounter);
// SAVE cuSyst:
cuSyst_host.Save(szFile);
}
// even number of steps should lead us back to pX having it
steps_remaining--;
GlobalStepsCounter++;
printf("Done steps: %d || Remaining this run: %d\n\n", GlobalStepsCounter, steps_remaining);
if ((GlobalStepsCounter % GRAPHICS_FREQUENCY == 0) ||
(GlobalStepsCounter % REDELAUN_FREQUENCY == 0) ||
(steps_remaining == 0))
{
cuSyst_host.PopulateTriMesh(pX); // vertex n is populated into the minor array available on CPU
printf("pulled back to host\n");
}
}
else {
pX->Advance(pXnew, &X3);
temp = pX;
pX = pXnew;
pXnew = temp;
steps_remaining_CPU--;
GSCCPU++;
printf("Done steps CPU: %d || Remaining this run: %d\n\n", GSCCPU, steps_remaining_CPU);
sprintf(buf1001, "autosaveCPU%d.dat", GSCCPU);
pX->Save(buf1001);
printf("saved as %s\n", buf1001);
};
printf("%s\n", report_time(1));
if (GlobalStepsCounter % GRAPHICS_FREQUENCY == 0)
{
// make video frames:
for (i = 0; i < NUMAVI; i++)
{
printf("i = %d \n", i);
RefreshGraphs(*pX, GraphFlags[i]); // sends data to graphs AND renders them
// ::PlanViewGraphs1(*pX);
printf(".DISHMOPS.\n");
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
printf("got to here 7\n");
if (DXChk(p_backbuffer_surface->GetDC(&surfdc), 100))
MessageBox(NULL, "GetDC failed", "oh dear", MB_OK);
//SelectObject(surfdc,surfbit);
BitBlt(dibdc, 0, 0, VIDEO_WIDTH, VIDEO_HEIGHT, surfdc, 0, 0, SRCCOPY);
p_backbuffer_surface->ReleaseDC(surfdc);
//GetDIBits(dibdc, dib, 0, VIDEO_HEIGHT, dwBits, &bmi, 0);
// just use lpvBits
// getting hAvi[i] == 0 for the last one.
// But on debug? No such thing? Same.
printf("Adding frame to %d : \n", i);
hresult = WriteFrame(pSinkWriter[i], izStream[i], rtStart);
//hresult = AddAviFrame(hAvi[i], dib);
if (hresult != 0) printf("\n******************************************************* \n"
"hresult = %d\n********************************************** \n", hresult);
};
rtStart += VIDEO_FRAME_DURATION;
// sprintf(szFile, "System_%d", GlobalStepsCounter);
// pX->SaveText(szFile);
};
if (GlobalStepsCounter % (AVI_FILE_PINCHOFF_FREQUENCY * GRAPHICS_FREQUENCY) == 0)
{
for (i = 0; i < NUMAVI; i++)
{
// now have to pinch out avi file and make a new one
pSinkWriter[i]->Finalize();
// CloseAvi(hAvi[i]);
swprintf(buf1000, L"%s%s_%d.mp4", FOLDER, szmp4[i], GlobalStepsCounter);
//hAvi[i] = CreateAvi(buf1000, AVIFRAMEPERIOD, NULL);
SafeRelease(&pSinkWriter[i]);
pSinkWriter[i] = NULL;
rtStart = 0;
if (!SUCCEEDED(InitializeSinkWriter(&(pSinkWriter[i]), &(izStream[i]), szmp4[i])))
{
printf("Failed to create %ls \n", buf1000);
getch();
}
//hresult = SetAviVideoCompression(hAvi[i], dib, &opts, false, hWnd);
//if (hresult != 0) printf("\n******************************************************* \n"
// "SetAviVideoCompression: hresult = %d\n********************************************** \n", hresult);
};
};
RefreshGraphs(*pX,GlobalSpeciesToGraph); // sends data to graphs AND renders them
Direct3D.pd3dDevice->Present( NULL, NULL, NULL, NULL );
if (GlobalStepsCounter % REDELAUN_FREQUENCY == 0)
{
Setup_residual_array(); // We have not specifically checked that cuSyst1 is the
// most up-to-date, but it doesn't matter really.
pX->RefreshVertexNeighboursOfVerticesOrdered();
long iFlips = pX->Redelaunerize(true, true);
// Send back to GPU:
pX->EnsureAnticlockwiseTriangleCornerSequences_SetupTriMinorNeighboursLists();
// Appears in lots of places so hard to believe data is not updated.
// There is no wrapping on GPU?... or is there?
// Need to debug inside routine and find out what it is doing for these two triangles.
// pX->Average_n_T_to_tris_and_calc_centroids_and_minorpos(); // Obviates some of our flip calcs to replace tri n,T
// not sure if needed .. just for calc centroid .. they do soon get wiped out anyway.
cuSyst_host.PopulateFromTriMesh(pX);// 1. Does it update lists? --- some had to be updated on CPU first.
// Seems to copy structural information as well as data. n is copied from n_minor on CPU.
//cuSyst1.SendToHost(cuSyst_host2);
//cuSyst_host.ReportDifferencesHost(cuSyst_host2);
cuSyst_host.SendToDevice(cuSyst1);
cuSyst2.CopyStructuralDetailsFrom(cuSyst1);
cuSyst3.CopyStructuralDetailsFrom(cuSyst1);
// Let's assume these always carry through during GPU runs.
// It certainly does not work as it stands if you don't populate them all the same, put it that way!!
// We don't actually know which system is pointed to by pX1 that is the initial system
// for the steps --- so just copy it over all of them
cuSyst_host.SendToDevice(cuSyst2);
cuSyst_host.SendToDevice(cuSyst3);
// There almost certainly is a better way. But this is unimportant for now.
printf("sent back re-delaunerized system\n");
// Now reset A values more carefully in the sent-back system:
if (iFlips == 0) {
printf(" NO DELAUNAY FLIPS");
for (int sj = 0; sj < 10; sj++) printf("-\n");
}
if (iFlips > 0) {
Go_visit_the_other_file();
};
};
if (steps_remaining > 0) {
SetTimer(hWnd, 1, DELAY_MILLISECS, NULL);
printf("Waiting %d milliseconds to allow user input.\n", DELAY_MILLISECS);
};
if (steps_remaining_CPU > 0) {
SetTimer(hWnd, 2, DELAY_MILLISECS, NULL);
printf("Waiting %d milliseconds to allow user input.\n", DELAY_MILLISECS);
};
/*
if (wParam == 1) {
sprintf(buf1000, "autosaveGPU%d.dat", GlobalStepsCounter);
} else {
sprintf(buf1000, "autosaveCPU%d.dat", GSCCPU);
}
pX->Save(buf1000);
printf("saved as %s\n", buf1000);
lowest_vez = 0.0;
iLow = 0;
pTri = pX->T;
for (iMinor = 0; iMinor < BEGINNING_OF_CENTRAL; iMinor++)
{
if ((pTri->u8domain_flag == DOMAIN_TRIANGLE) && (pX->pData[iMinor].vez < lowest_vez)) {
lowest_vez = pX->pData[iMinor].vez;
iLow = iMinor;
}
++pTri;
}
printf("Tris: lowest_vez %1.14E iLow %d \n", lowest_vez, iLow);
iLow = 0;
lowest_vez = 0.0;
pVertex = pX->X;
for (; iMinor < NMINOR; iMinor++)
{
if ((pVertex->flags == DOMAIN_VERTEX) && (pX->pData[iMinor].vez < lowest_vez)) {
lowest_vez = pX->pData[iMinor].vez;
iLow = iMinor;
}
++pVertex;
}
printf("Vertices: lowest_vez %1.14E iLow %d \n\n", lowest_vez, iLow);
printf("save ascii?");
do {
o = getch();
} while ((o != 'y') && (o != 'n'));
printf("%c\n", o);
if (o == 'y') {
sprintf(buf1000, "SaveGPUtext1_trackedAA");
pX->SaveText(buf1000);
printf("Ascii file saved %s.\n",buf1000);
}
*/
printf("steps_remaining GPU: %d CPU: %d\n",steps_remaining, steps_remaining_CPU);
break;
case WM_KEYDOWN:
switch (wParam)
{
case 'W':
GlobalEye.z += 1.0f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'S':
GlobalEye.z -= 1.0f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'A':
GlobalEye.x -= 0.8f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'D':
GlobalEye.x += 0.8f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'E':
GlobalEye.y += 0.8f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'C':
GlobalEye.y -= 0.8f;
printf("GlobalEye %f %f %f \n",
GlobalEye.x, GlobalEye.y, GlobalEye.z);
break;
case 'V':
GlobalLookat.z -= 0.4f;
printf("GlobalEye %f %f %f GlobalLookat %f %f %f\n",
GlobalEye.x, GlobalEye.y, GlobalEye.z, GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case 'R':
GlobalLookat.z += 0.4f;
printf("GlobalEye %f %f %f GlobalLookat %f %f %f\n",
GlobalEye.x, GlobalEye.y, GlobalEye.z, GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case 'F':
GlobalLookat.x -= 0.4f;
printf("GlobalEye %f %f %f GlobalLookat %f %f %f\n",
GlobalEye.x, GlobalEye.y, GlobalEye.z, GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case 'G':
GlobalLookat.x += 0.4f;
printf("GlobalEye %f %f %f GlobalLookat %f %f %f\n",
GlobalEye.x, GlobalEye.y, GlobalEye.z, GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case 'T':
GlobalLookat.y += 0.4f;
printf("GlobalLookat %f %f %f\n",
GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case 'B':
GlobalLookat.y -= 0.4f;
printf("GlobalLookat %f %f %f\n",
GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
break;
case '+':
GlobalCutaway = !GlobalCutaway;
break;
case 'Y':
case '<':
GlobalEye.x = -10.4; GlobalEye.y = 16.4; GlobalEye.z = 44.0;
GlobalLookat.x = -3.6; GlobalLookat.y = 3.0; GlobalLookat.z = 72.2;
printf("GlobalEye %f %f %f GlobalLookat %f %f %f\n",
GlobalEye.x, GlobalEye.y, GlobalEye.z, GlobalLookat.x, GlobalLookat.y, GlobalLookat.z);
GlobalPlanEye.x = 7.1; GlobalPlanEye.y = 11.5; GlobalPlanEye.z = 71.35;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case '_':
case '-':
case '>':
GlobalPlanEye.x = 7.0; GlobalPlanEye.y = 14.0; GlobalPlanEye.z = 71.0;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'U':
GlobalPlanEye.z += 0.6f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'J':
GlobalPlanEye.z -= 0.6f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'H':
GlobalPlanEye.x -= 0.6f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'K':
GlobalPlanEye.x += 0.6f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'I':
GlobalPlanEye.y *= 1.25f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'M':
GlobalPlanEye.y *= 0.8f;
printf("GlobalPlanEye %f %f %f\n",
GlobalPlanEye.x, GlobalPlanEye.y, GlobalPlanEye.z);
break;
case 'N':
GlobalboolDisplayMeshWireframe = !GlobalboolDisplayMeshWireframe;
//Graph1.boolDisplayMeshWireframe = (!(Graph1.boolDisplayMeshWireframe));
break;
case '9':
GlobalRenderLabels = false;
break;
case '5':
GlobalRenderLabels = true;
GlobalWhichLabels = 0;// iTri
break;
case '8':
GlobalRenderLabels = true;
GlobalWhichLabels = 1;//T
break;
case '7':
GlobalRenderLabels = true;
GlobalWhichLabels = 2;//v
break;
case '6':
GlobalRenderLabels = true;
GlobalWhichLabels = 3; //n
break;
case '1':
GlobalColoursPlanView = 1;//v
break;
case '4':
GlobalColoursPlanView = 0;//nothing
break;
case '2':
GlobalColoursPlanView = 2;//T
break;
case '0':
steps_remaining = 0;
break;
case 'Q':
newEye.z += 5.0f;
printf("newEye.z %1.9E\n", newEye.z);
break;
case 'P':
newEye.z -= 5.0f;
printf("newEye.z %1.9E\n", newEye.z);
break;
case 'X':
newEye.y += 5.0f;
printf("newEye.y %1.9E\n", newEye.y);
break;
case 'Z':
newEye.y -= 5.0f;
printf("newEye.y %1.9E\n", newEye.y);
break;
case 'O':
newLookat.z -= 3.0f;
printf("newLookat.z %1.9E\n", newLookat.z);
break;
case ';':
case ':':
newLookat.z += 3.0f;
printf("newLookat.z %1.9E\n", newLookat.z);
break;
default:
return DefWindowProc(hWnd, message, wParam, lParam);
};
//PlanViewGraphs1(*pX);
RefreshGraphs(*pX, GlobalSpeciesToGraph); // sends data to graphs AND renders them
Direct3D.pd3dDevice->Present(NULL, NULL, NULL, NULL);
break;
case WM_PAINT:
// Not sure, do we want to do this?
// RefreshGraphs(*pX,); // sends data to graphs AND renders them
GetUpdateRect(hWnd, &rect, FALSE);
if (Direct3D.pd3dDevice != NULL)
Direct3D.pd3dDevice->Present(&rect, &rect, NULL, NULL);
ValidateRect(hWnd, NULL);
break;
case WM_DESTROY:
DeleteObject(dib);
DeleteDC(dibdc);
for (i = 0; i < NUMAVI; i++)
{
pSinkWriter[i]->Finalize();
SafeRelease(&(pSinkWriter[i]));
}
// CloseAvi(hAvi[i]);
// _controlfp_s(0, cw, _MCW_EM); // Line A
PerformCUDA_Revoke();
MFShutdown();
CoUninitialize();
PostQuitMessage(0);
break;
default:
return DefWindowProc(hWnd, message, wParam, lParam);
}
return 0;
}
// Message handler for about box.
// Dialog procedure for the About box.
// Accepts WM_INITDIALOG and dismisses the dialog on OK or Cancel;
// everything else is left to the default dialog handling.
INT_PTR CALLBACK About(HWND hDlg, UINT message, WPARAM wParam, LPARAM lParam)
{
	UNREFERENCED_PARAMETER(lParam);
	if (message == WM_INITDIALOG)
		return (INT_PTR)TRUE;
	if (message == WM_COMMAND)
	{
		const WORD cmd = LOWORD(wParam);
		if (cmd == IDOK || cmd == IDCANCEL)
		{
			// Close the dialog, reporting which button dismissed it.
			EndDialog(hDlg, cmd);
			return (INT_PTR)TRUE;
		}
	}
	return (INT_PTR)FALSE;
}
// Dialog procedure for the setup box.
// Depending on the global mode switch (GlobalSwitchBox) the single edit
// control is interpreted either as a new step count (mode 0) or as a new
// timestep h (mode != 0).  On OK the input is validated; invalid input
// pops a message box and keeps the dialog open.
INT_PTR CALLBACK SetupBox(HWND hDlg, UINT message, WPARAM wParam, LPARAM lParam)
{
	UNREFERENCED_PARAMETER(lParam);
	char buffer[2048];
	char string[1024];
	real newh;
	switch (message)
	{
	case WM_INITDIALOG:
		// In "edit h" mode, show the current value of h as the prompt.
		sprintf(buffer, "New h? (present = %1.10E)", h);
		if (GlobalSwitchBox)
			SetDlgItemText(hDlg, IDC_STATIC, buffer);
		return (INT_PTR)TRUE;
	case WM_COMMAND:
		if (LOWORD(wParam) == IDOK)
		{
			// try to read data from edit control:
			GetDlgItemText(hDlg, IDC_EDIT1, buffer, 2048);
			if (GlobalSwitchBox == 0)
			{
				// Mode 0: interpret input as a step count (nonnegative integer).
				steps_remaining = atoi(buffer);
				if (steps_remaining >= 0)
				{
					EndDialog(hDlg, LOWORD(wParam));
				}
				else {
					// MessageBox signature is (hwnd, lpText, lpCaption, type):
					// the instruction goes in the body, the short title in the
					// caption (the original call had these two swapped).
					MessageBox(NULL, "Enter a nonnegative integer.", "Incorrect value", MB_OK);
				};
			}
			else {
				// Mode != 0: interpret input as a new timestep h (must be > 0).
				newh = atof(buffer);
				if (newh > 0.0)
				{
					EndDialog(hDlg, LOWORD(wParam));
					sprintf(string, "h = %1.10E\n", newh);
					h = newh;
					MessageBox(NULL, string, "New value of h", MB_OK);
				}
				else {
					// Covers negative, zero and unparseable input
					// (atof returns 0.0 on parse failure).
					MessageBox(NULL, "Enter a positive h.", "Invalid value", MB_OK);
				};
			};
			return (INT_PTR)TRUE;
		}
		break;
	}
	return (INT_PTR)FALSE;
}
|
466ca2cea3c370822f2880b5277ed9ee69a95063.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <THH/THHNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
// Device-side integer minimum (avoids relying on host headers in kernels).
__device__ inline int min(int a, int b) {
  return (b < a) ? b : a;
}
// Device-side integer maximum (avoids relying on host headers in kernels).
__device__ inline int max(int a, int b) {
  return (b > a) ? b : a;
}
// Forward average pooling over an NCHW tensor: one thread per output element.
// CUDA_KERNEL_LOOP is a grid-stride loop, so any launch config covers all
// `nthreads` outputs.  Divisor selection: use_divisor -> divisor_override;
// else count_include_pad -> full (padded) window size; else clipped window size.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool2d_out_cuda_frame(const int nthreads,
    const scalar_t* const bottom_data, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    scalar_t* const top_data, const int divisor_override,
    const bool count_include_pad, const bool use_divisor) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat output index into (n, c, ph, pw) in NCHW order.
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    // Pooling window bounds in the padded input coordinate system.
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + kernel_h, height + pad_h);
    int wend = min(wstart + kernel_w, width + pad_w);
    // Window area including padding (used when count_include_pad is set).
    const int pool_size = (hend - hstart) * (wend - wstart);
    // Clip the window to the real input extent.
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    if (hstart >= hend || wstart >= wend) {
      // Window lies entirely in the padding: emit zero.
      top_data[index] = scalar_t(0);
      continue;
    }
    // Accumulate in accscalar_t (e.g. float for half inputs) for precision.
    accscalar_t aveval = accscalar_t(0);
    const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        aveval += bottom_slice[h * width + w];
      }
    }
    int divide_factor;
    if (use_divisor) {
      divide_factor = divisor_override;
    } else {
      if(count_include_pad) {
        divide_factor = pool_size;
      } else {
        // Clipped (unpadded) window area.
        divide_factor = (hend - hstart) * (wend - wstart);
      }
    }
    top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor);
  }
}
// Forward average pooling, channels-last (NHWC) layout.
// Same math as avg_pool2d_out_cuda_frame; only the index decomposition and the
// strides into bottom_data differ (channel is the fastest-varying dimension).
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool2d_out_cuda_frame_nhwc(const int nthreads,
    const scalar_t* const bottom_data, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    scalar_t* const top_data, const int divisor_override,
    const bool count_include_pad, const bool use_divisor) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat output index into (n, ph, pw, c) in NHWC order.
    const int c = index % channels;
    const int pw = (index / channels) % pooled_width;
    const int ph = (index / channels / pooled_width) % pooled_height;
    const int n = index / channels / pooled_width / pooled_height;
    // Pooling window bounds in the padded input coordinate system.
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + kernel_h, height + pad_h);
    int wend = min(wstart + kernel_w, width + pad_w);
    // Window area including padding (used when count_include_pad is set).
    const int pool_size = (hend - hstart) * (wend - wstart);
    // Clip the window to the real input extent.
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    if (hstart >= hend || wstart >= wend) {
      // Window lies entirely in the padding: emit zero.
      top_data[index] = scalar_t(0);
      continue;
    }
    accscalar_t aveval = accscalar_t(0);
    // Slice base points at (n, 0, 0, c); spatial step is `channels` elements.
    const scalar_t* const bottom_slice = bottom_data + n * channels * height * width + c;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        aveval += bottom_slice[(h * width + w) * channels];
      }
    }
    int divide_factor;
    if (use_divisor) {
      divide_factor = divisor_override;
    } else {
      if(count_include_pad) {
        divide_factor = pool_size;
      } else {
        // Clipped (unpadded) window area.
        divide_factor = (hend - hstart) * (wend - wstart);
      }
    }
    top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor);
  }
}
// Backward average pooling, NCHW layout: one thread per INPUT element.
// Each thread sums the contributions of every pooling window that covered its
// input position, dividing each contribution by that window's divisor (which
// must be recomputed per window, matching the forward pass).
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool2d_backward_out_cuda_frame(const int nthreads, const scalar_t* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w,
    scalar_t* const bottom_diff, const int divisor_override,
    bool count_include_pad, bool use_divisor) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    // (h, w) are expressed in padded coordinates so the window-range
    // arithmetic below matches the forward pass.
    const int w = index % width + pad_w;
    const int h = (index / width) % height + pad_h;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    // Range of pooling windows whose footprint includes (h, w).
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    accscalar_t gradient = accscalar_t(0);
    const scalar_t* const top_diff_slice =
      top_diff + (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // figure out the pooling size
        int hstart = ph * stride_h - pad_h;
        int wstart = pw * stride_w - pad_w;
        int hend = min(hstart + kernel_h, height + pad_h);
        int wend = min(wstart + kernel_w, width + pad_w);
        int pool_size = (hend - hstart) * (wend - wstart);
        hstart = max(hstart, 0);
        wstart = max(wstart, 0);
        hend = min(hend, height);
        wend = min(wend, width);
        if (hstart >= hend || wstart >= wend) {
          // This window was all-padding in the forward pass: contributed 0.
          continue;
        }
        // Same divisor selection as the forward kernel.
        int divide_factor;
        if (use_divisor) {
          divide_factor = divisor_override;
        } else {
          if(count_include_pad) {
            divide_factor = pool_size;
          } else {
            divide_factor = (hend - hstart) * (wend - wstart);
          }
        }
        gradient += top_diff_slice[ph * pooled_width + pw] / divide_factor;
      }
    }
    bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
  }
}
// Backward average pooling, channels-last (NHWC) layout: one thread per
// INPUT element, summing contributions of all covering pooling windows.
// NOTE(review): unlike the NCHW variant above, pad_h/pad_w are NOT added to
// (h, w) after the index decomposition here, so for pad > 0 the window-range
// arithmetic differs from the NCHW kernel — verify this is intentional.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool2d_backward_out_cuda_frame_nhwc(const int nthreads,
    const scalar_t* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w,
    scalar_t* const bottom_diff, const int divisor_override,
    bool count_include_pad, bool use_divisor) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat input index into (n, h, w, c) in NHWC order.
    const int c = index % channels;
    const int w = (index / channels) % width;
    const int h = (index / channels / width) % height;
    const int n = index / channels / width / height;
    // Range of pooling windows whose footprint includes (h, w).
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    accscalar_t gradient = accscalar_t(0);
    // Slice base points at (n, 0, 0, c); spatial step is `channels` elements.
    const scalar_t* const top_diff_slice = top_diff + n * channels * pooled_height * pooled_width + c;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // figure out the pooling size
        int hstart = ph * stride_h - pad_h;
        int wstart = pw * stride_w - pad_w;
        int hend = min(hstart + kernel_h, height + pad_h);
        int wend = min(wstart + kernel_w, width + pad_w);
        int pool_size = (hend - hstart) * (wend - wstart);
        hstart = max(hstart, 0);
        wstart = max(wstart, 0);
        hend = min(hend, height);
        wend = min(wend, width);
        if (hstart >= hend || wstart >= wend) {
          // This window was all-padding in the forward pass: contributed 0.
          continue;
        }
        // Same divisor selection as the forward kernel.
        int divide_factor;
        if (use_divisor) {
          divide_factor = divisor_override;
        } else {
          if(count_include_pad) {
            divide_factor = pool_size;
          } else {
            divide_factor = (hend - hstart) * (wend - wstart);
          }
        }
        gradient += top_diff_slice[(ph * pooled_width + pw) * channels] / divide_factor;
      }
    }
    bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
  }
}
// Host-side driver for the forward pass.
// Validates and normalizes kernel/stride/padding (1-element -> square),
// computes the output shape, resizes `output`, and dispatches the NCHW or
// NHWC kernel according to the input's suggested memory format.
void avg_pool2d_out_cuda_template(
  Tensor& output,
  const Tensor& input_,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override)
{
  TensorArg output_arg{ output, "output", 1 };
  TensorArg input_arg{ input_, "input_", 2 };
  checkAllSameGPU("avg_pool2d_out_cuda", {output_arg, input_arg});
  // #20866, #22032: Guarantee this for the official C++ API?
  TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
    "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints");
  const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
  // A single int means a square kernel; same convention for stride/padding.
  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
  TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2,
    "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints");
  // Omitted stride defaults to the kernel size (non-overlapping windows).
  const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
  const int dW = stride.empty() ? kW :
                 stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
  TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
    "avg_pool2d: padding must either be a single int, or a tuple of two ints");
  const int padH = safe_downcast<int, int64_t>(padding[0]);
  const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
  const auto memory_format = input_.suggest_memory_format();
  if (memory_format == at::MemoryFormat::ChannelsLast){
    // channels_last is only defined for 4D (batched) tensors.
    TORCH_CHECK(input_.ndimension() == 4,
      "non-empty 4D (batch mode) tensor expected for input with channels_last layout");
  } else {
    TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
      "non-empty 3D or 4D (batch mode) tensor expected for input");
  }
  TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0,
    "divisor must be not zero");
  // 3D input is treated as a batch of one.
  const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1;
  const int64_t nInputPlane = input_.size(-3);
  const int64_t inputHeight = input_.size(-2);
  const int64_t inputWidth = input_.size(-1);
  const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
  const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);
  pool2d_shape_check(
    input_,
    kH, kW, dH, dW, padH, padW, 1, 1,
    nInputPlane,
    inputHeight, inputWidth,
    outputHeight, outputWidth);
  // Kernels assume contiguous data in the chosen memory format.
  Tensor input = input_.contiguous(memory_format);
  output.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
  const int32_t count = safe_downcast<int32_t, int64_t>(output.numel());
  const uint32_t num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
  const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads);
  bool use_divisor = divisor_override.has_value();
  const auto divisor_override_value = use_divisor ? divisor_override.value() : 0;
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
    "avg_pool2d_out_cuda_frame",
    [&] {
      AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_out_cuda_frame", [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        scalar_t *output_data = output.data_ptr<scalar_t>();
        scalar_t *input_data = input.data_ptr<scalar_t>();
        switch (memory_format){
          case MemoryFormat::ChannelsLast: {
            // Restride the (already correctly sized) output to channels_last.
            output.unsafeGetTensorImpl()->empty_tensor_restride(MemoryFormat::ChannelsLast);
            hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame_nhwc<scalar_t, accscalar_t>)
              , dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                count,
                input_data,
                nbatch,
                nInputPlane,
                inputHeight, inputWidth,
                outputHeight, outputWidth,
                kH, kW,
                dH, dW,
                padH, padW,
                output_data,
                divisor_override_value,
                count_include_pad, use_divisor);
            break;
          }
          case MemoryFormat::Contiguous: {
            hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame<scalar_t, accscalar_t>)
              , dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                count,
                input_data,
                nbatch,
                nInputPlane,
                inputHeight, inputWidth,
                outputHeight, outputWidth,
                kH, kW,
                dH, dW,
                padH, padW,
                output_data,
                divisor_override_value,
                count_include_pad, use_divisor);
            break;
          }
          default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
        }
      });
    }
  );
  // Surface launch-configuration errors from the kernel launch above.
  AT_CUDA_CHECK(hipGetLastError());
  if (input.ndimension() == 3) {
    // Drop the synthetic batch dimension for 3D inputs.
    output.resize_({nInputPlane, outputHeight, outputWidth});
  }
}
// Host-side driver for the backward pass.
// Mirrors the forward driver's argument normalization and layout dispatch;
// resizes `gradInput` to the input's shape and launches one thread per
// input element.  Returns `gradInput`.
Tensor& avg_pool2d_backward_out_cuda_template(
  Tensor& gradInput,
  const Tensor& gradOutput_,
  const Tensor& input_,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override)
{
  TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
  TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 };
  TensorArg input_arg{ input_, "input_", 3 };
  checkAllSameGPU("avg_pool2d_backward_out_cuda",
                  {gradInput_arg, gradOutput_arg, input_arg});
  // #20866, #22032: Guarantee this for the official C++ API?
  TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
    "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints");
  const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
  // A single int means a square kernel; same convention for stride/padding.
  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
  TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2,
    "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints");
  // Omitted stride defaults to the kernel size (non-overlapping windows).
  const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
  const int dW = stride.empty() ? kW :
                 stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
  TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
    "avg_pool2d: padding must either be a single int, or a tuple of two ints");
  const int padH = safe_downcast<int, int64_t>(padding[0]);
  const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
  TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0,
    "divisor must be not zero");
  const auto memory_format = input_.suggest_memory_format();
  if (memory_format == at::MemoryFormat::ChannelsLast) {
    // channels_last is only defined for 4D (batched) tensors.
    TORCH_CHECK(input_.ndimension() == 4,
      "non-empty 4D (batch mode) tensor expected for input with channels_last layout");
  } else {
    TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
      "non-empty 3D or 4D (batch mode) tensor expected for input");
  }
  // Kernels assume contiguous data in the chosen memory format.
  const Tensor input = input_.contiguous(memory_format);
  const Tensor gradOutput = gradOutput_.contiguous(memory_format);
  const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
  const int64_t nInputPlane = input.size(-3);
  const int64_t inputHeight = input.size(-2);
  const int64_t inputWidth = input.size(-1);
  const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);
  const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
  // Check gradOutput's shape matches what the forward pass would produce.
  avg_pool2d_backward_shape_check(
    input_,
    gradOutput_,
    nbatch,
    kH, kW, dH, dW, padH, padW,
    nInputPlane,
    inputHeight, inputWidth,
    outputHeight, outputWidth);
  gradInput.resize_as_(input);
  // Backward launches one thread per INPUT element.
  const int32_t count = safe_downcast<int32_t, int64_t>(input.numel());
  const uint32_t num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
  const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads);
  bool use_divisor = divisor_override.has_value();
  const auto divisor_override_value = use_divisor ? divisor_override.value() : 0;
  AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
    "avg_pool2d_backward_out_cuda_frame",
    [&] {
      AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_backward_out_cuda_frame", [&] {
        using accscalar_t = acc_type<scalar_t, true>;
        scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
        scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
        switch (memory_format) {
          case MemoryFormat::ChannelsLast: {
            // Restride the (already correctly sized) gradInput to channels_last.
            gradInput.unsafeGetTensorImpl()->empty_tensor_restride(MemoryFormat::ChannelsLast);
            hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame_nhwc<scalar_t, accscalar_t>)
              , dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                count,
                gradOutput_data,
                nbatch,
                nInputPlane,
                inputHeight, inputWidth,
                outputHeight, outputWidth,
                kH, kW,
                dH, dW,
                padH, padW,
                gradInput_data,
                divisor_override_value,
                count_include_pad, use_divisor);
            break;
          }
          case MemoryFormat::Contiguous: {
            hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t>)
              , dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                count,
                gradOutput_data,
                nbatch,
                nInputPlane,
                inputHeight, inputWidth,
                outputHeight, outputWidth,
                kH, kW,
                dH, dW,
                padH, padW,
                gradInput_data,
                divisor_override_value,
                count_include_pad, use_divisor);
            break;
          }
          default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
        }
      });
    }
  );
  // Surface launch-configuration errors from the kernel launch above.
  AT_CUDA_CHECK(hipGetLastError());
  return gradInput;
}
} // namespace
// Public out-variant entry point: pools `input` into the caller-provided
// `output` and returns it.  All work is delegated to the template driver.
Tensor& avg_pool2d_out_cuda(
  Tensor& output,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override)
{
  avg_pool2d_out_cuda_template(
      output, input,
      kernel_size, stride, padding,
      ceil_mode, count_include_pad, divisor_override);
  return output;
}
// Public allocating entry point: creates a fresh output tensor and delegates
// to the template driver (which resizes it to the pooled shape).
Tensor avg_pool2d_cuda(
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override)
{
  // Start from an empty tensor; the driver resizes it appropriately.
  Tensor output = at::empty({0}, input.options());
  avg_pool2d_out_cuda_template(
      output, input,
      kernel_size, stride, padding,
      ceil_mode, count_include_pad, divisor_override);
  return output;
}
// Public out-variant backward entry point: writes the gradient w.r.t. `input`
// into the caller-provided `gradInput` and returns it.
Tensor& avg_pool2d_backward_out_cuda(
  Tensor& gradInput,
  const Tensor& gradOutput_,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override)
{
  avg_pool2d_backward_out_cuda_template(
      gradInput, gradOutput_, input,
      kernel_size, stride, padding,
      ceil_mode, count_include_pad, divisor_override);
  return gradInput;
}
// Public allocating backward entry point: allocates a zeroed gradient tensor
// shaped like `input` and delegates to the template driver.
Tensor avg_pool2d_backward_cuda(
  const Tensor& gradOutput_,
  const Tensor& input,
  IntArrayRef kernel_size,
  IntArrayRef stride,
  IntArrayRef padding,
  bool ceil_mode,
  bool count_include_pad,
  c10::optional<int64_t> divisor_override)
{
  auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  avg_pool2d_backward_out_cuda_template(
      gradInput, gradOutput_, input,
      kernel_size, stride, padding,
      ceil_mode, count_include_pad, divisor_override);
  return gradInput;
}
} // at::native
} // at
| 466ca2cea3c370822f2880b5277ed9ee69a95063.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <THC/THCNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
// Device-side integer minimum (avoids relying on host headers in kernels).
__device__ inline int min(int a, int b) {
  return (b < a) ? b : a;
}
// Device-side integer maximum (avoids relying on host headers in kernels).
__device__ inline int max(int a, int b) {
  return (b > a) ? b : a;
}
// Forward average pooling over an NCHW tensor: one thread per output element.
// CUDA_KERNEL_LOOP is a grid-stride loop, so any launch config covers all
// `nthreads` outputs.  Divisor selection: use_divisor -> divisor_override;
// else count_include_pad -> full (padded) window size; else clipped window size.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool2d_out_cuda_frame(const int nthreads,
    const scalar_t* const bottom_data, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    scalar_t* const top_data, const int divisor_override,
    const bool count_include_pad, const bool use_divisor) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat output index into (n, c, ph, pw) in NCHW order.
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    // Pooling window bounds in the padded input coordinate system.
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + kernel_h, height + pad_h);
    int wend = min(wstart + kernel_w, width + pad_w);
    // Window area including padding (used when count_include_pad is set).
    const int pool_size = (hend - hstart) * (wend - wstart);
    // Clip the window to the real input extent.
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    if (hstart >= hend || wstart >= wend) {
      // Window lies entirely in the padding: emit zero.
      top_data[index] = scalar_t(0);
      continue;
    }
    // Accumulate in accscalar_t (e.g. float for half inputs) for precision.
    accscalar_t aveval = accscalar_t(0);
    const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        aveval += bottom_slice[h * width + w];
      }
    }
    int divide_factor;
    if (use_divisor) {
      divide_factor = divisor_override;
    } else {
      if(count_include_pad) {
        divide_factor = pool_size;
      } else {
        // Clipped (unpadded) window area.
        divide_factor = (hend - hstart) * (wend - wstart);
      }
    }
    top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor);
  }
}
// Forward average pooling, channels-last (NHWC) layout.
// Same math as avg_pool2d_out_cuda_frame; only the index decomposition and the
// strides into bottom_data differ (channel is the fastest-varying dimension).
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool2d_out_cuda_frame_nhwc(const int nthreads,
    const scalar_t* const bottom_data, const int num, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int kernel_h, const int kernel_w,
    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
    scalar_t* const top_data, const int divisor_override,
    const bool count_include_pad, const bool use_divisor) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat output index into (n, ph, pw, c) in NHWC order.
    const int c = index % channels;
    const int pw = (index / channels) % pooled_width;
    const int ph = (index / channels / pooled_width) % pooled_height;
    const int n = index / channels / pooled_width / pooled_height;
    // Pooling window bounds in the padded input coordinate system.
    int hstart = ph * stride_h - pad_h;
    int wstart = pw * stride_w - pad_w;
    int hend = min(hstart + kernel_h, height + pad_h);
    int wend = min(wstart + kernel_w, width + pad_w);
    // Window area including padding (used when count_include_pad is set).
    const int pool_size = (hend - hstart) * (wend - wstart);
    // Clip the window to the real input extent.
    hstart = max(hstart, 0);
    wstart = max(wstart, 0);
    hend = min(hend, height);
    wend = min(wend, width);
    if (hstart >= hend || wstart >= wend) {
      // Window lies entirely in the padding: emit zero.
      top_data[index] = scalar_t(0);
      continue;
    }
    accscalar_t aveval = accscalar_t(0);
    // Slice base points at (n, 0, 0, c); spatial step is `channels` elements.
    const scalar_t* const bottom_slice = bottom_data + n * channels * height * width + c;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        aveval += bottom_slice[(h * width + w) * channels];
      }
    }
    int divide_factor;
    if (use_divisor) {
      divide_factor = divisor_override;
    } else {
      if(count_include_pad) {
        divide_factor = pool_size;
      } else {
        // Clipped (unpadded) window area.
        divide_factor = (hend - hstart) * (wend - wstart);
      }
    }
    top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor);
  }
}
// Backward average pooling, NCHW layout: one thread per INPUT element.
// Each thread sums the contributions of every pooling window that covered its
// input position, dividing each contribution by that window's divisor (which
// must be recomputed per window, matching the forward pass).
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool2d_backward_out_cuda_frame(const int nthreads, const scalar_t* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w,
    scalar_t* const bottom_diff, const int divisor_override,
    bool count_include_pad, bool use_divisor) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    // (h, w) are expressed in padded coordinates so the window-range
    // arithmetic below matches the forward pass.
    const int w = index % width + pad_w;
    const int h = (index / width) % height + pad_h;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    // Range of pooling windows whose footprint includes (h, w).
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    accscalar_t gradient = accscalar_t(0);
    const scalar_t* const top_diff_slice =
      top_diff + (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // figure out the pooling size
        int hstart = ph * stride_h - pad_h;
        int wstart = pw * stride_w - pad_w;
        int hend = min(hstart + kernel_h, height + pad_h);
        int wend = min(wstart + kernel_w, width + pad_w);
        int pool_size = (hend - hstart) * (wend - wstart);
        hstart = max(hstart, 0);
        wstart = max(wstart, 0);
        hend = min(hend, height);
        wend = min(wend, width);
        if (hstart >= hend || wstart >= wend) {
          // This window was all-padding in the forward pass: contributed 0.
          continue;
        }
        // Same divisor selection as the forward kernel.
        int divide_factor;
        if (use_divisor) {
          divide_factor = divisor_override;
        } else {
          if(count_include_pad) {
            divide_factor = pool_size;
          } else {
            divide_factor = (hend - hstart) * (wend - wstart);
          }
        }
        gradient += top_diff_slice[ph * pooled_width + pw] / divide_factor;
      }
    }
    bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
  }
}
// Backward pass of 2D average pooling, NHWC (channels-last) layout.
// Same gather strategy as the NCHW variant, but the flat index is decoded as
// (n, h, w, c) and top_diff is strided by `channels` so that adjacent threads
// (adjacent channels) issue coalesced loads/stores.
template <typename scalar_t, typename accscalar_t>
__global__ void avg_pool2d_backward_out_cuda_frame_nhwc(const int nthreads,
const scalar_t* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
scalar_t* const bottom_diff, const int divisor_override,
bool count_include_pad, bool use_divisor) {
CUDA_KERNEL_LOOP(index, nthreads) {
// NHWC decomposition: channel is the fastest-varying dimension.
const int c = index % channels;
const int w = (index / channels) % width;
const int h = (index / channels / width) % height;
const int n = index / channels / width / height;
// Output windows covering this input position.
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
accscalar_t gradient = accscalar_t(0);
// Slice points at this sample's first pixel of channel c.
const scalar_t* const top_diff_slice = top_diff + n * channels * pooled_height * pooled_width + c;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// Window extent; pool_size includes padding, the clamped extent does not.
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
if (hstart >= hend || wstart >= wend) {
continue;
}
// Divisor selection identical to the forward kernel.
int divide_factor;
if (use_divisor) {
divide_factor = divisor_override;
} else {
if(count_include_pad) {
divide_factor = pool_size;
} else {
divide_factor = (hend - hstart) * (wend - wstart);
}
}
// Stride by `channels` to stay within channel c of the NHWC layout.
gradient += top_diff_slice[(ph * pooled_width + pw) * channels] / divide_factor;
}
}
bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient);
}
}
// Host-side driver for the avg_pool2d forward pass.
// Validates kernel_size/stride/padding arguments, computes the output shape,
// resizes `output`, then dispatches the layout-appropriate CUDA kernel
// (contiguous NCHW or channels-last NHWC) over all output elements.
void avg_pool2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg input_arg{ input_, "input_", 2 };
checkAllSameGPU("avg_pool2d_out_cuda", {output_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
"avg_pool2d: kernel_size must either be a single int, or a tuple of two ints");
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2,
"avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints");
// Omitted stride defaults to the kernel size (non-overlapping windows).
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
"avg_pool2d: padding must either be a single int, or a tuple of two ints");
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
// Channels-last requires an explicit batch dimension; NCHW accepts 3D or 4D.
const auto memory_format = input_.suggest_memory_format();
if (memory_format == at::MemoryFormat::ChannelsLast){
TORCH_CHECK(input_.ndimension() == 4,
"non-empty 4D (batch mode) tensor expected for input with channels_last layout");
} else {
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
}
TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0,
"divisor must be not zero");
const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1;
const int64_t nInputPlane = input_.size(-3);
const int64_t inputHeight = input_.size(-2);
const int64_t inputWidth = input_.size(-1);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);
pool2d_shape_check(
input_,
kH, kW, dH, dW, padH, padW, 1, 1,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth);
// Make the input contiguous in the chosen layout so kernel indexing holds.
Tensor input = input_.contiguous(memory_format);
output.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
// One thread per output element; 1D launch with ceil-div block count.
const int32_t count = safe_downcast<int32_t, int64_t>(output.numel());
const uint32_t num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads);
bool use_divisor = divisor_override.has_value();
const auto divisor_override_value = use_divisor ? divisor_override.value() : 0;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *output_data = output.data_ptr<scalar_t>();
scalar_t *input_data = input.data_ptr<scalar_t>();
switch (memory_format){
case MemoryFormat::ChannelsLast: {
// Restride the freshly resized output to channels-last before writing.
output.unsafeGetTensorImpl()->empty_tensor_restride(MemoryFormat::ChannelsLast);
avg_pool2d_out_cuda_frame_nhwc<scalar_t, accscalar_t>
<<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
input_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
output_data,
divisor_override_value,
count_include_pad, use_divisor);
break;
}
case MemoryFormat::Contiguous: {
avg_pool2d_out_cuda_frame<scalar_t, accscalar_t>
<<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
input_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
output_data,
divisor_override_value,
count_include_pad, use_divisor);
break;
}
default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
});
}
);
// Surface any kernel-launch error from the dispatch above.
AT_CUDA_CHECK(cudaGetLastError());
// 3D (unbatched) inputs get a 3D output.
if (input.ndimension() == 3) {
output.resize_({nInputPlane, outputHeight, outputWidth});
}
}
// Host-side driver for the avg_pool2d backward pass.
// Re-derives the pooling geometry from the forward arguments, validates it
// against gradOutput_, resizes gradInput to match the input, then dispatches
// the layout-appropriate backward kernel over all *input* elements.
Tensor& avg_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 };
TensorArg input_arg{ input_, "input_", 3 };
checkAllSameGPU("avg_pool2d_backward_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg});
// #20866, #22032: Guarantee this for the official C++ API?
TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2,
"avg_pool2d: kernel_size must either be a single int, or a tuple of two ints");
const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2,
"avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints");
// Omitted stride defaults to the kernel size, as in the forward pass.
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
const int dW = stride.empty() ? kW :
stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]);
TORCH_CHECK(padding.size() == 1 || padding.size() == 2,
"avg_pool2d: padding must either be a single int, or a tuple of two ints");
const int padH = safe_downcast<int, int64_t>(padding[0]);
const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]);
TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0,
"divisor must be not zero");
const auto memory_format = input_.suggest_memory_format();
if (memory_format == at::MemoryFormat::ChannelsLast) {
TORCH_CHECK(input_.ndimension() == 4,
"non-empty 4D (batch mode) tensor expected for input with channels_last layout");
} else {
TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4),
"non-empty 3D or 4D (batch mode) tensor expected for input");
}
// Both tensors must share the layout the kernels index by.
const Tensor input = input_.contiguous(memory_format);
const Tensor gradOutput = gradOutput_.contiguous(memory_format);
const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
const int64_t nInputPlane = input.size(-3);
const int64_t inputHeight = input.size(-2);
const int64_t inputWidth = input.size(-1);
const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode);
const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode);
avg_pool2d_backward_shape_check(
input_,
gradOutput_,
nbatch,
kH, kW, dH, dW, padH, padW,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth);
gradInput.resize_as_(input);
// One thread per *input* element (gather formulation of the backward pass).
const int32_t count = safe_downcast<int32_t, int64_t>(input.numel());
const uint32_t num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024);
const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads);
bool use_divisor = divisor_override.has_value();
const auto divisor_override_value = use_divisor ? divisor_override.value() : 0;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(),
"avg_pool2d_backward_out_cuda_frame",
[&] {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "avg_pool2d_backward_out_cuda_frame", [&] {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
switch (memory_format) {
case MemoryFormat::ChannelsLast: {
// Keep gradInput channels-last so the NHWC kernel's indexing is valid.
gradInput.unsafeGetTensorImpl()->empty_tensor_restride(MemoryFormat::ChannelsLast);
avg_pool2d_backward_out_cuda_frame_nhwc<scalar_t, accscalar_t>
<<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
gradOutput_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
gradInput_data,
divisor_override_value,
count_include_pad, use_divisor);
break;
}
case MemoryFormat::Contiguous: {
avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t>
<<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
gradOutput_data,
nbatch,
nInputPlane,
inputHeight, inputWidth,
outputHeight, outputWidth,
kH, kW,
dH, dW,
padH, padW,
gradInput_data,
divisor_override_value,
count_include_pad, use_divisor);
break;
}
default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous");
}
});
}
);
// Surface any kernel-launch error from the dispatch above.
AT_CUDA_CHECK(cudaGetLastError());
return gradInput;
}
} // namespace
// Public out-variant entry point for avg_pool2d on CUDA.
// All validation, shape inference, and kernel dispatch live in
// avg_pool2d_out_cuda_template; this wrapper only forwards its arguments
// and hands the caller back the filled `output` tensor.
Tensor& avg_pool2d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
avg_pool2d_out_cuda_template(
output, input, kernel_size, stride, padding,
ceil_mode, count_include_pad, divisor_override);
return output;
}
// Public functional entry point for avg_pool2d on CUDA.
// Allocates an empty result tensor with the input's options and delegates to
// avg_pool2d_out_cuda_template, which resizes and fills it.
Tensor avg_pool2d_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
Tensor output = at::empty({0}, input.options());
avg_pool2d_out_cuda_template(
output, input, kernel_size, stride, padding,
ceil_mode, count_include_pad, divisor_override);
return output;
}
// Public out-variant entry point for the avg_pool2d backward pass on CUDA.
// Forwards everything to avg_pool2d_backward_out_cuda_template, which
// validates the geometry, resizes gradInput to match `input`, and launches
// the backward kernel; returns the filled gradInput.
Tensor& avg_pool2d_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
avg_pool2d_backward_out_cuda_template(
gradInput, gradOutput_, input, kernel_size, stride, padding,
ceil_mode, count_include_pad, divisor_override);
return gradInput;
}
// Public functional entry point for the avg_pool2d backward pass on CUDA.
// Allocates a zero-filled gradient tensor shaped like `input` (legacy
// contiguous layout) and delegates to the backward template to fill it.
Tensor avg_pool2d_backward_cuda(
const Tensor& gradOutput_,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
bool ceil_mode,
bool count_include_pad,
c10::optional<int64_t> divisor_override)
{
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
avg_pool2d_backward_out_cuda_template(
gradInput, gradOutput_, input, kernel_size, stride, padding,
ceil_mode, count_include_pad, divisor_override);
return gradInput;
}
} // at::native
} // at
|
5baed6e588979bafb996f41f676320292de84016.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Elementwise square: thread i writes d_in[i]^2 into d_out[i].
// Expects a single-block launch with exactly one thread per array element
// (no bounds check, so the launch must not over-subscribe the arrays).
__global__
void square(float *d_out, float *d_in){
    int i = threadIdx.x;
    d_out[i] = d_in[i] * d_in[i];
}
// Host driver: fill an array with 0..63, square it on the GPU, print results.
int main(int argc, char **argv){
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // Host buffers; input is simply the element index as a float.
    float h_in[ARRAY_SIZE];
    float h_out[ARRAY_SIZE];
    for(int idx = 0 ; idx < ARRAY_SIZE ; idx++){
        h_in[idx] = float(idx);
    }

    // Device buffers.
    float * d_in;
    float * d_out;
    hipMalloc(&d_in, ARRAY_BYTES);
    hipMalloc(&d_out, ARRAY_BYTES);

    // Upload, launch one block with one thread per element, download.
    // (hipMemcpy is blocking, so no explicit synchronization is needed.)
    hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( square), dim3(1),dim3(ARRAY_SIZE), 0, 0, d_out,d_in);
    hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);

    // Print four values per line, tab-separated.
    for(int idx = 0; idx < ARRAY_SIZE; idx++){
        printf("%f", h_out[idx]);
        printf(((idx % 4) != 3) ? "\t" : "\n" );
    }

    hipFree(d_in);
    hipFree(d_out);
    return 0;
}
| 5baed6e588979bafb996f41f676320292de84016.cu | #include <stdio.h>
// Elementwise square: thread i writes d_in[i]^2 into d_out[i].
// Expects a single-block launch with exactly one thread per array element
// (no bounds check, so the launch must not over-subscribe the arrays).
__global__
void square(float *d_out, float *d_in){
    int i = threadIdx.x;
    d_out[i] = d_in[i] * d_in[i];
}
// Host driver: fill an array with 0..63, square it on the GPU, print results.
int main(int argc, char **argv){
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // Host buffers; input is simply the element index as a float.
    float h_in[ARRAY_SIZE];
    float h_out[ARRAY_SIZE];
    for(int idx = 0 ; idx < ARRAY_SIZE ; idx++){
        h_in[idx] = float(idx);
    }

    // Device buffers.
    float * d_in;
    float * d_out;
    cudaMalloc(&d_in, ARRAY_BYTES);
    cudaMalloc(&d_out, ARRAY_BYTES);

    // Upload, launch one block with one thread per element, download.
    // (cudaMemcpy is blocking, so no explicit synchronization is needed.)
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    square<<<1,ARRAY_SIZE>>>(d_out,d_in);
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // Print four values per line, tab-separated.
    for(int idx = 0; idx < ARRAY_SIZE; idx++){
        printf("%f", h_out[idx]);
        printf(((idx % 4) != 3) ? "\t" : "\n" );
    }

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
9c73d75b787a7516ec213628fffbbb3d1a464639.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
// Elementwise greater-than on CUDA: for every element pair produced by the
// TensorIterator (tensor-tensor or tensor-scalar), writes the bool (a > b).
// Dispatches over all types plus Half/BFloat16/Bool on the common dtype.
void gt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "gt_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "gt_cuda", [&] {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a > b;
});
});
});
}
REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda);
}} // namespace at::native
| 9c73d75b787a7516ec213628fffbbb3d1a464639.cu | #include <ATen/Dispatch.h>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
// Elementwise greater-than on CUDA: for every element pair produced by the
// TensorIterator (tensor-tensor or tensor-scalar), writes the bool (a > b).
// Dispatches over all types plus Half/BFloat16/Bool on the common dtype.
void gt_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "gt_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "gt_cuda", [&] {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a > b;
});
});
});
}
REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda);
}} // namespace at::native
|
a8c87839ccd1a9a7135c8c637d50f7f8d0ceb5df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020, Tobias Rapp
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Karlsruhe Institute of Technology nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "moment_image_device.cuh"
#include "mese/MESE_dynamic.h"
#include "moment_preparation.h"
#include "moment_prediction_coding.h"
#include "coding_transform.cuh"
#include "cut/timing.h"
#include "cut/strided_array.h"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
using namespace cut;
__constant__ float c_coding_params[512];
// Picks a 2D launch configuration whose per-block shared-memory scratch
// (threads-per-block * num_moments * bytes_per_moment) fits the device's
// sharedMemPerBlock limit, starting from an 8x4 block and shrinking the
// block width one thread at a time. Writes the grid/block dims into the
// output parameters and returns the shared-memory bytes to request.
int MomentImageDevice::get_best_smem_launch_config(int bytes_per_moment, dim3 &num_blocks,
dim3 &threads_per_block) const
{
auto max_moments = view.num_moments;
int max_shared_mem = g_DeviceProp.sharedMemPerBlock;
threads_per_block = dim3(8, 4);
// Ceil-div grid covering the whole image.
num_blocks.x = (view.width + threads_per_block.x - 1) / threads_per_block.x;
num_blocks.y = (view.height + threads_per_block.y - 1) / threads_per_block.y;
int required_smem = threads_per_block.x * threads_per_block.y * max_moments * bytes_per_moment;
while (required_smem > max_shared_mem)
{
// Shrink block width; asserts if even a 1-wide block cannot fit.
threads_per_block.x = threads_per_block.x - 1;
assert(threads_per_block.x > 0);
num_blocks.x = (view.width + threads_per_block.x - 1) / threads_per_block.x;
num_blocks.y = (view.height + threads_per_block.y - 1) / threads_per_block.y;
required_smem = threads_per_block.x * threads_per_block.y * max_moments * bytes_per_moment;
}
return required_smem;
}
// Copies moment data from the fixed-stride (uncompacted) layout into the
// compacted layout. One thread per (x, y, moment-slot); the destination
// offset of pixel (x, y) is mi.index[y*width + x], which must already hold
// exclusive-scan results, and the per-pixel moment count is the difference
// to the next index entry.
__global__ void compaction_copy_kernel(const MomentImageDeviceView mi, const float *in, float *out)
{
Vec3i gid(__umul24(blockIdx.x, blockDim.x) + threadIdx.x,
__umul24(blockIdx.y, blockDim.y) + threadIdx.y,
__umul24(blockIdx.z, blockDim.z) + threadIdx.z);
if (gid.x >= mi.width || gid.y >= mi.height || gid.z >= mi.num_moments)
return;
auto old_idx = mi.get_idx(gid.x, gid.y);
auto new_idx = mi.index[gid.y * mi.width + gid.x];
auto num = mi.index[gid.y * mi.width + gid.x + 1] - new_idx;
// Threads beyond this pixel's moment count have nothing to copy.
if (gid.z >= num)
return;
out[new_idx + gid.z] = in[old_idx + gid.z];
}
// Compacts the moment image in-place on the device: turns the per-pixel
// moment counts stored in `index` into exclusive-scan offsets, allocates a
// tightly packed data buffer of the resulting total size, copies every
// pixel's moments over with compaction_copy_kernel, and swaps the buffers.
void MomentImageDevice::compact()
{
NVTX_RANGE("Image compaction");
assert(!view.is_compact);
// In-place exclusive scan over width*height counts (+1 slot for the total).
thrust::device_ptr<uint32_t> index_ptr(index.get());
auto index_size = view.width * view.height;
thrust::exclusive_scan(index_ptr, index_ptr + index_size + 1, index_ptr);
// The last scan entry is the total compacted element count; fetch to host.
uint32_t new_data_size;
CHECK_CUDA(hipMemcpy(&new_data_size, index.get() + index_size, sizeof(uint32_t), hipMemcpyDeviceToHost));
cut::dev_ptr<float> new_data(new_data_size);
// 3D launch: (x, y) pixel plus z over moment slots.
dim3 threads_per_block(4, 4, 32);
dim3 num_blocks;
num_blocks.x = (view.width + threads_per_block.x - 1) / threads_per_block.x;
num_blocks.y = (view.height + threads_per_block.y - 1) / threads_per_block.y;
num_blocks.z = (view.num_moments + threads_per_block.z - 1) / threads_per_block.z;
hipLaunchKernelGGL(( compaction_copy_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, view, data.get(), new_data.get());
// Adopt the packed buffer; the old one is released when new_data dies.
std::swap(data.m_ptr, new_data.m_ptr);
view.data = data.get();
data_size = new_data_size;
view.is_compact = true;
}
// Per-pixel prediction encoding. Each thread owns one pixel and four
// num_moments-sized complex scratch arrays carved out of dynamic shared
// memory, interleaved with stride = threads-per-block (strided_array) so
// that per-thread elements do not collide.
// Steps: bias the trigonometric moments (first toward 0.5, the rest toward
// 0), convert to exponential moments, encode, then quantize the code back
// into the image in place.
__global__ void prediction_encode_kernel(MomentImageDeviceView mi, float bias)
{
Vec2i gid(__umul24(blockIdx.x, blockDim.x) + threadIdx.x, __umul24(blockIdx.y, blockDim.y) + threadIdx.y);
if (gid.x >= mi.width || gid.y >= mi.height)
return;
auto num_moments = mi.get_num_moments(gid.x, gid.y);
if (num_moments <= 0)
return;
extern __shared__ float_complex storage[];
int t_idx = threadIdx.y * blockDim.x + threadIdx.x;
int t_offset = blockDim.x * blockDim.y;
int array_offset = mi.num_moments * t_offset;
// Four interleaved scratch arrays per thread.
strided_array<float_complex> code(&storage[t_idx], t_offset);
strided_array<float_complex> exp_moments(&storage[t_idx + array_offset], t_offset);
strided_array<float_complex> eval_polynom(&storage[t_idx + 2 * array_offset], t_offset);
strided_array<float_complex> temp(&storage[t_idx + 3 * array_offset], t_offset);
auto idx = mi.get_idx(gid.x, gid.y);
// Bias regularizes the moments before encoding.
mi.data[idx] = cut::lerp(mi.data[idx], 0.5f, bias);
for (int i = 1; i < num_moments; ++i)
mi.data[idx + i] = cut::lerp(mi.data[idx + i], 0.0f, bias);
trigonometricToExponentialMoments(num_moments, exp_moments, &mi.data[idx]);
encode(num_moments, exp_moments, code, eval_polynom, temp);
transform_quantization_real(num_moments, code, mi.data[idx], &mi.data[idx]);
}
// Applies the coding warp to every moment with index >= 1 (the 0th moment is
// left unwarped). One thread per (x, y, moment-1); warp parameters come from
// the constant-memory array c_coding_params selected by mi.coding_warp.
__global__ void prediction_encode_warping_kernel(MomentImageDeviceView mi)
{
Vec3i gid(__umul24(blockIdx.x, blockDim.x) + threadIdx.x,
__umul24(blockIdx.y, blockDim.y) + threadIdx.y,
__umul24(blockIdx.z, blockDim.z) + threadIdx.z);
if (gid.x >= mi.width || gid.y >= mi.height || gid.z >= mi.num_moments)
return;
// gid.z == 0 maps to moment l == 1.
int l = 1 + gid.z;
auto num_moments = mi.get_num_moments(gid.x, gid.y);
if (num_moments <= l)
return;
auto idx = mi.get_idx(gid.x, gid.y);
mi.data[idx + l] = prediction_coding_warp(l, mi.data[idx + l], WarpParameters{mi.coding_warp, c_coding_params});
}
// Runs prediction coding on a compacted moment image:
// 1) per-pixel encode kernel (shared-memory scratch sized by the launch
//    helper for 4 complex arrays per thread),
// 2) if the chosen warp needs parameters, compute/quantize them on the
//    device and upload them to constant memory,
// 3) warp all moments l >= 1 with a 3D kernel.
// `bias` is the regularization strength applied before encoding.
void MomentImageDevice::prediction_encode(int coding_warp, float bias)
{
assert(view.is_compact);
SCOPED_CUDA_QUERY("Prediction coding");
view.prediction_code = true;
view.coding_warp = coding_warp;
dim3 threads_per_block;
dim3 num_blocks;
auto shared_mem = get_best_smem_launch_config(sizeof(float_complex) * 4, num_blocks, threads_per_block);
hipLaunchKernelGGL(( prediction_encode_kernel), dim3(num_blocks), dim3(threads_per_block), shared_mem, 0, view, bias);
if (requires_coding_parameters(coding_warp))
{
if (coding_warp == CODING_WARP_DEFAULT_TRANSFORMED)
coding_params = CodingParamType::quantize(find_coding_transform_parameters_device(view));
// Push (dequantized) parameters into __constant__ memory for the kernels.
load_coding_params();
}
// 3D launch over (x, y, moment-1) for the warping pass.
threads_per_block = dim3(4, 4, 16);
num_blocks.x = (view.width + threads_per_block.x - 1) / threads_per_block.x;
num_blocks.y = (view.height + threads_per_block.y - 1) / threads_per_block.y;
num_blocks.z = (view.num_moments - 1 + threads_per_block.z - 1) / threads_per_block.z;
hipLaunchKernelGGL(( prediction_encode_warping_kernel), dim3(num_blocks), dim3(threads_per_block), 0, 0, view);
}
// Inverse of prediction_encode_kernel: per pixel, dequantize (undoing the
// warp via c_coding_params), decode, convert exponential moments back to
// trigonometric moments, and write the real parts back into the image.
// Uses the same four interleaved shared-memory scratch arrays per thread.
__global__ void revert_prediction_coding_kernel(MomentImageDeviceView mi)
{
Vec2i gid(__umul24(blockIdx.x, blockDim.x) + threadIdx.x, __umul24(blockIdx.y, blockDim.y) + threadIdx.y);
if (gid.x >= mi.width || gid.y >= mi.height)
return;
auto num_moments = mi.get_num_moments(gid.x, gid.y);
if (num_moments <= 0)
return;
extern __shared__ float_complex storage[];
int t_idx = threadIdx.y * blockDim.x + threadIdx.x;
int t_offset = blockDim.x * blockDim.y;
int array_offset = mi.num_moments * t_offset;
strided_array<float_complex> code(&storage[t_idx], t_offset);
strided_array<float_complex> exp_moments(&storage[t_idx + array_offset], t_offset);
strided_array<float_complex> eval_polynom(&storage[t_idx + 2 * array_offset], t_offset);
strided_array<float_complex> temp(&storage[t_idx + 3 * array_offset], t_offset);
auto idx = mi.get_idx(gid.x, gid.y);
transform_dequantization_real(num_moments, &mi.data[idx], code, WarpParameters{mi.coding_warp, c_coding_params});
decode(num_moments, code, exp_moments, eval_polynom, temp);
exponentialToTrigonometricMoments(num_moments, temp, exp_moments);
// Only the real part of the recovered trigonometric moments is stored.
for (int l = 0; l < num_moments; ++l)
mi.data[idx + l] = temp[l].x;
}
// Host wrapper for the decode path: uploads the coding parameters to
// constant memory, launches revert_prediction_coding_kernel with shared
// memory sized for 4 complex scratch arrays per thread, and clears the
// prediction_code flag.
void MomentImageDevice::revert_prediction_coding()
{
SCOPED_CUDA_QUERY("Revert prediction coding");
assert(view.prediction_code);
load_coding_params();
dim3 threads_per_block;
dim3 num_blocks;
auto shared_mem = get_best_smem_launch_config(sizeof(float_complex) * 4, num_blocks, threads_per_block);
hipLaunchKernelGGL(( revert_prediction_coding_kernel), dim3(num_blocks), dim3(threads_per_block), shared_mem, 0, view);
view.prediction_code = false;
}
// Per-pixel kernel that turns prediction-coded image data into moments ready
// for reconstruction, writing into the separate output buffer `pmoments`
// (same indexing/layout as mi.data). Uses four interleaved shared-memory
// scratch arrays per thread; the actual math lives in
// prepare_moments_from_pred_coding.
// NOTE(review): `ndc` is computed but unused here — presumably a leftover;
// confirm before removing.
__global__ void prepareMomentsFromPredictionCodingKernel(MomentImageDeviceView mi, float *pmoments)
{
Vec2i gid(__umul24(blockIdx.x, blockDim.x) + threadIdx.x, __umul24(blockIdx.y, blockDim.y) + threadIdx.y);
if (gid.x >= mi.width || gid.y >= mi.height)
return;
Vec2f ndc(gid.x / static_cast<float>(mi.width) * 2.f - 1.f, gid.y / static_cast<float>(mi.height) * 2.f - 1.f);
auto idx = mi.get_idx(gid.x, gid.y);
auto num_moments = mi.get_num_moments(gid.x, gid.y);
if (num_moments <= 0)
return;
float *prepared_moments = &pmoments[idx];
extern __shared__ float_complex storage[];
int t_idx = threadIdx.y * blockDim.x + threadIdx.x;
int t_offset = blockDim.x * blockDim.y;
int array_offset = mi.num_moments * t_offset;
strided_array<float_complex> code(&storage[t_idx], t_offset);
strided_array<float_complex> exp_moments(&storage[t_idx + array_offset], t_offset);
strided_array<float_complex> eval_polynom(&storage[t_idx + 2 * array_offset], t_offset);
strided_array<float_complex> temp(&storage[t_idx + 3 * array_offset], t_offset);
prepare_moments_from_pred_coding(num_moments, &mi.data[idx], prepared_moments, code, exp_moments, eval_polynom,
temp, WarpParameters{mi.coding_warp, c_coding_params});
}
// Per-pixel kernel for the standard (non-prediction-coded) path: prepares
// moments for reconstruction into `pmoments` via
// prepare_moments_from_std_coding, applying `bias` regularization. Needs
// only three interleaved shared-memory scratch arrays per thread.
// NOTE(review): `ndc` is computed but unused here — presumably a leftover;
// confirm before removing.
__global__ void prepareMomentsKernel(MomentImageDeviceView mi, float *pmoments, float bias)
{
Vec2i gid(__umul24(blockIdx.x, blockDim.x) + threadIdx.x, __umul24(blockIdx.y, blockDim.y) + threadIdx.y);
if (gid.x >= mi.width || gid.y >= mi.height)
return;
Vec2f ndc(gid.x / static_cast<float>(mi.width) * 2.f - 1.f, gid.y / static_cast<float>(mi.height) * 2.f - 1.f);
auto idx = mi.get_idx(gid.x, gid.y);
auto num_moments = mi.get_num_moments(gid.x, gid.y);
if (num_moments == 0)
return;
extern __shared__ float_complex storage[];
int t_idx = threadIdx.y * blockDim.x + threadIdx.x;
int t_offset = blockDim.x * blockDim.y;
int array_offset = mi.num_moments * t_offset;
strided_array<float_complex> temp0(&storage[t_idx], t_offset);
strided_array<float_complex> temp1(&storage[t_idx + array_offset], t_offset);
strided_array<float_complex> temp2(&storage[t_idx + 2 * array_offset], t_offset);
prepare_moments_from_std_coding(num_moments, &mi.data[idx], &pmoments[idx], temp0, temp1, temp2, bias);
}
// Prepares the whole image's moments for reconstruction. Allocates a second
// buffer of data_size, launches the prediction-coded or standard preparation
// kernel into it (shared memory sized for 4 or 3 complex scratch arrays per
// thread respectively), then swaps it in as the image's data buffer.
void MomentImageDevice::prepare_moments_device(float bias)
{
dim3 threadsPerBlock(4, 4);
dim3 numBlocks;
numBlocks.x = (view.width + threadsPerBlock.x - 1) / threadsPerBlock.x;
numBlocks.y = (view.height + threadsPerBlock.y - 1) / threadsPerBlock.y;
cut::dev_ptr<float> pmoments(data_size);
if (view.prediction_code)
{
SCOPED_CUDA_QUERY("Prediction preparation");
// Warp parameters must be resident in constant memory for the kernel.
load_coding_params();
auto shared_mem = get_best_smem_launch_config(sizeof(float_complex) * 4, numBlocks, threadsPerBlock);
hipLaunchKernelGGL(( prepareMomentsFromPredictionCodingKernel), dim3(numBlocks), dim3(threadsPerBlock), shared_mem, 0, view, pmoments.get());
}
else
{
SCOPED_CUDA_QUERY("Standard preparation");
auto shared_mem = get_best_smem_launch_config(sizeof(float_complex) * 3, numBlocks, threadsPerBlock);
hipLaunchKernelGGL(( prepareMomentsKernel), dim3(numBlocks), dim3(threadsPerBlock), shared_mem, 0, view, pmoments.get(), bias);
}
// Adopt the prepared buffer; the previous data buffer is freed with pmoments.
std::swap(data, pmoments);
view.data = data.get();
}
// Uploads the image's (quantized) coding parameters to the __constant__
// array c_coding_params after dequantizing them on the host. No-op for warp
// modes that need no parameters.
void MomentImageDevice::load_coding_params()
{
if (requires_coding_parameters(view.coding_warp))
{
assert(!coding_params.empty());
auto params = CodingParamType::dequantize(coding_params);
// params.size() must not exceed the 512-float constant buffer.
CHECK_CUDA(hipMemcpyToSymbol(c_coding_params, params.data(), sizeof(float) * params.size(), 0,
hipMemcpyHostToDevice));
}
}
| a8c87839ccd1a9a7135c8c637d50f7f8d0ceb5df.cu | // Copyright (c) 2020, Tobias Rapp
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the Karlsruhe Institute of Technology nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "moment_image_device.cuh"
#include "mese/MESE_dynamic.h"
#include "moment_preparation.h"
#include "moment_prediction_coding.h"
#include "coding_transform.cuh"
#include "cut/timing.h"
#include "cut/strided_array.h"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
using namespace cut;
__constant__ float c_coding_params[512];
// Picks a 2D launch configuration whose per-block shared-memory scratch
// (threads-per-block * num_moments * bytes_per_moment) fits the device's
// sharedMemPerBlock limit, starting from an 8x4 block and shrinking the
// block width one thread at a time. Writes the grid/block dims into the
// output parameters and returns the shared-memory bytes to request.
int MomentImageDevice::get_best_smem_launch_config(int bytes_per_moment, dim3 &num_blocks,
dim3 &threads_per_block) const
{
auto max_moments = view.num_moments;
int max_shared_mem = g_DeviceProp.sharedMemPerBlock;
threads_per_block = dim3(8, 4);
// Ceil-div grid covering the whole image.
num_blocks.x = (view.width + threads_per_block.x - 1) / threads_per_block.x;
num_blocks.y = (view.height + threads_per_block.y - 1) / threads_per_block.y;
int required_smem = threads_per_block.x * threads_per_block.y * max_moments * bytes_per_moment;
while (required_smem > max_shared_mem)
{
// Shrink block width; asserts if even a 1-wide block cannot fit.
threads_per_block.x = threads_per_block.x - 1;
assert(threads_per_block.x > 0);
num_blocks.x = (view.width + threads_per_block.x - 1) / threads_per_block.x;
num_blocks.y = (view.height + threads_per_block.y - 1) / threads_per_block.y;
required_smem = threads_per_block.x * threads_per_block.y * max_moments * bytes_per_moment;
}
return required_smem;
}
// Copies each pixel's moments from their uncompacted location in `in` to the
// packed location in `out` given by the exclusive-scanned index buffer.
// One thread per (pixel x, pixel y, moment) triple.
__global__ void compaction_copy_kernel(const MomentImageDeviceView mi, const float *in, float *out)
{
    int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
    int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
    int m = __umul24(blockIdx.z, blockDim.z) + threadIdx.z;
    if (x >= mi.width || y >= mi.height || m >= mi.num_moments)
        return;

    int pixel = y * mi.width + x;
    auto src = mi.get_idx(x, y);          // uncompacted start of this pixel
    auto dst = mi.index[pixel];           // compacted start (scan result)
    auto count = mi.index[pixel + 1] - dst; // number of moments at this pixel
    if (m < count)
        out[dst + m] = in[src + m];
}
// Compacts the per-pixel moment data: exclusive-scans the per-pixel moment
// counts stored in `index` (turning counts into start offsets), copies the
// moments into a tightly packed buffer, and swaps it in for `data`.
// Precondition: the image is not yet compact; `index` holds
// width*height + 1 entries (counts plus one extra slot for the total).
void MomentImageDevice::compact()
{
NVTX_RANGE("Image compaction");
assert(!view.is_compact);
thrust::device_ptr<uint32_t> index_ptr(index.get());
auto index_size = view.width * view.height;
// In-place exclusive scan over index_size + 1 entries so the final entry
// becomes the total compacted element count.
thrust::exclusive_scan(index_ptr, index_ptr + index_size + 1, index_ptr);
uint32_t new_data_size;
CHECK_CUDA(cudaMemcpy(&new_data_size, index.get() + index_size, sizeof(uint32_t), cudaMemcpyDeviceToHost));
cut::dev_ptr<float> new_data(new_data_size);
// One thread per (pixel, moment); z covers the moment dimension.
dim3 threads_per_block(4, 4, 32);
dim3 num_blocks;
num_blocks.x = (view.width + threads_per_block.x - 1) / threads_per_block.x;
num_blocks.y = (view.height + threads_per_block.y - 1) / threads_per_block.y;
num_blocks.z = (view.num_moments + threads_per_block.z - 1) / threads_per_block.z;
compaction_copy_kernel<<<num_blocks, threads_per_block>>>(view, data.get(), new_data.get());
// Adopt the packed buffer; the old (larger) one is released when new_data
// goes out of scope.
std::swap(data.m_ptr, new_data.m_ptr);
view.data = data.get();
data_size = new_data_size;
view.is_compact = true;
}
// Prediction-encodes each pixel's trigonometric moments in place: applies the
// bias lerp, converts to exponential moments, runs the MESE encoder, and
// quantizes the resulting code back into mi.data.
// Requires dynamic shared memory for 4 complex scratch arrays of
// mi.num_moments entries per thread (see get_best_smem_launch_config).
__global__ void prediction_encode_kernel(MomentImageDeviceView mi, float bias)
{
Vec2i gid(__umul24(blockIdx.x, blockDim.x) + threadIdx.x, __umul24(blockIdx.y, blockDim.y) + threadIdx.y);
if (gid.x >= mi.width || gid.y >= mi.height)
return;
auto num_moments = mi.get_num_moments(gid.x, gid.y);
if (num_moments <= 0)
return;
extern __shared__ float_complex storage[];
// Each thread owns a strided slice of the shared buffer: element i of a
// per-thread array lives at storage[t_idx + i * t_offset].
int t_idx = threadIdx.y * blockDim.x + threadIdx.x;
int t_offset = blockDim.x * blockDim.y;
int array_offset = mi.num_moments * t_offset;
strided_array<float_complex> code(&storage[t_idx], t_offset);
strided_array<float_complex> exp_moments(&storage[t_idx + array_offset], t_offset);
strided_array<float_complex> eval_polynom(&storage[t_idx + 2 * array_offset], t_offset);
strided_array<float_complex> temp(&storage[t_idx + 3 * array_offset], t_offset);
auto idx = mi.get_idx(gid.x, gid.y);
// Bias the zeroth moment towards 0.5 and all higher moments towards 0.
mi.data[idx] = cut::lerp(mi.data[idx], 0.5f, bias);
for (int i = 1; i < num_moments; ++i)
mi.data[idx + i] = cut::lerp(mi.data[idx + i], 0.0f, bias);
trigonometricToExponentialMoments(num_moments, exp_moments, &mi.data[idx]);
encode(num_moments, exp_moments, code, eval_polynom, temp);
transform_quantization_real(num_moments, code, mi.data[idx], &mi.data[idx]);
}
// Applies the coding warp to every predicted moment (indices 1..num_moments-1)
// of every pixel; one thread per (pixel, predicted moment) pair.
__global__ void prediction_encode_warping_kernel(MomentImageDeviceView mi)
{
    int px = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
    int py = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
    int mz = __umul24(blockIdx.z, blockDim.z) + threadIdx.z;
    if (px >= mi.width || py >= mi.height || mz >= mi.num_moments)
        return;

    // Moment 0 is stored unwarped, so thread z handles moment z + 1.
    int l = 1 + mz;
    if (l >= mi.get_num_moments(px, py))
        return;

    auto base = mi.get_idx(px, py);
    WarpParameters params{mi.coding_warp, c_coding_params};
    mi.data[base + l] = prediction_coding_warp(l, mi.data[base + l], params);
}
// Host-side driver for prediction coding: runs the per-pixel encode kernel,
// derives/uploads coding-warp parameters if the chosen warp needs them, then
// warps all predicted moments.
// Precondition: the image has been compacted (see compact()).
void MomentImageDevice::prediction_encode(int coding_warp, float bias)
{
assert(view.is_compact);
SCOPED_CUDA_QUERY("Prediction coding");
view.prediction_code = true;
view.coding_warp = coding_warp;
dim3 threads_per_block;
dim3 num_blocks;
// 4 complex scratch arrays per thread (code/exp_moments/eval_polynom/temp).
auto shared_mem = get_best_smem_launch_config(sizeof(float_complex) * 4, num_blocks, threads_per_block);
prediction_encode_kernel<<<num_blocks, threads_per_block, shared_mem>>>(view, bias);
if (requires_coding_parameters(coding_warp))
{
if (coding_warp == CODING_WARP_DEFAULT_TRANSFORMED)
coding_params = CodingParamType::quantize(find_coding_transform_parameters_device(view));
load_coding_params();
}
// Warp kernel: one thread per (pixel, predicted moment); moment 0 is never
// warped, hence num_moments - 1 along z.
threads_per_block = dim3(4, 4, 16);
num_blocks.x = (view.width + threads_per_block.x - 1) / threads_per_block.x;
num_blocks.y = (view.height + threads_per_block.y - 1) / threads_per_block.y;
num_blocks.z = (view.num_moments - 1 + threads_per_block.z - 1) / threads_per_block.z;
prediction_encode_warping_kernel<<<num_blocks, threads_per_block>>>(view);
}
// Decodes each pixel's prediction-coded moments in place: dequantizes the
// code, runs the MESE decoder, converts exponential back to trigonometric
// moments, and writes the real parts into mi.data.
// Requires dynamic shared memory for 4 complex scratch arrays of
// mi.num_moments entries per thread (see get_best_smem_launch_config).
__global__ void revert_prediction_coding_kernel(MomentImageDeviceView mi)
{
Vec2i gid(__umul24(blockIdx.x, blockDim.x) + threadIdx.x, __umul24(blockIdx.y, blockDim.y) + threadIdx.y);
if (gid.x >= mi.width || gid.y >= mi.height)
return;
auto num_moments = mi.get_num_moments(gid.x, gid.y);
if (num_moments <= 0)
return;
extern __shared__ float_complex storage[];
// Per-thread strided slices of the shared buffer (element i of an array
// lives at storage[t_idx + i * t_offset]).
int t_idx = threadIdx.y * blockDim.x + threadIdx.x;
int t_offset = blockDim.x * blockDim.y;
int array_offset = mi.num_moments * t_offset;
strided_array<float_complex> code(&storage[t_idx], t_offset);
strided_array<float_complex> exp_moments(&storage[t_idx + array_offset], t_offset);
strided_array<float_complex> eval_polynom(&storage[t_idx + 2 * array_offset], t_offset);
strided_array<float_complex> temp(&storage[t_idx + 3 * array_offset], t_offset);
auto idx = mi.get_idx(gid.x, gid.y);
transform_dequantization_real(num_moments, &mi.data[idx], code, WarpParameters{mi.coding_warp, c_coding_params});
decode(num_moments, code, exp_moments, eval_polynom, temp);
exponentialToTrigonometricMoments(num_moments, temp, exp_moments);
// temp now holds complex trigonometric moments; keep only the real parts.
for (int l = 0; l < num_moments; ++l)
mi.data[idx + l] = temp[l].x;
}
// Undoes prediction coding in place: re-uploads the coding parameters and
// launches the per-pixel decode kernel, after which mi.data again holds
// plain trigonometric moments.
void MomentImageDevice::revert_prediction_coding()
{
    SCOPED_CUDA_QUERY("Revert prediction coding");
    assert(view.prediction_code);
    load_coding_params();

    dim3 blocks, threads;
    // Decoding needs the same 4 complex scratch arrays per thread as encoding.
    int smem_bytes = get_best_smem_launch_config(sizeof(float_complex) * 4, blocks, threads);
    revert_prediction_coding_kernel<<<blocks, threads, smem_bytes>>>(view);
    view.prediction_code = false;
}
// Dequantizes and decodes each pixel's prediction-coded moments and writes
// the render-ready moments into pmoments (same layout as mi.data).
// Requires dynamic shared memory for 4 complex scratch arrays of
// mi.num_moments entries per thread (see get_best_smem_launch_config).
// Fix: removed the unused per-thread `ndc` computation (dead code).
__global__ void prepareMomentsFromPredictionCodingKernel(MomentImageDeviceView mi, float *pmoments)
{
    Vec2i gid(__umul24(blockIdx.x, blockDim.x) + threadIdx.x, __umul24(blockIdx.y, blockDim.y) + threadIdx.y);
    if (gid.x >= mi.width || gid.y >= mi.height)
        return;
    auto idx = mi.get_idx(gid.x, gid.y);
    auto num_moments = mi.get_num_moments(gid.x, gid.y);
    if (num_moments <= 0)
        return;
    float *prepared_moments = &pmoments[idx];
    // Per-thread strided slices of the dynamic shared-memory buffer
    // (element i of an array lives at storage[t_idx + i * t_offset]).
    extern __shared__ float_complex storage[];
    int t_idx = threadIdx.y * blockDim.x + threadIdx.x;
    int t_offset = blockDim.x * blockDim.y;
    int array_offset = mi.num_moments * t_offset;
    strided_array<float_complex> code(&storage[t_idx], t_offset);
    strided_array<float_complex> exp_moments(&storage[t_idx + array_offset], t_offset);
    strided_array<float_complex> eval_polynom(&storage[t_idx + 2 * array_offset], t_offset);
    strided_array<float_complex> temp(&storage[t_idx + 3 * array_offset], t_offset);
    prepare_moments_from_pred_coding(num_moments, &mi.data[idx], prepared_moments, code, exp_moments, eval_polynom,
                                     temp, WarpParameters{mi.coding_warp, c_coding_params});
}
// Prepares each pixel's standard-coded (non prediction-coded) moments for
// rendering and writes them into pmoments (same layout as mi.data).
// Requires dynamic shared memory for 3 complex scratch arrays of
// mi.num_moments entries per thread (see get_best_smem_launch_config).
// Fix: removed the unused per-thread `ndc` computation (dead code).
__global__ void prepareMomentsKernel(MomentImageDeviceView mi, float *pmoments, float bias)
{
    Vec2i gid(__umul24(blockIdx.x, blockDim.x) + threadIdx.x, __umul24(blockIdx.y, blockDim.y) + threadIdx.y);
    if (gid.x >= mi.width || gid.y >= mi.height)
        return;
    auto idx = mi.get_idx(gid.x, gid.y);
    auto num_moments = mi.get_num_moments(gid.x, gid.y);
    if (num_moments == 0)
        return;
    // Per-thread strided slices of the dynamic shared-memory buffer.
    extern __shared__ float_complex storage[];
    int t_idx = threadIdx.y * blockDim.x + threadIdx.x;
    int t_offset = blockDim.x * blockDim.y;
    int array_offset = mi.num_moments * t_offset;
    strided_array<float_complex> temp0(&storage[t_idx], t_offset);
    strided_array<float_complex> temp1(&storage[t_idx + array_offset], t_offset);
    strided_array<float_complex> temp2(&storage[t_idx + 2 * array_offset], t_offset);
    prepare_moments_from_std_coding(num_moments, &mi.data[idx], &pmoments[idx], temp0, temp1, temp2, bias);
}
// Converts the stored moments into render-ready "prepared" moments, written
// to a fresh device buffer that then replaces `data`. Dispatches to the
// prediction-coding or standard-coding preparation kernel depending on how
// the image was encoded.
void MomentImageDevice::prepare_moments_device(float bias)
{
dim3 threadsPerBlock(4, 4);
dim3 numBlocks;
numBlocks.x = (view.width + threadsPerBlock.x - 1) / threadsPerBlock.x;
numBlocks.y = (view.height + threadsPerBlock.y - 1) / threadsPerBlock.y;
cut::dev_ptr<float> pmoments(data_size);
if (view.prediction_code)
{
SCOPED_CUDA_QUERY("Prediction preparation");
load_coding_params();
// Prediction decoding needs 4 complex scratch arrays per thread.
auto shared_mem = get_best_smem_launch_config(sizeof(float_complex) * 4, numBlocks, threadsPerBlock);
prepareMomentsFromPredictionCodingKernel<<<numBlocks, threadsPerBlock, shared_mem>>>(view, pmoments.get());
}
else
{
SCOPED_CUDA_QUERY("Standard preparation");
// Standard coding only needs 3 scratch arrays per thread.
auto shared_mem = get_best_smem_launch_config(sizeof(float_complex) * 3, numBlocks, threadsPerBlock);
prepareMomentsKernel<<<numBlocks, threadsPerBlock, shared_mem>>>(view, pmoments.get(), bias);
}
// Swap the prepared buffer in; the old raw-moment buffer is freed when
// pmoments goes out of scope.
std::swap(data, pmoments);
view.data = data.get();
}
// Uploads the dequantized coding-warp parameters into the constant-memory
// array c_coding_params. No-op for warps that take no parameters.
void MomentImageDevice::load_coding_params()
{
    if (!requires_coding_parameters(view.coding_warp))
        return;

    assert(!coding_params.empty());
    auto host_params = CodingParamType::dequantize(coding_params);
    CHECK_CUDA(cudaMemcpyToSymbol(c_coding_params, host_params.data(),
                                  sizeof(float) * host_params.size(), 0, cudaMemcpyHostToDevice));
}
|
39d7f0e6413b1e30f2fc97b255e6460426e3543a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Cross-matches sam_N sample points against ref_N reference points (both
// sorted by pixel id, per the binary_search usage below). The sample list is
// processed in chunks of part_sam_N, handed out dynamically to GPU_N OpenMP
// threads (one per GPU). For each sample, up to 5 matched reference indices
// go to h_sam_match[s*5..] and the match count to h_sam_matchedCnt[s].
void singleCM(PIX_NODE h_ref_node[], int ref_N, PIX_NODE h_sam_node[], int sam_N, int h_sam_match[],int h_sam_matchedCnt[])
{
//the maximum number of sample points that can be matched each time by each card
int part_sam_N = 25000000;
int part_ref_N = 8 * part_sam_N;
PIX_NODE *d_ref_node[GPU_N];
PIX_NODE *d_sam_node[GPU_N];
int *d_sam_match[GPU_N], *d_sam_matchedCnt[GPU_N];
int chunk_N = (int)ceil(sam_N * 1.0 / part_sam_N);
// Shared work counter: each thread claims the next unprocessed chunk.
int chunk_id = 0;
omp_set_num_threads(GPU_N);
#pragma omp parallel
{
int i = omp_get_thread_num() % GPU_N;
checkCudaErrors(hipSetDevice(i));
checkCudaErrors(hipDeviceReset());
size_t free_mem,total_mem;
checkCudaErrors(hipMemGetInfo(&free_mem,&total_mem));
printf("Card %d before malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize);
// Per-card buffers sized for the largest possible chunk.
// NOTE(review): these allocations are never hipFree'd — leaked per call.
checkCudaErrors(hipMalloc(&d_ref_node[i],sizeof(PIX_NODE) * part_ref_N));
checkCudaErrors(hipMalloc(&d_sam_node[i],sizeof(PIX_NODE) * part_sam_N));
checkCudaErrors(hipMalloc(&d_sam_match[i],sizeof(int) * part_sam_N * 5));
checkCudaErrors(hipMalloc(&d_sam_matchedCnt[i],sizeof(int) * part_sam_N));
checkCudaErrors(hipMemGetInfo(&free_mem,&total_mem));
printf("Card %d after malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize);
// NOTE(review): DATA RACE — the increment below is atomic, but the later
// reads of chunk_id (the `== chunk_N` test and `chunk_id - 1`) are not:
// two threads can observe the same value and process the same chunk (or
// skip one). An `#pragma omp atomic capture` of the pre-increment value
// would fix this; see the companion CUDA version of this function.
while(chunk_id < chunk_N)
//the total number of sample points processed by this card
{
#pragma omp atomic
chunk_id++;
int cur_sam_N;
if(chunk_id == chunk_N) // the last round
cur_sam_N = sam_N - (chunk_id - 1) * part_sam_N;
else
cur_sam_N = part_sam_N;
int start_sam_pos = (chunk_id - 1) * part_sam_N;
int end_sam_pos = start_sam_pos + cur_sam_N - 1;
int start_pix = h_sam_node[start_sam_pos].pix;
int end_pix = h_sam_node[end_sam_pos].pix;
// Find the reference range covering pixel ids [start_pix, end_pix].
int start_ref_pos;
if(start_pix == 0)
start_ref_pos = 0;
else
start_ref_pos = binary_search(start_pix - 1,h_ref_node,ref_N);
// start_ref_pos = get_start(start_pix,h_ref_node,ref_N);
if(start_ref_pos == -1)
continue;
int end_ref_pos = binary_search(end_pix,h_ref_node,ref_N) - 1;
if(end_ref_pos == -2)
end_ref_pos = ref_N - 1;
int cur_ref_N = end_ref_pos - start_ref_pos + 1;
dim3 block(block_size);
// Grid capped at 65536 blocks; the kernel must cover the rest itself.
dim3 grid(min(65536,(int)ceil(cur_sam_N * 1.0 / block.x)));
if(cur_ref_N == 0)
continue;
printf("\n\nCard %d chunk-%d\n",i,chunk_id - 1);
printf("block.x %d grid.x %d\n",block.x,grid.x);
printf("start_sam_pos %d start_sam_pix %d end_sam_pos %d end_sam_pix %d sam_N %d\n",start_sam_pos,start_pix,end_sam_pos,end_pix,cur_sam_N);
printf("start_ref_pos %d start_ref_pix %d end_ref_pos %d end_ref_pix %d ref_N %d\n",start_ref_pos,h_ref_node[start_ref_pos].pix,end_ref_pos,h_ref_node[end_ref_pos].pix,cur_ref_N);
checkCudaErrors(hipMemset(d_sam_matchedCnt[i],0,sizeof(int) * part_sam_N));
checkCudaErrors(hipMemcpy(d_sam_node[i],h_sam_node + start_sam_pos,cur_sam_N * sizeof(PIX_NODE),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_ref_node[i],h_ref_node + start_ref_pos,cur_ref_N * sizeof(PIX_NODE), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_singleCM), dim3(grid),dim3(block), 0, 0, d_ref_node[i],cur_ref_N,d_sam_node[i],cur_sam_N,d_sam_match[i],d_sam_matchedCnt[i],start_ref_pos,start_sam_pos);
checkCudaErrors(hipMemcpy(h_sam_matchedCnt + start_sam_pos,d_sam_matchedCnt[i],cur_sam_N * sizeof(int),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_sam_match + start_sam_pos * 5,d_sam_match[i],cur_sam_N * 5 * sizeof(int),hipMemcpyDeviceToHost));
}
}
// Report total and average matches per sample.
unsigned long long sum = 0;
int cnt[1000];
memset(cnt,0,sizeof(cnt));
for(int i = sam_N - 1; i >= 0; --i)
{
sum += h_sam_matchedCnt[i];
/*
cout << i << " " << h_sam_matchedCnt[i] << endl;
cout << h_sam_node[i].ra << " " << h_sam_node[i].dec << endl;
cout << "\n----------------\n" << endl;
for(int j = i * 5; j < i * 5 + min(5,h_sam_matchedCnt[i]); ++j)
{
int pos = h_sam_match[j];
cout << h_ref_node[pos].ra << " " << h_ref_node[pos].dec << endl;
}
cout << "\n--------------------\n" << endl;
*/
}
cout << "sum " << sum << endl;
cout << "ave " << sum * 1.0 / sam_N << endl;
}
| 39d7f0e6413b1e30f2fc97b255e6460426e3543a.cu | void singleCM(PIX_NODE h_ref_node[], int ref_N, PIX_NODE h_sam_node[], int sam_N, int h_sam_match[],int h_sam_matchedCnt[])
{
    // Cross-matches sam_N sample points against ref_N reference points (both
    // sorted by pixel id, per the binary_search usage below). The sample list
    // is processed in chunks of part_sam_N, handed out dynamically to GPU_N
    // OpenMP threads (one per GPU). For each sample, up to 5 matched
    // reference indices go to h_sam_match[s*5..] and the match count to
    // h_sam_matchedCnt[s].
    //the maximum number of sample points that can be matched each time by each card
    int part_sam_N = 25000000;
    int part_ref_N = 8 * part_sam_N;
    PIX_NODE *d_ref_node[GPU_N];
    PIX_NODE *d_sam_node[GPU_N];
    int *d_sam_match[GPU_N], *d_sam_matchedCnt[GPU_N];
    int chunk_N = (int)ceil(sam_N * 1.0 / part_sam_N);
    int chunk_id = 0;   // shared work counter: next chunk to hand out
    omp_set_num_threads(GPU_N);
#pragma omp parallel
    {
        int i = omp_get_thread_num() % GPU_N;
        checkCudaErrors(cudaSetDevice(i));
        checkCudaErrors(cudaDeviceReset());
        size_t free_mem,total_mem;
        checkCudaErrors(cudaMemGetInfo(&free_mem,&total_mem));
        printf("Card %d before malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize);
        // Per-card buffers sized for the largest possible chunk.
        checkCudaErrors(cudaMalloc(&d_ref_node[i],sizeof(PIX_NODE) * part_ref_N));
        checkCudaErrors(cudaMalloc(&d_sam_node[i],sizeof(PIX_NODE) * part_sam_N));
        checkCudaErrors(cudaMalloc(&d_sam_match[i],sizeof(int) * part_sam_N * 5));
        checkCudaErrors(cudaMalloc(&d_sam_matchedCnt[i],sizeof(int) * part_sam_N));
        checkCudaErrors(cudaMemGetInfo(&free_mem,&total_mem));
        printf("Card %d after malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize);
        for (;;)
        {
            // BUG FIX: atomically *capture* the claimed chunk index. The old
            // code did an atomic increment and then re-read the shared
            // counter non-atomically, so two threads could observe the same
            // value and process the same chunk (or skip one).
            int chunk;   // 0-based chunk handled by this thread
#pragma omp atomic capture
            chunk = chunk_id++;
            if (chunk >= chunk_N)
                break;
            int cur_sam_N;
            if (chunk == chunk_N - 1) // the last round may be short
                cur_sam_N = sam_N - chunk * part_sam_N;
            else
                cur_sam_N = part_sam_N;
            int start_sam_pos = chunk * part_sam_N;
            int end_sam_pos = start_sam_pos + cur_sam_N - 1;
            int start_pix = h_sam_node[start_sam_pos].pix;
            int end_pix = h_sam_node[end_sam_pos].pix;
            // Find the reference range covering pixel ids [start_pix, end_pix].
            int start_ref_pos;
            if (start_pix == 0)
                start_ref_pos = 0;
            else
                start_ref_pos = binary_search(start_pix - 1,h_ref_node,ref_N);
            if (start_ref_pos == -1)
                continue;
            int end_ref_pos = binary_search(end_pix,h_ref_node,ref_N) - 1;
            if (end_ref_pos == -2) // end_pix is past the last reference pixel
                end_ref_pos = ref_N - 1;
            int cur_ref_N = end_ref_pos - start_ref_pos + 1;
            dim3 block(block_size);
            // Grid capped at 65536 blocks; the kernel must cover the rest itself.
            dim3 grid(min(65536,(int)ceil(cur_sam_N * 1.0 / block.x)));
            if (cur_ref_N == 0)
                continue;
            printf("\n\nCard %d chunk-%d\n",i,chunk);
            printf("block.x %d grid.x %d\n",block.x,grid.x);
            printf("start_sam_pos %d start_sam_pix %d end_sam_pos %d end_sam_pix %d sam_N %d\n",start_sam_pos,start_pix,end_sam_pos,end_pix,cur_sam_N);
            printf("start_ref_pos %d start_ref_pix %d end_ref_pos %d end_ref_pix %d ref_N %d\n",start_ref_pos,h_ref_node[start_ref_pos].pix,end_ref_pos,h_ref_node[end_ref_pos].pix,cur_ref_N);
            checkCudaErrors(cudaMemset(d_sam_matchedCnt[i],0,sizeof(int) * part_sam_N));
            checkCudaErrors(cudaMemcpy(d_sam_node[i],h_sam_node + start_sam_pos,cur_sam_N * sizeof(PIX_NODE),cudaMemcpyHostToDevice));
            checkCudaErrors(cudaMemcpy(d_ref_node[i],h_ref_node + start_ref_pos,cur_ref_N * sizeof(PIX_NODE), cudaMemcpyHostToDevice));
            kernel_singleCM<<<grid,block>>>(d_ref_node[i],cur_ref_N,d_sam_node[i],cur_sam_N,d_sam_match[i],d_sam_matchedCnt[i],start_ref_pos,start_sam_pos);
            checkCudaErrors(cudaMemcpy(h_sam_matchedCnt + start_sam_pos,d_sam_matchedCnt[i],cur_sam_N * sizeof(int),cudaMemcpyDeviceToHost));
            checkCudaErrors(cudaMemcpy(h_sam_match + start_sam_pos * 5,d_sam_match[i],cur_sam_N * 5 * sizeof(int),cudaMemcpyDeviceToHost));
        }
        // BUG FIX: release the per-card device buffers (previously leaked).
        checkCudaErrors(cudaFree(d_ref_node[i]));
        checkCudaErrors(cudaFree(d_sam_node[i]));
        checkCudaErrors(cudaFree(d_sam_match[i]));
        checkCudaErrors(cudaFree(d_sam_matchedCnt[i]));
    }
    // Report total and average number of matches per sample.
    // (Removed: an unused cnt[1000] array and a commented-out debug dump.)
    unsigned long long sum = 0;
    for (int i = sam_N - 1; i >= 0; --i)
        sum += h_sam_matchedCnt[i];
    cout << "sum " << sum << endl;
    cout << "ave " << sum * 1.0 / sam_N << endl;
}
|
33ca6af92d3360fdbe7ecc10577a453b48e83e41.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2015 - 2021 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#define CUDACHECK(error) \
{ \
hipError_t localError = error; \
if (localError != hipSuccess) { \
printf("error: %s at %s:%d\n", hipGetErrorString(localError), __FILE__, __LINE__); \
} \
}
// Grid-stride element-wise accumulation: y[i] += x[i] for every i in [0, n).
__global__
void add(int n, const float *x, float *y)
{
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = gridDim.x * blockDim.x;
    int i = first;
    while (i < n) {
        y[i] += x[i];
        i += step;
    }
}
// Benchmarks managed-memory access with explicit cudaMemAdvise /
// cudaMemPrefetchAsync hints: runs `repeat` iterations of
// prefetch(A,B to GPU) -> add kernel -> prefetch(B back to CPU),
// reports the average per-iteration time, then verifies the result.
void prefetch (const int gpuDeviceId, const int numElements, const int repeat)
{
    printf("Concurrent managed access with prefetch\n");
    float *A, *B;
    CUDACHECK(hipMallocManaged(&A, numElements*sizeof(float)));
    CUDACHECK(hipMallocManaged(&B, numElements*sizeof(float)));
    for (int i = 0; i < numElements; i++) {
        A[i] = 1.0f;
        B[i] = 2.0f;
    }
    CUDACHECK(hipDeviceSynchronize());
    int blockSize = 256;
    int numBlocks = (numElements + blockSize - 1) / blockSize;
    dim3 dimGrid(numBlocks, 1, 1);
    dim3 dimBlock(blockSize, 1, 1);
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < repeat; i++) {
        // A is read-only in the kernel; advise + prefetch both operands in.
        CUDACHECK(hipMemAdvise(A, numElements*sizeof(float), hipMemAdviseSetReadMostly, hipCpuDeviceId));
        CUDACHECK(hipMemPrefetchAsync(A, numElements*sizeof(float), gpuDeviceId));
        CUDACHECK(hipMemPrefetchAsync(B, numElements*sizeof(float), gpuDeviceId));
        hipLaunchKernelGGL(( add) , dim3(dimGrid), dim3(dimBlock) , 0, 0, numElements, A, B);
        CUDACHECK(hipMemPrefetchAsync(B, numElements*sizeof(float), hipCpuDeviceId));
        CUDACHECK(hipDeviceSynchronize());
    }
    // BUG FIX: take the end timestamp *before* the host-side verification
    // loop; the old code included scanning all numElements on the CPU in the
    // reported "average execution time".
    auto end = std::chrono::steady_clock::now();
    auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
    printf("Average execution time: %f (ms)\n", time * 1e-6f / repeat);
    // After `repeat` accumulations, every element of B must equal repeat + 2.
    float maxError = 0.0f;
    for (int i = 0; i < numElements; i++)
        maxError = fmaxf(maxError, fabsf(B[i]-(repeat+2)));
    CUDACHECK(hipFree(A));
    CUDACHECK(hipFree(B));
    bool testResult = (maxError == 0.0f);
    printf("%s\n", testResult ? "PASS" : "FAIL");
}
// Same benchmark as prefetch() but without any advise/prefetch hints:
// managed pages migrate on demand when touched by the kernel or the host.
void naive (const int numElements, const int repeat)
{
    printf("Concurrent managed access without prefetch\n");
    float *A, *B;
    CUDACHECK(hipMallocManaged(&A, numElements*sizeof(float)));
    CUDACHECK(hipMallocManaged(&B, numElements*sizeof(float)));
    for (int i = 0; i < numElements; i++) {
        A[i] = 1.0f;
        B[i] = 2.0f;
    }
    CUDACHECK(hipDeviceSynchronize());
    int blockSize = 256;
    int numBlocks = (numElements + blockSize - 1) / blockSize;
    dim3 dimGrid(numBlocks, 1, 1);
    dim3 dimBlock(blockSize, 1, 1);
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < repeat; i++) {
        hipLaunchKernelGGL(( add) , dim3(dimGrid), dim3(dimBlock) , 0, 0, numElements, A, B);
        CUDACHECK(hipDeviceSynchronize());
    }
    // BUG FIX: stop the clock before the host-side verification loop so the
    // reported average covers only the kernel iterations (the old code timed
    // the CPU scan over all numElements as well).
    auto end = std::chrono::steady_clock::now();
    auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
    printf("Average execution time: %f (ms)\n", time * 1e-6f / repeat);
    // After `repeat` accumulations, every element of B must equal repeat + 2.
    float maxError = 0.0f;
    for (int i = 0; i < numElements; i++)
        maxError = fmaxf(maxError, fabsf(B[i]-(repeat+2)));
    CUDACHECK(hipFree(A));
    CUDACHECK(hipFree(B));
    bool testResult = (maxError == 0.0f);
    printf("%s\n", testResult ? "PASS" : "FAIL");
}
// Entry point: checks for concurrent-managed-access support, then runs the
// prefetch and naive benchmarks twice (first pass is a warm-up).
int main(int argc, char *argv[])
{
    if (argc != 2) {
        printf("Usage: %s <repeat>\n", argv[0]);
        return 1;
    }
    const int repeat = atoi(argv[1]);

    int p_gpuDevice = 0;
    CUDACHECK(hipSetDevice(p_gpuDevice));
    printf("info: set device to %d\n", p_gpuDevice);

    // Prefetching managed memory only makes sense when the device supports
    // concurrent managed access; skip the benchmark (successfully) otherwise.
    int concurrentManagedAccess = 0;
    CUDACHECK(hipDeviceGetAttribute(&concurrentManagedAccess,
                                    hipDeviceAttributeConcurrentManagedAccess,
                                    p_gpuDevice));
    if (!concurrentManagedAccess) {
        printf("info: concurrent managed access not supported on device %d\n Skipped\n", p_gpuDevice);
        return 0;
    }

    const int numElements = 64 * 1024 * 1024;
    for (int pass = 0; pass < 2; pass++) {
        printf("------------\n");
        printf(pass == 0 ? " Warmup \n" : " Done \n");
        printf("------------\n");
        prefetch(p_gpuDevice, numElements, repeat);
        naive(numElements, repeat);
    }
    return 0;
}
| 33ca6af92d3360fdbe7ecc10577a453b48e83e41.cu | /*
Copyright (c) 2015 - 2021 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <math.h>
#include <chrono>
#include <cuda_runtime.h>
#define CUDACHECK(error) \
{ \
cudaError_t localError = error; \
if (localError != cudaSuccess) { \
printf("error: %s at %s:%d\n", cudaGetErrorString(localError), __FILE__, __LINE__); \
} \
}
// Grid-stride element-wise accumulation: y[i] += x[i] for every i in [0, n).
__global__
void add(int n, const float *x, float *y)
{
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = gridDim.x * blockDim.x;
    int i = first;
    while (i < n) {
        y[i] += x[i];
        i += step;
    }
}
// Benchmarks managed-memory access with explicit cudaMemAdvise /
// cudaMemPrefetchAsync hints: runs `repeat` iterations of
// prefetch(A,B to GPU) -> add kernel -> prefetch(B back to CPU),
// reports the average per-iteration time, then verifies the result.
void prefetch (const int gpuDeviceId, const int numElements, const int repeat)
{
    printf("Concurrent managed access with prefetch\n");
    float *A, *B;
    CUDACHECK(cudaMallocManaged(&A, numElements*sizeof(float)));
    CUDACHECK(cudaMallocManaged(&B, numElements*sizeof(float)));
    for (int i = 0; i < numElements; i++) {
        A[i] = 1.0f;
        B[i] = 2.0f;
    }
    CUDACHECK(cudaDeviceSynchronize());
    int blockSize = 256;
    int numBlocks = (numElements + blockSize - 1) / blockSize;
    dim3 dimGrid(numBlocks, 1, 1);
    dim3 dimBlock(blockSize, 1, 1);
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < repeat; i++) {
        // A is read-only in the kernel; advise + prefetch both operands in.
        CUDACHECK(cudaMemAdvise(A, numElements*sizeof(float), cudaMemAdviseSetReadMostly, cudaCpuDeviceId));
        CUDACHECK(cudaMemPrefetchAsync(A, numElements*sizeof(float), gpuDeviceId));
        CUDACHECK(cudaMemPrefetchAsync(B, numElements*sizeof(float), gpuDeviceId));
        add <<< dimGrid, dimBlock >>> (numElements, A, B);
        CUDACHECK(cudaMemPrefetchAsync(B, numElements*sizeof(float), cudaCpuDeviceId));
        CUDACHECK(cudaDeviceSynchronize());
    }
    // BUG FIX: take the end timestamp *before* the host-side verification
    // loop; the old code included scanning all numElements on the CPU in the
    // reported "average execution time".
    auto end = std::chrono::steady_clock::now();
    auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
    printf("Average execution time: %f (ms)\n", time * 1e-6f / repeat);
    // After `repeat` accumulations, every element of B must equal repeat + 2.
    float maxError = 0.0f;
    for (int i = 0; i < numElements; i++)
        maxError = fmaxf(maxError, fabsf(B[i]-(repeat+2)));
    CUDACHECK(cudaFree(A));
    CUDACHECK(cudaFree(B));
    bool testResult = (maxError == 0.0f);
    printf("%s\n", testResult ? "PASS" : "FAIL");
}
// Same benchmark as prefetch() but without any advise/prefetch hints:
// managed pages migrate on demand when touched by the kernel or the host.
void naive (const int numElements, const int repeat)
{
    printf("Concurrent managed access without prefetch\n");
    float *A, *B;
    CUDACHECK(cudaMallocManaged(&A, numElements*sizeof(float)));
    CUDACHECK(cudaMallocManaged(&B, numElements*sizeof(float)));
    for (int i = 0; i < numElements; i++) {
        A[i] = 1.0f;
        B[i] = 2.0f;
    }
    CUDACHECK(cudaDeviceSynchronize());
    int blockSize = 256;
    int numBlocks = (numElements + blockSize - 1) / blockSize;
    dim3 dimGrid(numBlocks, 1, 1);
    dim3 dimBlock(blockSize, 1, 1);
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < repeat; i++) {
        add <<< dimGrid, dimBlock >>> (numElements, A, B);
        CUDACHECK(cudaDeviceSynchronize());
    }
    // BUG FIX: stop the clock before the host-side verification loop so the
    // reported average covers only the kernel iterations (the old code timed
    // the CPU scan over all numElements as well).
    auto end = std::chrono::steady_clock::now();
    auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
    printf("Average execution time: %f (ms)\n", time * 1e-6f / repeat);
    // After `repeat` accumulations, every element of B must equal repeat + 2.
    float maxError = 0.0f;
    for (int i = 0; i < numElements; i++)
        maxError = fmaxf(maxError, fabsf(B[i]-(repeat+2)));
    CUDACHECK(cudaFree(A));
    CUDACHECK(cudaFree(B));
    bool testResult = (maxError == 0.0f);
    printf("%s\n", testResult ? "PASS" : "FAIL");
}
// Entry point: checks for concurrent-managed-access support, then runs the
// prefetch and naive benchmarks twice (first pass is a warm-up).
int main(int argc, char *argv[])
{
    if (argc != 2) {
        printf("Usage: %s <repeat>\n", argv[0]);
        return 1;
    }
    const int repeat = atoi(argv[1]);

    int p_gpuDevice = 0;
    CUDACHECK(cudaSetDevice(p_gpuDevice));
    printf("info: set device to %d\n", p_gpuDevice);

    // Prefetching managed memory only makes sense when the device supports
    // concurrent managed access; skip the benchmark (successfully) otherwise.
    int concurrentManagedAccess = 0;
    CUDACHECK(cudaDeviceGetAttribute(&concurrentManagedAccess,
                                     cudaDevAttrConcurrentManagedAccess,
                                     p_gpuDevice));
    if (!concurrentManagedAccess) {
        printf("info: concurrent managed access not supported on device %d\n Skipped\n", p_gpuDevice);
        return 0;
    }

    const int numElements = 64 * 1024 * 1024;
    for (int pass = 0; pass < 2; pass++) {
        printf("------------\n");
        printf(pass == 0 ? " Warmup \n" : " Done \n");
        printf("------------\n");
        prefetch(p_gpuDevice, numElements, repeat);
        naive(numElements, repeat);
    }
    return 0;
}
|
f0ef11a1b27637ae88470f847f92467327a48065.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
    using StreamCompaction::Common::PerformanceTimer;

    // Shared GPU timer for this namespace (Meyers singleton).
    PerformanceTimer& timer()
    {
        static PerformanceTimer timer;
        return timer;
    }

    int blocksize = 1024;
    dim3 blocknum;

    // Up-sweep (reduce) step with compacted indexing: thread idx owns the
    // element pair whose root sits at stride 2^(d+1); only
    // n = adjustlen / 2^(d+1) threads are launched, so none idle.
    __global__ void GPUUpsweepreal(int n, int d, int *idata)
    {
        int idx = blockDim.x * blockIdx.x + threadIdx.x;
        int para = (1 << (d + 1));   // stride between pair roots at level d
        int para1 = 1 << d;          // offset of the left child
        if (idx < n)
        {
            idata[idx * para + para - 1] += idata[idx * para + para1 - 1];
        }
    }

    // Naive up-sweep: one thread per array element; each thread tests whether
    // its index is a multiple of 2^(d+1), so most threads idle. Kept for
    // performance comparison against GPUUpsweepreal.
    __global__ void GPUUpsweep(int n, int d, int *idata)
    {
        int idx = blockDim.x * blockIdx.x + threadIdx.x;
        int para = 1 << (d + 1);
        int para1 = 1 << d;
        if (idx < n)
        {
            if (idx >= 0 && idx % para == 0)
            {
                idata[idx + para - 1] += idata[idx + para1 - 1];
            }
        }
    }

    // Down-sweep step with compacted indexing (counterpart of GPUUpsweepreal).
    __global__ void GPUdownsweepreal(int n, int d, int *idata)
    {
        int idx = blockDim.x * blockIdx.x + threadIdx.x;
        int para = 1 << (d + 1);
        int para1 = 1 << d;
        if (idx < n)
        {
            // Swap left child with root, accumulating into the root.
            int t = idata[idx * para + para1 - 1];
            idata[idx * para + para1 - 1] = idata[idx * para + para - 1];
            idata[idx * para + para - 1] += t;
        }
    }

    // Naive down-sweep: one thread per element, modulo test for activity.
    __global__ void GPUdownsweep(int n, int d, int *idata)
    {
        int idx = blockDim.x * blockIdx.x + threadIdx.x;
        int para = 1 << (d + 1);
        int para1 = 1 << d;
        if (idx < n)
        {
            if (idx >= 0 && idx % para == 0)
            {
                int t = idata[idx + para1 - 1];
                idata[idx + para1 - 1] = idata[idx + para - 1];
                idata[idx + para - 1] += t;
            }
        }
    }

    /**
     * Performs prefix-sum (aka scan) on idata, storing the result into odata.
     * Work-efficient (Blelloch) exclusive scan using the naive
     * one-thread-per-element sweep kernels; the device buffer is padded to
     * the next power of two.
     */
    void scan(int n, int *odata, const int *idata, bool istimer) {
        int dmax = ilog2ceil(n);
        int adjustlen = 1 << dmax;
        int *dev_arr;
        hipMalloc((void**)& dev_arr, adjustlen * sizeof(int));
        checkCUDAError("hipMalloc dev_arr failed!");
        // BUG FIX: the host array only holds n elements, but the old code
        // copied adjustlen of them (out-of-bounds host read) and left the
        // padding uninitialized. Zero the whole device buffer first, then
        // copy exactly n elements — zero padding is the scan identity.
        hipMemset(dev_arr, 0, adjustlen * sizeof(int));
        hipMemcpy(dev_arr, idata, n * sizeof(int), hipMemcpyHostToDevice);
        checkCUDAError("hipMemcpy failed!");
        if (istimer)
            timer().startGpuTimer();
        for (int d = 0; d < dmax; d++)
        {
            blocknum = (adjustlen + blocksize - 1) / blocksize;
            GPUUpsweep << <blocknum, blocksize >> > (adjustlen, d, dev_arr);
        }
        // Clear the root before the down-sweep (makes the scan exclusive).
        hipMemset(dev_arr + adjustlen - 1, 0, sizeof(int));
        for (int d = dmax - 1; d >= 0; d--)
        {
            blocknum = (adjustlen + blocksize - 1) / blocksize;
            GPUdownsweep << <blocknum, blocksize >> > (adjustlen, d, dev_arr);
        }
        if (istimer)
            timer().endGpuTimer();
        hipMemcpy(odata, dev_arr, n * sizeof(int), hipMemcpyDeviceToHost);
        hipFree(dev_arr);
    }

    // Same exclusive scan as scan(), but only launches as many threads as
    // are active at each sweep level (compacted-index kernels).
    void realscan(int n, int *odata, const int *idata, bool istimer) {
        int dmax = ilog2ceil(n);
        int adjustlen = 1 << dmax;
        int *dev_arr;
        hipMalloc((void**)& dev_arr, adjustlen * sizeof(int));
        checkCUDAError("hipMalloc dev_arr failed!");
        // BUG FIX: copy only the n valid host elements and zero the padding
        // (the old code over-read idata by adjustlen - n elements).
        hipMemset(dev_arr, 0, adjustlen * sizeof(int));
        hipMemcpy(dev_arr, idata, n * sizeof(int), hipMemcpyHostToDevice);
        checkCUDAError("hipMemcpy failed!");
        if (istimer)
            timer().startGpuTimer();
        for (int d = 0; d < dmax; d++)
        {
            int interval = 1 << (d + 1);
            int active = adjustlen / interval;  // element pairs at this level
            blocknum = (active + blocksize - 1) / blocksize;
            GPUUpsweepreal << <blocknum, blocksize >> > (active, d, dev_arr);
        }
        hipMemset(dev_arr + adjustlen - 1, 0, sizeof(int));
        for (int d = dmax - 1; d >= 0; d--)
        {
            int interval = 1 << (d + 1);
            int active = adjustlen / interval;
            blocknum = (active + blocksize - 1) / blocksize;
            GPUdownsweepreal << <blocknum, blocksize >> > (active, d, dev_arr);
        }
        if (istimer)
            timer().endGpuTimer();
        hipMemcpy(odata, dev_arr, n * sizeof(int), hipMemcpyDeviceToHost);
        hipFree(dev_arr);
    }

    /**
     * Performs stream compaction on idata, storing the result into odata.
     * All zeroes are discarded.
     *
     * @param n The number of elements in idata.
     * @param odata The array into which to store elements.
     * @param idata The array of elements to compact.
     * @returns The number of elements remaining after compaction.
     */
    int compact(int n, int *odata, const int *idata) {
        if (n <= 0)   // guard: nothing to compact
            return 0;
        int *dev_idata, *dev_odata, *dev_checker, *dev_indices;
        hipMalloc((void**)&dev_idata, n * sizeof(int));
        checkCUDAError("hipMalloc dev_idata failed!");
        hipMalloc((void**)&dev_odata, n * sizeof(int));
        checkCUDAError("hipMalloc dev_odata failed!");
        hipMalloc((void**)&dev_checker, n * sizeof(int));
        checkCUDAError("hipMalloc dev_checker failed!");
        hipMalloc((void**)&dev_indices, n * sizeof(int));
        checkCUDAError("hipMalloc dev_indices failed!");
        hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
        // Map each element to a 0/1 keep-flag.
        blocknum = (n + blocksize - 1) / blocksize;
        Common::kernMapToBoolean << <blocknum, blocksize >> > (n, dev_checker, dev_idata);
        // Exclusive-scan the flags (round-tripped through host buffers, as
        // realscan takes host pointers) to obtain scatter destinations.
        int *checker = new int[n];
        int *indices = new int[n];
        hipMemcpy(checker, dev_checker, n * sizeof(int), hipMemcpyDeviceToHost);
        realscan(n, indices, checker, true);
        hipMemcpy(dev_indices, indices, n * sizeof(int), hipMemcpyHostToDevice);
        // Exclusive scan: total kept = last offset + last flag.
        int finalct = checker[n - 1] ? 1 : 0;
        int count = indices[n - 1] + finalct;
        blocknum = (n + blocksize - 1) / blocksize;
        Common::kernScatter << <blocknum, blocksize >> > (n, dev_odata, dev_idata, dev_checker, dev_indices);
        hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost);
        hipFree(dev_idata);
        hipFree(dev_odata);
        hipFree(dev_checker);
        hipFree(dev_indices);
        delete[] indices;
        delete[] checker;
        return count;
    }
}
}
| f0ef11a1b27637ae88470f847f92467327a48065.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
    // Meyers-singleton accessor: one lazily constructed timer shared by
    // every scan/compact call in this translation unit.
    static PerformanceTimer instance;
    return instance;
}
int blocksize = 1024;  // threads per block for every kernel launch below
dim3 blocknum;         // grid size; recomputed before each launch
__global__ void GPUUpsweepreal(int n, int d, int *idata)
{
    // Compacted up-sweep for level d of a Blelloch scan: thread t owns the
    // t-th node and adds its left child's partial sum into the node root.
    // n is the number of active nodes at this level (padded_len / 2^(d+1)).
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n) {
        return;
    }
    int span = 1 << (d + 1);  // elements covered by one node at this level
    int half = 1 << d;        // offset of the node's left child
    idata[tid * span + span - 1] += idata[tid * span + half - 1];
}
__global__ void GPUUpsweep(int n, int d, int *idata)
{
    // Naive up-sweep: one thread per padded element; only threads whose
    // index is a multiple of 2^(d+1) do any work at level d.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n) {
        return;
    }
    int span = 1 << (d + 1);
    int half = 1 << d;
    if (tid % span == 0) {
        idata[tid + span - 1] += idata[tid + half - 1];
    }
}
__global__ void GPUdownsweepreal(int n, int d, int *idata)
{
    // Compacted down-sweep for level d: thread t owns the t-th node, swaps
    // the left child's partial up into itself and pushes the running prefix
    // sum down to the right.  n = padded_len / 2^(d+1) active nodes.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n) {
        return;
    }
    int span = 1 << (d + 1);
    int half = 1 << d;
    int left = idata[tid * span + half - 1];               // save left child
    idata[tid * span + half - 1] = idata[tid * span + span - 1];
    idata[tid * span + span - 1] += left;
}
__global__ void GPUdownsweep(int n, int d, int *idata)
{
    // Naive down-sweep: one thread per padded element; a thread sitting on a
    // 2^(d+1) boundary swaps its left child's partial up and propagates the
    // running prefix down the tree.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n) {
        return;
    }
    int span = 1 << (d + 1);
    int half = 1 << d;
    if (tid % span == 0) {
        int left = idata[tid + half - 1];
        idata[tid + half - 1] = idata[tid + span - 1];
        idata[tid + span - 1] += left;
    }
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
/**
 * Work-efficient exclusive (Blelloch) scan, naive indexing: every level
 * launches one thread per padded element; most threads idle at deep levels.
 *
 * @param n       number of valid elements in idata/odata
 * @param odata   host output buffer (exclusive prefix sums of idata)
 * @param idata   host input buffer
 * @param istimer when true, time the scan with the GPU timer
 */
void scan(int n, int *odata, const int *idata, bool istimer) {
    int dmax = ilog2ceil(n);
    int adjustlen = 1 << dmax;  // pad length up to the next power of two
    int *dev_arr;
    cudaMalloc((void**)& dev_arr, adjustlen * sizeof(int));
    checkCUDAError("cudaMalloc dev_arr failed!");
    // Zero the padding, then copy only the n valid elements.  (The original
    // copied adjustlen ints, reading past the end of idata whenever n is not
    // a power of two and corrupting the scan with garbage padding.)
    cudaMemset(dev_arr, 0, adjustlen * sizeof(int));
    cudaMemcpy(dev_arr, idata, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("cudaMemcpy failed!");
    if(istimer)
        timer().startGpuTimer();
    // Grid size is the same at every level for the naive kernels.
    blocknum = (adjustlen + blocksize - 1) / blocksize;
    // Up-sweep (reduce) phase.
    for (int d = 0; d < dmax; d++)
    {
        GPUUpsweep << <blocknum, blocksize >> > (adjustlen, d, dev_arr);
    }
    // Clear the root for an exclusive scan.
    cudaMemset(dev_arr + adjustlen - 1, 0, sizeof(int));
    // Down-sweep phase.
    for (int d = dmax - 1; d >= 0; d--)
    {
        GPUdownsweep << <blocknum, blocksize >> > (adjustlen, d, dev_arr);
    }
    if (istimer)
        timer().endGpuTimer();
    cudaMemcpy(odata, dev_arr, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_arr);
}
/**
 * Work-efficient exclusive (Blelloch) scan with compacted thread indexing:
 * level d launches only the adjustlen / 2^(d+1) threads that actually work.
 *
 * @param n       number of valid elements in idata/odata
 * @param odata   host output buffer (exclusive prefix sums of idata)
 * @param idata   host input buffer
 * @param istimer when true, time the scan with the GPU timer
 */
void realscan(int n, int *odata, const int *idata, bool istimer) {
    int dmax = ilog2ceil(n);
    int adjustlen = 1 << dmax;  // pad length up to the next power of two
    int *dev_arr;
    cudaMalloc((void**)& dev_arr, adjustlen * sizeof(int));
    checkCUDAError("cudaMalloc dev_arr failed!");
    // Zero the whole buffer first, then copy only the n valid elements.
    // (The original copied adjustlen ints straight from idata, reading past
    // the end of the host array whenever n is not a power of two.)
    cudaMemset(dev_arr, 0, adjustlen * sizeof(int));
    cudaMemcpy(dev_arr, idata, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("cudaMemcpy failed!");
    if (istimer)
        timer().startGpuTimer();
    // Up-sweep: build partial sums in place, one thread per active node.
    for (int d = 0; d < dmax; d++)
    {
        int interval = (1 << (d + 1));          // span of one node at level d
        int active = adjustlen / interval;      // threads needed at this level
        blocknum = (active + blocksize - 1) / blocksize;  // ceil-div
        GPUUpsweepreal << <blocknum, blocksize >> > (active, d, dev_arr);
    }
    // Clear the root so the down-sweep produces an exclusive scan.
    cudaMemset(dev_arr + adjustlen - 1, 0, sizeof(int));
    // Down-sweep: propagate prefix sums back down the tree.
    for (int d = dmax - 1; d >= 0; d--)
    {
        int interval = (1 << (d + 1));
        int active = adjustlen / interval;
        blocknum = (active + blocksize - 1) / blocksize;
        GPUdownsweepreal << <blocknum, blocksize >> > (active, d, dev_arr);
    }
    if (istimer)
        timer().endGpuTimer();
    // Only the first n results are meaningful; the padding is discarded.
    cudaMemcpy(odata, dev_arr, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_arr);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
/**
 * Stream compaction: copies the non-zero elements of idata into odata.
 *
 * @param n     number of elements in idata
 * @param odata host output buffer (receives the surviving elements)
 * @param idata host input buffer
 * @returns the number of elements remaining after compaction
 */
int compact(int n, int *odata, const int *idata) {
    int *dev_idata, *dev_odata, *dev_checker, *dev_indices;  // (dropped stray ';;')
    cudaMalloc((void**)&dev_idata, n * sizeof(int));
    checkCUDAError("cudaMalloc dev_idata failed!");
    cudaMalloc((void**)&dev_odata, n * sizeof(int));
    checkCUDAError("cudaMalloc dev_odata failed!");
    cudaMalloc((void**)&dev_checker, n * sizeof(int));
    checkCUDAError("cudaMalloc dev_checker failed!");
    cudaMalloc((void**)&dev_indices, n * sizeof(int));
    checkCUDAError("cudaMalloc dev_indices failed!");
    cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
    checkCUDAError("cudaMemcpy dev_idata failed!");
    // Flag survivors with 1, zeros with 0.
    blocknum = (n + blocksize - 1) / blocksize;  // ceil-div (original formula launched one extra block when n was a multiple of blocksize)
    Common::kernMapToBoolean << <blocknum, blocksize >> > (n, dev_checker, dev_idata);
    // Exclusive-scan the flags to obtain each survivor's output slot.
    int *checker = new int[n];
    int *indices = new int[n];
    cudaMemcpy(checker, dev_checker, n * sizeof(int), cudaMemcpyDeviceToHost);
    realscan(n, indices, checker, true);
    cudaMemcpy(dev_indices, indices, n * sizeof(int), cudaMemcpyHostToDevice);
    // An exclusive scan omits the last element; add it back if it survives.
    int finalct = checker[n - 1] ? 1 : 0;
    int count = indices[n - 1] + finalct;
    blocknum = (n + blocksize - 1) / blocksize;
    Common::kernScatter << <blocknum, blocksize >> > (n, dev_odata, dev_idata, dev_checker, dev_indices);
    cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_idata);
    cudaFree(dev_odata);
    cudaFree(dev_checker);
    cudaFree(dev_indices);
    delete[] indices;
    delete[] checker;
    return count;
}
}
}
|
014a0f99109e7254a4a61f0a5c3ef73326ad4996.hip | // !!! This is a file automatically generated by hipify!!!
/*****************************************
Emitting C Generated Code
*******************************************/
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include "cublas_header.h"
#include <stdbool.h>
/**************** Snippet ****************/
/**
 * Fills two 2x2 matrices (all 2s and all 3s), multiplies them with
 * hipblasSgemm and adds them with hipblasSgeam, printing each 2x2 result
 * in row-major order.  The x0 argument is unused (kept for the generated
 * code's calling convention).
 */
void Snippet(int x0) {
  float* x1 = (float*)malloc(4 * sizeof(float));
  int x2 = 0;
  while (x2 != 4) {
    x1[x2] = 2.0;
    x2 = x2 + 1;
  }
  float* x3 = (float*)malloc(4 * sizeof(float));
  int x4 = 0;
  while (x4 != 4) {
    x3[x4] = 3.0;
    x4 = x4 + 1;
  }
  // Device buffers.  (The original leaked a malloc(0) allocation into each
  // of these pointers before overwriting them with hipMalloc.)
  float* x5 = NULL;
  CUDA_CALL(hipMalloc(&x5, (size_t)(4 * sizeof(float))));
  CUDA_CALL(hipMemcpy(x5, x1, (size_t)(4 * sizeof(float)), hipMemcpyHostToDevice));
  float* x6 = NULL;
  CUDA_CALL(hipMalloc(&x6, (size_t)(4 * sizeof(float))));
  CUDA_CALL(hipMemcpy(x6, x3, (size_t)(4 * sizeof(float)), hipMemcpyHostToDevice));
  float* x7 = NULL;
  CUDA_CALL(hipMalloc(&x7, (size_t)(4 * sizeof(float))));
  float* x8 = (float*)malloc(4 * sizeof(float));
  hipblasHandle_t x9;
  CUBLAS_CALL(hipblasCreate(&x9));
  float x10 = 1.0;
  float x11 = 0.0;
  // C = 1.0 * A * B + 0.0 * C (column-major 2x2 GEMM).
  CUBLAS_CALL(hipblasSgemm(x9, HIPBLAS_OP_N, HIPBLAS_OP_N, 2, 2, 2, &x10, x5, 2, x6, 2, &x11, x7, 2));
  CUDA_CALL(hipMemcpy(x8, x7, (size_t)(4 * sizeof(float)), hipMemcpyDeviceToHost));
  printf("Test GEMM:\n");
  int x12 = 0;
  while (x12 != 2) {
    int x13 = x12;
    int x14 = 0;
    while (x14 != 2) {
      printf("%f, ", x8[x14 * 2 + x13]);
      x14 = x14 + 1;
    }
    printf("\n");
    x12 = x12 + 1;
  }
  // C = 1.0 * A + 0.0 * B (column-major 2x2 GEAM).
  CUBLAS_CALL(hipblasSgeam(x9, HIPBLAS_OP_N, HIPBLAS_OP_N, 2, 2, &x10, x5, 2, &x11, x6, 2, x7, 2));
  CUDA_CALL(hipMemcpy(x8, x7, (size_t)(4 * sizeof(float)), hipMemcpyDeviceToHost));
  printf("Test GEAM:\n");
  int x15 = 0;
  while (x15 != 2) {
    int x16 = x15;
    int x17 = 0;
    while (x17 != 2) {
      printf("%f, ", x8[x17 * 2 + x16]);
      x17 = x17 + 1;
    }
    printf("\n");
    x15 = x15 + 1;
  }
  CUDA_CALL(hipFree(x5));
  CUDA_CALL(hipFree(x6));
  CUDA_CALL(hipFree(x7));
  CUBLAS_CALL(hipblasDestroy(x9));
  // Release the host staging buffers (previously leaked).
  free(x1);
  free(x3);
  free(x8);
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
  // Require exactly one command-line argument, forwarded to Snippet.
  if (argc != 2) {
    printf("usage: %s <arg>\n", argv[0]);
    return 0;
  }
  const int arg = atoi(argv[1]);
  Snippet(arg);
  return 0;
}
| 014a0f99109e7254a4a61f0a5c3ef73326ad4996.cu | /*****************************************
Emitting C Generated Code
*******************************************/
#include <string.h>
#include <stdlib.h>
#include "cuda_header.h"
#include <stdio.h>
#include <stdint.h>
#include "cublas_header.h"
#include <stdbool.h>
/**************** Snippet ****************/
/**
 * Fills two 2x2 matrices (all 2s and all 3s), multiplies them with
 * cublasSgemm and adds them with cublasSgeam, printing each 2x2 result
 * in row-major order.  The x0 argument is unused (kept for the generated
 * code's calling convention).
 */
void Snippet(int x0) {
  float* x1 = (float*)malloc(4 * sizeof(float));
  int x2 = 0;
  while (x2 != 4) {
    x1[x2] = 2.0;
    x2 = x2 + 1;
  }
  float* x3 = (float*)malloc(4 * sizeof(float));
  int x4 = 0;
  while (x4 != 4) {
    x3[x4] = 3.0;
    x4 = x4 + 1;
  }
  // Device buffers.  (The original leaked a malloc(0) allocation into each
  // of these pointers before overwriting them with cudaMalloc.)
  float* x5 = NULL;
  CUDA_CALL(cudaMalloc(&x5, (size_t)(4 * sizeof(float))));
  CUDA_CALL(cudaMemcpy(x5, x1, (size_t)(4 * sizeof(float)), cudaMemcpyHostToDevice));
  float* x6 = NULL;
  CUDA_CALL(cudaMalloc(&x6, (size_t)(4 * sizeof(float))));
  CUDA_CALL(cudaMemcpy(x6, x3, (size_t)(4 * sizeof(float)), cudaMemcpyHostToDevice));
  float* x7 = NULL;
  CUDA_CALL(cudaMalloc(&x7, (size_t)(4 * sizeof(float))));
  float* x8 = (float*)malloc(4 * sizeof(float));
  cublasHandle_t x9;
  CUBLAS_CALL(cublasCreate(&x9));
  float x10 = 1.0;
  float x11 = 0.0;
  // C = 1.0 * A * B + 0.0 * C (column-major 2x2 GEMM).
  CUBLAS_CALL(cublasSgemm(x9, CUBLAS_OP_N, CUBLAS_OP_N, 2, 2, 2, &x10, x5, 2, x6, 2, &x11, x7, 2));
  CUDA_CALL(cudaMemcpy(x8, x7, (size_t)(4 * sizeof(float)), cudaMemcpyDeviceToHost));
  printf("Test GEMM:\n");
  int x12 = 0;
  while (x12 != 2) {
    int x13 = x12;
    int x14 = 0;
    while (x14 != 2) {
      printf("%f, ", x8[x14 * 2 + x13]);
      x14 = x14 + 1;
    }
    printf("\n");
    x12 = x12 + 1;
  }
  // C = 1.0 * A + 0.0 * B (column-major 2x2 GEAM).
  CUBLAS_CALL(cublasSgeam(x9, CUBLAS_OP_N, CUBLAS_OP_N, 2, 2, &x10, x5, 2, &x11, x6, 2, x7, 2));
  CUDA_CALL(cudaMemcpy(x8, x7, (size_t)(4 * sizeof(float)), cudaMemcpyDeviceToHost));
  printf("Test GEAM:\n");
  int x15 = 0;
  while (x15 != 2) {
    int x16 = x15;
    int x17 = 0;
    while (x17 != 2) {
      printf("%f, ", x8[x17 * 2 + x16]);
      x17 = x17 + 1;
    }
    printf("\n");
    x15 = x15 + 1;
  }
  CUDA_CALL(cudaFree(x5));
  CUDA_CALL(cudaFree(x6));
  CUDA_CALL(cudaFree(x7));
  CUBLAS_CALL(cublasDestroy(x9));
  // Release the host staging buffers (previously leaked).
  free(x1);
  free(x3);
  free(x8);
}
/*****************************************
End of C Generated Code
*******************************************/
int main(int argc, char *argv[]) {
  // Require exactly one command-line argument, forwarded to Snippet.
  if (argc != 2) {
    printf("usage: %s <arg>\n", argv[0]);
    return 0;
  }
  const int arg = atoi(argv[1]);
  Snippet(arg);
  return 0;
}
|
4b670d64ad515bc48ea1c11dbfeacdfc047d4c42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Intentionally racy demo kernel: every thread performs a non-atomic
// read-modify-write on the same global counter, so most increments are
// lost.  (Illustrates why atomicAdd is required for shared counters.)
__global__ void unsafe(int *shared_var, int iters)
{
for (int i = 0; i < iters; i++)
{
int old = *shared_var; // unsynchronized read
*shared_var = old + 1; // unsynchronized write; may clobber other threads' updates
}
} | 4b670d64ad515bc48ea1c11dbfeacdfc047d4c42.cu | #include "includes.h"
// Intentionally racy demo kernel: every thread performs a non-atomic
// read-modify-write on the same global counter, so most increments are
// lost.  (Illustrates why atomicAdd is required for shared counters.)
__global__ void unsafe(int *shared_var, int iters)
{
for (int i = 0; i < iters; i++)
{
int old = *shared_var; // unsynchronized read
*shared_var = old + 1; // unsynchronized write; may clobber other threads' updates
}
} |
07468fc1fbe58a7534e4d128dddcccb024f04ea4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2015 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math_constants.h>
#include "lcutil.h"
#include <hip/hip_runtime_api.h>
// #include <gpuCUPTISampler.cuh>
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
#define COMP_ITERATIONS (1024)
#define THREADS (1024)
#define BLOCKS (32768)
#define STRIDE (64*1024)
#define REGBLOCK_SIZE (4)
#define UNROLL_ITERATIONS (32)
#define deviceNum (0)
template <class T>
__global__ void benchmark( T* cdin, T* cdout){
	// Streaming-copy bandwidth kernel: each thread moves UNROLL_ITERATIONS
	// elements, STRIDE apart, from cdin to cdout, repeating the batch
	// COMP_ITERATIONS / UNROLL_ITERATIONS times.  Performs exactly the same
	// loads/stores as the original hand-unrolled body; the pragmas ask the
	// compiler to do the unrolling instead.
	const long gid = blockIdx.x * THREADS + threadIdx.x;
	T v;
	#pragma unroll 1024
	for (int batch = 0; batch < COMP_ITERATIONS; batch += UNROLL_ITERATIONS) {
		#pragma unroll
		for (int k = 0; k < UNROLL_ITERATIONS; k++) {
			v = cdin[gid + STRIDE * k];
			cdout[gid + STRIDE * k] = v;
		}
	}
}
// Returns the median of column `col` of the n-row array x.
// Side effect (preserved from the original): that column is sorted
// ascending in place.  For even n, returns the mean of the two middle
// values; for odd n, the middle value itself.
double median(int n, double x[][4], int col) {
	// Insertion sort on the selected column; final state is identical to
	// the original exchange sort (column ascending, other columns untouched).
	for (int i = 1; i < n; i++) {
		double key = x[i][col];
		int j = i - 1;
		while (j >= 0 && x[j][col] > key) {
			x[j + 1][col] = x[j][col];
			j--;
		}
		x[j + 1][col] = key;
	}
	return (n % 2 == 0) ? (x[n / 2][col] + x[n / 2 - 1][col]) / 2.0
	                    : x[n / 2][col];
}
// Creates both timing events and immediately records `start` on the
// default stream; pair with finalizeEvents() to measure elapsed GPU time.
void initializeEvents(hipEvent_t *start, hipEvent_t *stop){
CUDA_SAFE_CALL( hipEventCreate(start) );
CUDA_SAFE_CALL( hipEventCreate(stop) );
CUDA_SAFE_CALL( hipEventRecord(*start, 0) );
}
// Records `stop` on the default stream, blocks until it completes, and
// returns the elapsed time between `start` and `stop` in milliseconds.
// Both events are destroyed, so each init/finalize pair is single-use.
float finalizeEvents(hipEvent_t start, hipEvent_t stop){
CUDA_SAFE_CALL( hipGetLastError() ); // surface any pending launch error first
CUDA_SAFE_CALL( hipEventRecord(stop, 0) );
CUDA_SAFE_CALL( hipEventSynchronize(stop) );
float kernel_time;
CUDA_SAFE_CALL( hipEventElapsedTime(&kernel_time, start, stop) );
CUDA_SAFE_CALL( hipEventDestroy(start) );
CUDA_SAFE_CALL( hipEventDestroy(stop) );
return kernel_time;
}
// Runs the copy benchmark once; reports elapsed time (ms) via kernel_time
// and effective bandwidth (GB/s) via bandw.  L2size is accepted but unused.
// NOTE(review): the kernel is always instantiated as benchmark<float>, yet
// type != 0 computes bandwidth with 8-byte elements — confirm whether a
// double instantiation was intended for that case.
void runbench(int type, double* kernel_time, double* bandw,double* cdin,double* cdout,int L2size){
hipEvent_t start, stop;
initializeEvents(&start, &stop);
dim3 dimBlock(THREADS, 1, 1);
dim3 dimGrid(BLOCKS, 1, 1);
hipLaunchKernelGGL(( benchmark<float>), dim3(dimGrid), dim3(dimBlock) , 0, 0, (float*)cdin,(float*)cdout);
// Total element transfers per launch: one load + one store per iteration.
long long shared_access = 2*(long long)(COMP_ITERATIONS)*THREADS*BLOCKS;
hipDeviceSynchronize();
double time = finalizeEvents(start, stop);
double result;
if (type==0)
result = ((double)shared_access)*4/(double)time*1000./(double)(1024*1024*1024); // 4-byte elements
else
result = ((double)shared_access)*8/(double)time*1000./(double)(1024*1024*1024); // 8-byte elements
*kernel_time = time;
*bandw=result;
}
/**
 * Allocates two large device buffers, runs the copy benchmark `ntries`
 * times (argv[1], default 1) on device `deviceNum`, and prints timings.
 */
int main(int argc, char *argv[]){
    hipDevice_t device = 0;
    int deviceCount;
    char deviceName[32];
    int L2size = 0;   // unused by runbench; initialized so no garbage is passed
    hipDeviceProp_t deviceProp;
    int dodouble = 0; // 0 => bandwidth computed for 4-byte (float) elements
    printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
    int ntries = (argc > 1) ? atoi(argv[1]) : 1;
    hipSetDevice(deviceNum);
    double time[ntries][2], value[ntries][4];
    int size = (THREADS*BLOCKS + 32*STRIDE) * sizeof(double);  // bytes
    size_t freeCUDAMem, totalCUDAMem;
    hipMemGetInfo(&freeCUDAMem, &totalCUDAMem);
    printf("Total GPU memory %lu, free %lu\n", totalCUDAMem, freeCUDAMem);
    // `size` is already in bytes; the original multiplied by sizeof(double)
    // again (8x overstatement) and printed a size_t with %d.
    printf("Buffer size: %dMB\n", size / (1024 * 1024));
    // Initialize global memory.
    double *cdin;
    double *cdout;
    CUDA_SAFE_CALL(hipMalloc((void**)&cdin, size));
    CUDA_SAFE_CALL(hipMalloc((void**)&cdout, size));
    CUDA_SAFE_CALL(hipMemset(cdin, 0, size));
    CUDA_SAFE_CALL(hipMemset(cdout, 0, size));
    // Wait for the memory operations to finish before benchmarking.
    CUDA_SAFE_CALL(hipDeviceSynchronize());
    hipGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        printf("There is no device supporting CUDA.\n");
        return -2;
    }
    printf("CUDA Device Number: %d\n", deviceNum);
    hipDeviceGet(&device, deviceNum);
    CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, device));
    hipDeviceGetName(deviceName, 32, device);
    for (int i = 0; i < ntries; i++) {
        // NOTE(review): every iteration overwrites time[0][0]/value[0][0];
        // pass &time[i][0]/&value[i][0] if per-try results are wanted.
        runbench(dodouble, &time[0][0], &value[0][0], cdin, cdout, L2size);
        printf("Registered time: %f ms\n", time[0][0]);
    }
    CUDA_SAFE_CALL(hipFree(cdin));
    CUDA_SAFE_CALL(hipFree(cdout));
    CUDA_SAFE_CALL(hipDeviceReset());
    printf("-----------------------------------------------------------------------\n");
    return 0;
}
| 07468fc1fbe58a7534e4d128dddcccb024f04ea4.cu | /*
* Copyright 2011-2015 NVIDIA Corporation. All rights reserved
*
* Sample app to demonstrate use of CUPTI library to obtain metric values
* using callbacks for CUDA runtime APIs
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math_constants.h>
#include "lcutil.h"
#include <cuda_profiler_api.h>
// #include <gpuCUPTISampler.cuh>
#define ALIGN_SIZE (8)
#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t) (buffer) & ((align)-1)) ? ((buffer) + (align) - ((uintptr_t) (buffer) & ((align)-1))) : (buffer))
#define COMP_ITERATIONS (1024)
#define THREADS (1024)
#define BLOCKS (32768)
#define STRIDE (64*1024)
#define REGBLOCK_SIZE (4)
#define UNROLL_ITERATIONS (32)
#define deviceNum (0)
template <class T>
__global__ void benchmark( T* cdin, T* cdout){
	// Streaming-copy bandwidth kernel: each thread moves UNROLL_ITERATIONS
	// elements, STRIDE apart, from cdin to cdout, repeating the batch
	// COMP_ITERATIONS / UNROLL_ITERATIONS times.  Performs exactly the same
	// loads/stores as the original hand-unrolled body; the pragmas ask the
	// compiler to do the unrolling instead.
	const long gid = blockIdx.x * THREADS + threadIdx.x;
	T v;
	#pragma unroll 1024
	for (int batch = 0; batch < COMP_ITERATIONS; batch += UNROLL_ITERATIONS) {
		#pragma unroll
		for (int k = 0; k < UNROLL_ITERATIONS; k++) {
			v = cdin[gid + STRIDE * k];
			cdout[gid + STRIDE * k] = v;
		}
	}
}
// Returns the median of column `col` of the n-row array x.
// Side effect (preserved from the original): that column is sorted
// ascending in place.  For even n, returns the mean of the two middle
// values; for odd n, the middle value itself.
double median(int n, double x[][4], int col) {
	// Insertion sort on the selected column; final state is identical to
	// the original exchange sort (column ascending, other columns untouched).
	for (int i = 1; i < n; i++) {
		double key = x[i][col];
		int j = i - 1;
		while (j >= 0 && x[j][col] > key) {
			x[j + 1][col] = x[j][col];
			j--;
		}
		x[j + 1][col] = key;
	}
	return (n % 2 == 0) ? (x[n / 2][col] + x[n / 2 - 1][col]) / 2.0
	                    : x[n / 2][col];
}
// Creates both timing events and immediately records `start` on the
// default stream; pair with finalizeEvents() to measure elapsed GPU time.
void initializeEvents(cudaEvent_t *start, cudaEvent_t *stop){
CUDA_SAFE_CALL( cudaEventCreate(start) );
CUDA_SAFE_CALL( cudaEventCreate(stop) );
CUDA_SAFE_CALL( cudaEventRecord(*start, 0) );
}
// Records `stop` on the default stream, blocks until it completes, and
// returns the elapsed time between `start` and `stop` in milliseconds.
// Both events are destroyed, so each init/finalize pair is single-use.
float finalizeEvents(cudaEvent_t start, cudaEvent_t stop){
CUDA_SAFE_CALL( cudaGetLastError() ); // surface any pending launch error first
CUDA_SAFE_CALL( cudaEventRecord(stop, 0) );
CUDA_SAFE_CALL( cudaEventSynchronize(stop) );
float kernel_time;
CUDA_SAFE_CALL( cudaEventElapsedTime(&kernel_time, start, stop) );
CUDA_SAFE_CALL( cudaEventDestroy(start) );
CUDA_SAFE_CALL( cudaEventDestroy(stop) );
return kernel_time;
}
// Runs the copy benchmark once; reports elapsed time (ms) via kernel_time
// and effective bandwidth (GB/s) via bandw.  L2size is accepted but unused.
// NOTE(review): the kernel is always instantiated as benchmark<float>, yet
// type != 0 computes bandwidth with 8-byte elements — confirm whether a
// double instantiation was intended for that case.
void runbench(int type, double* kernel_time, double* bandw,double* cdin,double* cdout,int L2size){
cudaEvent_t start, stop;
initializeEvents(&start, &stop);
dim3 dimBlock(THREADS, 1, 1);
dim3 dimGrid(BLOCKS, 1, 1);
benchmark<float><<< dimGrid, dimBlock >>>((float*)cdin,(float*)cdout);
// Total element transfers per launch: one load + one store per iteration.
long long shared_access = 2*(long long)(COMP_ITERATIONS)*THREADS*BLOCKS;
cudaDeviceSynchronize();
double time = finalizeEvents(start, stop);
double result;
if (type==0)
result = ((double)shared_access)*4/(double)time*1000./(double)(1024*1024*1024); // 4-byte elements
else
result = ((double)shared_access)*8/(double)time*1000./(double)(1024*1024*1024); // 8-byte elements
*kernel_time = time;
*bandw=result;
}
/**
 * Allocates two large device buffers, runs the copy benchmark `ntries`
 * times (argv[1], default 1) on device `deviceNum`, and prints timings.
 */
int main(int argc, char *argv[]){
    CUdevice device = 0;
    int deviceCount;
    char deviceName[32];
    int L2size = 0;   // unused by runbench; initialized so no garbage is passed
    cudaDeviceProp deviceProp;
    int dodouble = 0; // 0 => bandwidth computed for 4-byte (float) elements
    printf("Usage: %s [device_num] [metric_name]\n", argv[0]);
    int ntries = (argc > 1) ? atoi(argv[1]) : 1;
    cudaSetDevice(deviceNum);
    double time[ntries][2], value[ntries][4];
    int size = (THREADS*BLOCKS + 32*STRIDE) * sizeof(double);  // bytes
    size_t freeCUDAMem, totalCUDAMem;
    cudaMemGetInfo(&freeCUDAMem, &totalCUDAMem);
    printf("Total GPU memory %lu, free %lu\n", totalCUDAMem, freeCUDAMem);
    // `size` is already in bytes; the original multiplied by sizeof(double)
    // again (8x overstatement) and printed a size_t with %d.
    printf("Buffer size: %dMB\n", size / (1024 * 1024));
    // Initialize global memory.
    double *cdin;
    double *cdout;
    CUDA_SAFE_CALL(cudaMalloc((void**)&cdin, size));
    CUDA_SAFE_CALL(cudaMalloc((void**)&cdout, size));
    CUDA_SAFE_CALL(cudaMemset(cdin, 0, size));
    CUDA_SAFE_CALL(cudaMemset(cdout, 0, size));
    // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // supported equivalent.
    CUDA_SAFE_CALL(cudaDeviceSynchronize());
    cuDeviceGetCount(&deviceCount);
    if (deviceCount == 0) {
        printf("There is no device supporting CUDA.\n");
        return -2;
    }
    printf("CUDA Device Number: %d\n", deviceNum);
    cuDeviceGet(&device, deviceNum);
    CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, device));
    cuDeviceGetName(deviceName, 32, device);
    for (int i = 0; i < ntries; i++) {
        // NOTE(review): every iteration overwrites time[0][0]/value[0][0];
        // pass &time[i][0]/&value[i][0] if per-try results are wanted.
        runbench(dodouble, &time[0][0], &value[0][0], cdin, cdout, L2size);
        printf("Registered time: %f ms\n", time[0][0]);
    }
    CUDA_SAFE_CALL(cudaFree(cdin));
    CUDA_SAFE_CALL(cudaFree(cdout));
    CUDA_SAFE_CALL(cudaDeviceReset());
    printf("-----------------------------------------------------------------------\n");
    return 0;
}
|
30498d230a79c8ced21c49a9e3abc00d54e3d17c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/AccumulateType.h>
#include <ATen/OpMathType.h>
#include <ATen/hip/DeviceUtils.cuh>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/block_reduce.cuh>
#include <ATen/native/hip/ForeachFunctors.cuh>
#include <ATen/native/hip/MultiTensorApply.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_foreach_norm_native.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/empty.h>
#endif
namespace at::native {
// Per-chunk partial reduction for foreach_norm: each block reduces one chunk
// of one tensor to a single partial (sum of |x| for L1, sum of x^2 for L2)
// and writes it to output_per_tensor[tensor * max_chunks_per_tensor + chunk].
// A separate cleanup kernel combines the per-chunk partials.
template<typename T, int NormType, int depth=1, int r_args_depth=1, int res_arg_index=0>
struct LpNormFunctor {
static_assert(NormType == 1 || NormType == 2, "foreach_norm supports only L1 and L2 norm");
using opmath_t = typename at::opmath_type<T>;
__device__ __forceinline__ void operator() (
int chunk_size,
TensorListMetadata<depth>& tl,
opmath_t* output_per_tensor,
const int max_chunks_per_tensor
) {
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.numel_for_tensor[tensor_loc];
T* x = (T*)tl.addresses[0][tensor_loc];
x += chunk_idx * chunk_size; // advance to this block's chunk
n -= chunk_idx * chunk_size; // elements remaining from the chunk start
__shared__ opmath_t s_vals[512];
opmath_t vals[kILP]; // per-thread partials, one per ILP slot
T r_x[kILP];         // register staging for the vectorized loads
for (int i = 0; i < kILP; i++) {
vals[i] = opmath_t(0);
r_x[i] = T(0);
}
// Fast path: aligned chunk whose tail divides evenly -> vectorized loads.
// NOTE(review): `(chunk_size & kILP) == 0` tests a single bit; it looks
// like `chunk_size % kILP == 0` was intended — confirm against upstream.
if (n % kILP == 0 && (chunk_size & kILP) == 0 && is_aligned(x)) {
for (int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) {
// load
load_store(r_x, x, 0, i_start)<br>
#pragma unroll
for (int ii = 0; ii < kILP; ii++) {
opmath_t next = static_cast<opmath_t>(r_x[ii]);
vals[ii] += NormType == 1 ? ::abs(next) : next * next;
}
}
} else {
// Slow path: scalar strided loads with per-element bounds checks.
for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) {
#pragma unroll
for (int ii = 0; ii < kILP; ii++) {
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size) {
opmath_t next = static_cast<opmath_t>(x[i]);
vals[ii] += NormType == 1 ? ::abs(next) : next * next;
}
}
}
}
// Fold the ILP slots, then reduce across the block.
auto val = opmath_t(0);
for (int i = 0; i < kILP; i++) {
val += vals[i];
}
auto final = at::native::cuda_utils::BlockReduceSum(val, s_vals);
if (threadIdx.x == 0) {
output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final;
}
}
};
template<typename T, int NormType, typename opmath_t = at::opmath_type<T>>
__global__ void lpnorm_cleanup(
opmath_t* output_per_tensor,
T* ret_per_tensor,
int max_chunks_per_tensor) {
  // One block per tensor: sum that tensor's per-chunk partials and write
  // the final norm (sqrt of the sum for L2) to ret_per_tensor[blockIdx.x].
  __shared__ opmath_t shmem[512];
  const opmath_t* partials = output_per_tensor + blockIdx.x * max_chunks_per_tensor;
  opmath_t acc = 0;
  for (int c = threadIdx.x; c < max_chunks_per_tensor; c += blockDim.x) {
    acc += partials[c];
  }
  opmath_t total = at::native::cuda_utils::BlockReduceSum<opmath_t>(acc, shmem);
  if (threadIdx.x == 0) {
    ret_per_tensor[blockIdx.x] = NormType == 1 ? total : ::sqrt(total);
  }
}
// note(mkozuki): Why excluding Int and Complex from fast path
// - Int: at::norm does not support.
// - Complex: __shfl_down_sync does not support complex and foreach does not support functions whose inputs dtypes and output dtype are different.
// Fast multi-tensor L1/L2 norm: one fused pass computes per-chunk partials
// for every tensor, then a cleanup kernel produces one scalar per tensor.
// Falls back to the slow per-tensor path for unsupported dtypes or ord
// values other than 1 and 2.
std::vector<Tensor> foreach_tensor_norm_cuda(TensorList tensors, const Scalar& ord) {
double p;
if (ord.isIntegral(false)) {
p = ord.to<int64_t>();
} else if (ord.isFloatingPoint()) {
p = ord.to<double>();
} else {
AT_ERROR("foreach_tensor_norm_cuda expects ord to be integer or float");
}
check_foreach_api_restrictions(tensors);
const bool has_int_or_complex = std::any_of(tensors.begin(), tensors.end(), [](const auto & t) {
const auto scalar_type = t.scalar_type();
return at::isIntegralType(scalar_type, /*includeBool*/true) || at::isComplexType(scalar_type);
});
// Slow path: per-tensor at::norm (fast path handles only real float L1/L2).
if (!can_use_fast_route(tensors) ||
has_int_or_complex ||
!(p == static_cast<double>(1) || p == static_cast<double>(2))) {
return foreach_tensor_norm_slow(tensors, ord);
}
const int ntensors = tensors.size();
// Size the partial buffer by the largest tensor's chunk count.
int max_chunks_per_tensor = -1;
for (int t = 0; t < ntensors; t++) {
int max_chunks_this_tensor = (tensors[t].numel() + kChunkSize - 1) / kChunkSize;
if(max_chunks_this_tensor > max_chunks_per_tensor) {
max_chunks_per_tensor = max_chunks_this_tensor;
}
}
const auto options = tensors[0].options();
// Partials accumulate in the op-math dtype (e.g. float for half inputs).
auto output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, options.dtype(toOpMathType(tensors[0].scalar_type())));
auto ret_per_tensor = at::empty({ntensors}, options);
auto tensor_lists = std::vector<std::vector<Tensor>>{tensors.vec()};
if (p == static_cast<double>(1)) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() {
using opmath_t = typename at::opmath_type<scalar_t>;
// Pass 1: per-chunk |x| sums.
multi_tensor_apply<1>(
tensor_lists,
LpNormFunctor<scalar_t, 1>(),
output_per_tensor.data_ptr<opmath_t>(),
max_chunks_per_tensor);
C10_HIP_KERNEL_LAUNCH_CHECK();
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(output_per_tensor));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Pass 2: one block per tensor folds its partials into the result.
hipLaunchKernelGGL(( lpnorm_cleanup<scalar_t, 1>), dim3(ntensors), dim3(512), 0, stream,
output_per_tensor.data_ptr<opmath_t>(),
ret_per_tensor.data_ptr<scalar_t>(),
max_chunks_per_tensor);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else if (p == static_cast<double>(2)) {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() {
using opmath_t = typename at::opmath_type<scalar_t>;
// Pass 1: per-chunk x^2 sums.
multi_tensor_apply<1>(
tensor_lists,
LpNormFunctor<scalar_t, 2>(),
output_per_tensor.data_ptr<opmath_t>(),
max_chunks_per_tensor);
C10_HIP_KERNEL_LAUNCH_CHECK();
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(output_per_tensor));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Pass 2: cleanup kernel also applies the final sqrt for L2.
hipLaunchKernelGGL(( lpnorm_cleanup<scalar_t, 2>), dim3(ntensors), dim3(512), 0, stream,
output_per_tensor.data_ptr<opmath_t>(),
ret_per_tensor.data_ptr<scalar_t>(),
max_chunks_per_tensor);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
} else {
AT_ERROR("foreach_tensor_norm_cuda fast path got unexpected ord value: ", p);
}
// Unpack the packed per-tensor results into the expected vector-of-scalars.
std::vector<Tensor> result;
result.reserve(ntensors);
for (const auto& i : c10::irange(ntensors)) {
result.emplace_back(ret_per_tensor[i]);
}
return result;
}
} // namespace at::native
| 30498d230a79c8ced21c49a9e3abc00d54e3d17c.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/AccumulateType.h>
#include <ATen/OpMathType.h>
#include <ATen/cuda/DeviceUtils.cuh>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/block_reduce.cuh>
#include <ATen/native/cuda/ForeachFunctors.cuh>
#include <ATen/native/cuda/MultiTensorApply.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_foreach_norm_native.h>
#include <ATen/ops/zeros.h>
#include <ATen/ops/empty.h>
#endif
namespace at::native {
// Per-chunk partial reduction for foreach_norm: each block reduces one chunk
// of one tensor to a single partial (sum of |x| for L1, sum of x^2 for L2)
// and writes it to output_per_tensor[tensor * max_chunks_per_tensor + chunk].
// A separate cleanup kernel combines the per-chunk partials.
template<typename T, int NormType, int depth=1, int r_args_depth=1, int res_arg_index=0>
struct LpNormFunctor {
static_assert(NormType == 1 || NormType == 2, "foreach_norm supports only L1 and L2 norm");
using opmath_t = typename at::opmath_type<T>;
__device__ __forceinline__ void operator() (
int chunk_size,
TensorListMetadata<depth>& tl,
opmath_t* output_per_tensor,
const int max_chunks_per_tensor
) {
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.numel_for_tensor[tensor_loc];
T* x = (T*)tl.addresses[0][tensor_loc];
x += chunk_idx * chunk_size; // advance to this block's chunk
n -= chunk_idx * chunk_size; // elements remaining from the chunk start
__shared__ opmath_t s_vals[512];
opmath_t vals[kILP]; // per-thread partials, one per ILP slot
T r_x[kILP];         // register staging for the vectorized loads
for (int i = 0; i < kILP; i++) {
vals[i] = opmath_t(0);
r_x[i] = T(0);
}
// Fast path: aligned chunk whose tail divides evenly -> vectorized loads.
// NOTE(review): `(chunk_size & kILP) == 0` tests a single bit; it looks
// like `chunk_size % kILP == 0` was intended — confirm against upstream.
if (n % kILP == 0 && (chunk_size & kILP) == 0 && is_aligned(x)) {
for (int i_start = threadIdx.x; i_start * kILP < n && i_start * kILP < chunk_size; i_start += blockDim.x) {
// load
load_store(r_x, x, 0, i_start);
#pragma unroll
for (int ii = 0; ii < kILP; ii++) {
opmath_t next = static_cast<opmath_t>(r_x[ii]);
vals[ii] += NormType == 1 ? ::abs(next) : next * next;
}
}
} else {
// Slow path: scalar strided loads with per-element bounds checks.
for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * kILP) {
#pragma unroll
for (int ii = 0; ii < kILP; ii++) {
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size) {
opmath_t next = static_cast<opmath_t>(x[i]);
vals[ii] += NormType == 1 ? ::abs(next) : next * next;
}
}
}
}
// Fold the ILP slots, then reduce across the block.
auto val = opmath_t(0);
for (int i = 0; i < kILP; i++) {
val += vals[i];
}
auto final = at::native::cuda_utils::BlockReduceSum(val, s_vals);
if (threadIdx.x == 0) {
output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final;
}
}
};
// One block per tensor: sums the per-chunk partial results written by
// LpNormFunctor and produces the final per-tensor value. For NormType==2 the
// partials are sums of squares, so the square root is applied here.
// Caller launches this with 512 threads per block (matches `vals` size).
template<typename T, int NormType, typename opmath_t = at::opmath_type<T>>
__global__ void lpnorm_cleanup(
    opmath_t* output_per_tensor,
    T* ret_per_tensor,
    int max_chunks_per_tensor) {
  __shared__ opmath_t vals[512];
  // blockIdx.x selects the tensor; its chunk partials are contiguous
  opmath_t* output_this_tensor = output_per_tensor + blockIdx.x*max_chunks_per_tensor;
  opmath_t val = 0;
  // block-stride sum over this tensor's chunk partials
  for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) {
    val += output_this_tensor[i];
  }
  opmath_t final = at::native::cuda_utils::BlockReduceSum<opmath_t>(val, vals);
  if(threadIdx.x == 0) {
    ret_per_tensor[blockIdx.x] = NormType == 1 ? final : ::sqrt(final);
  }
}
// note(mkozuki): Why excluding Int and Complex from fast path
// - Int: at::norm does not support.
// - Complex: __shfl_down_sync does not support complex and foreach does not support functions whose inputs dtypes and output dtype are different.
// Fast-path L1/L2 per-tensor norm over a tensor list.
// Falls back to the slow (per-tensor at::norm) path when the fast route is not
// applicable, when any tensor is integral/complex, or when ord is not 1 or 2.
// Returns one 0-dim tensor per input tensor.
std::vector<Tensor> foreach_tensor_norm_cuda(TensorList tensors, const Scalar& ord) {
  // normalize ord to a double for the dispatch below
  double p;
  if (ord.isIntegral(false)) {
    p = ord.to<int64_t>();
  } else if (ord.isFloatingPoint()) {
    p = ord.to<double>();
  } else {
    AT_ERROR("foreach_tensor_norm_cuda expects ord to be integer or float");
  }
  check_foreach_api_restrictions(tensors);
  const bool has_int_or_complex = std::any_of(tensors.begin(), tensors.end(), [](const auto & t) {
    const auto scalar_type = t.scalar_type();
    return at::isIntegralType(scalar_type, /*includeBool*/true) || at::isComplexType(scalar_type);
  });
  if (!can_use_fast_route(tensors) ||
      has_int_or_complex ||
      !(p == static_cast<double>(1) || p == static_cast<double>(2))) {
    return foreach_tensor_norm_slow(tensors, ord);
  }
  const int ntensors = tensors.size();
  // widest chunk count across tensors determines the per-tensor stride in
  // output_per_tensor
  int max_chunks_per_tensor = -1;
  for (int t = 0; t < ntensors; t++) {
    int max_chunks_this_tensor = (tensors[t].numel() + kChunkSize - 1) / kChunkSize;
    if(max_chunks_this_tensor > max_chunks_per_tensor) {
      max_chunks_per_tensor = max_chunks_this_tensor;
    }
  }
  const auto options = tensors[0].options();
  // per-chunk partials accumulate in the op-math (wider) dtype
  auto output_per_tensor = at::zeros({ntensors*max_chunks_per_tensor}, options.dtype(toOpMathType(tensors[0].scalar_type())));
  auto ret_per_tensor = at::empty({ntensors}, options);
  auto tensor_lists = std::vector<std::vector<Tensor>>{tensors.vec()};
  if (p == static_cast<double>(1)) {
    AT_DISPATCH_FLOATING_TYPES_AND2(
      kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() {
        using opmath_t = typename at::opmath_type<scalar_t>;
        // stage 1: per-chunk partial L1 sums
        multi_tensor_apply<1>(
          tensor_lists,
          LpNormFunctor<scalar_t, 1>(),
          output_per_tensor.data_ptr<opmath_t>(),
          max_chunks_per_tensor);
        C10_CUDA_KERNEL_LAUNCH_CHECK();
        const at::cuda::OptionalCUDAGuard device_guard(device_of(output_per_tensor));
        auto stream = at::cuda::getCurrentCUDAStream();
        // stage 2: one block per tensor folds the partials
        lpnorm_cleanup<scalar_t, 1><<<ntensors, 512, 0, stream>>>(
          output_per_tensor.data_ptr<opmath_t>(),
          ret_per_tensor.data_ptr<scalar_t>(),
          max_chunks_per_tensor);
        C10_CUDA_KERNEL_LAUNCH_CHECK();
      });
  } else if (p == static_cast<double>(2)) {
    AT_DISPATCH_FLOATING_TYPES_AND2(
      kHalf, kBFloat16, tensor_lists[0][0].scalar_type(), "foreach_tensor_norm_cuda", [&]() {
        using opmath_t = typename at::opmath_type<scalar_t>;
        // stage 1: per-chunk partial sums of squares
        multi_tensor_apply<1>(
          tensor_lists,
          LpNormFunctor<scalar_t, 2>(),
          output_per_tensor.data_ptr<opmath_t>(),
          max_chunks_per_tensor);
        C10_CUDA_KERNEL_LAUNCH_CHECK();
        const at::cuda::OptionalCUDAGuard device_guard(device_of(output_per_tensor));
        auto stream = at::cuda::getCurrentCUDAStream();
        // stage 2: fold partials and take sqrt inside the kernel
        lpnorm_cleanup<scalar_t, 2><<<ntensors, 512, 0, stream>>>(
          output_per_tensor.data_ptr<opmath_t>(),
          ret_per_tensor.data_ptr<scalar_t>(),
          max_chunks_per_tensor);
        C10_CUDA_KERNEL_LAUNCH_CHECK();
      });
  } else {
    AT_ERROR("foreach_tensor_norm_cuda fast path got unexpected ord value: ", p);
  }
  // slice the per-tensor results into individual 0-dim tensors
  std::vector<Tensor> result;
  result.reserve(ntensors);
  for (const auto& i : c10::irange(ntensors)) {
    result.emplace_back(ret_per_tensor[i]);
  }
  return result;
}
} // namespace at::native
|
01262264671519a044dd88c7a56837d9667eb71d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
// CPU reference dot product: dp = sum_i a[i]*b[i], parallelized with OpenMP.
void dp0(double* a, double* b, long N, double& dp) {
  double acc = 0;
  #pragma omp parallel for reduction(+:acc)
  for (long idx = 0; idx < N; ++idx) {
    acc += a[idx] * b[idx];
  }
  dp = acc;
}
// CPU reference matrix-vector product for a row-major N x N matrix:
// mult_ref = a * v.
void mv0(double* a, double* v, long N, double* mult_ref) {
  for (long row = 0; row < N; ++row) {
    double acc = 0;
    for (long col = 0; col < N; ++col) {
      acc += a[N*row + col] * v[col];
    }
    mult_ref[row] = acc;
  }
}
// Print `message` plus the HIP error string and abort the process if the most
// recent HIP API call reported an error; no-op otherwise.
void Check_CUDA_Error(const char *message){
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    fprintf(stderr, "ERROR: %s: %s\n", message, hipGetErrorString(err));
    exit(-1);
  }
}
#define BLOCK_SIZE 1024
// Block-wise tree reduction: each block sums up to BLOCK_SIZE elements of `a`
// into sum[blockIdx.x]. Requires blockDim.x == BLOCK_SIZE (1024): the first
// step reads smem[threadIdx.x + 512], so smaller blocks would read
// uninitialized shared memory. All launch sites in this file use BLOCK_SIZE.
__global__ void reduction_kernel2(double* sum, const double* a, long N){
//reduction kernel for summing
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
// load one element per thread; out-of-range threads contribute 0
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
// halve the active range each step, with a block barrier between steps
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
// final 32 lanes: warp-level steps, ordered with __syncwarp()
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
// lane 0 folds the last pair and publishes the block's partial sum
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
// Elementwise product kernel: c[i] = a[i] * b[i] for every i < N.
__global__ void mult_kernel(double* a, double* b, double* c, long N) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i >= N) return;  // guard the grid tail
  c[i] = a[i] * b[i];
}
// Dot product of device vectors a and b (length N): multiply elementwise on
// the GPU, then fold the products with repeated block reductions; the final
// scalar is copied back into the host reference `dp`.
void dot(double* a, double* b, long N, double& dp) {
  // scratch buffer for the elementwise products
  double *c_d;
  hipMalloc(&c_d, N*sizeof(double));
  hipLaunchKernelGGL(( mult_kernel), dim3(N/BLOCK_SIZE+1),dim3(BLOCK_SIZE), 0, 0, a, b, c_d, N);
  // scratch buffer for the partial block sums
  double *y_d;
  hipMalloc(&y_d, ((N+BLOCK_SIZE-1)/BLOCK_SIZE)*sizeof(double));
  double* sum_d = y_d;
  long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
  hipLaunchKernelGGL(( reduction_kernel2), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d, c_d, N);
  while (Nb > 1) {
    long N = Nb;
    Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
    // NOTE(review): each level writes its output at sum_d + Nb, inside the
    // region the previous level produced; this is only safe while the next
    // level fits in a single block -- TODO confirm for very large N.
    hipLaunchKernelGGL(( reduction_kernel2), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d + Nb, sum_d, N);
    sum_d += Nb;
  }
  // copy the single remaining value back to the host
  hipMemcpyAsync(&dp, sum_d, 1*sizeof(double), hipMemcpyDeviceToHost);
  hipDeviceSynchronize();
  // free device scratch buffers
  hipFree(c_d);
  hipFree(y_d);  // BUGFIX: y_d was previously leaked on every call
}
// Matrix-vector product kernel: one thread per output row; c = a * v for a
// row-major N x N matrix a.
__global__ void mvKernel(double* a, double* v, long N, double* c) {
  long row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row < N) {
    // accumulate in a register instead of read-modify-write on global memory
    double acc = 0;
    for (long i = 0; i < N; i++) {
      acc += a[row*N+i] * v[i];
    }
    c[row] = acc;
  }
  // the trailing __syncthreads() was removed: the kernel touches no shared
  // state and ends immediately afterwards, so the barrier did nothing
}
// Launch mvKernel to compute mult = a * v on the GPU and copy the N-element
// result back to the host buffer `mult`.
void mvProd(double* a, double* v, long N, double* mult) {
  // device buffer for the result vector
  double *out_d;
  hipMalloc(&out_d, N*sizeof(double));
  hipLaunchKernelGGL(( mvKernel), dim3(N/BLOCK_SIZE+1),dim3(BLOCK_SIZE), 0, 0, a, v, N, out_d);
  hipMemcpyAsync(mult, out_d, N*sizeof(double), hipMemcpyDeviceToHost);
  hipDeviceSynchronize();
  hipFree(out_d);
}
int main() {
  long N = (1UL<<14);
  // pinned host input vector for fast async transfers
  double *v;
  hipHostMalloc((void **) &v, N * sizeof(double));
  //#pragma omp parallel for
  for (long i = 0; i < N; i++) {
    v[i] = 1.0/(i+1);
  }
  // pinned host input matrix (row-major N x N)
  double* a;
  hipHostMalloc((void **) &a, N*N*sizeof(double));
  #pragma omp parallel for schedule(static)
  for (long i = 0; i < N*N; i++) {
    a[i] = drand48();
  }
  // CPU reference dot product
  double dp;
  double dp_ref;
  double tt = omp_get_wtime();
  dp0(v,v,N,dp_ref);
  printf("CPU Bandwidth Dot Product = %f GB/s\n", 2*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
  // CPU reference matrix-vector product
  double* mult_ref;
  hipHostMalloc((void **) &mult_ref, N * sizeof(double));
  tt = omp_get_wtime();
  mv0(a, v, N, mult_ref);
  printf("CPU Bandwidth Matrix-Vector Product = %f GB/s\n", 2*N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
  // copy inputs to the GPU
  double *v_d, *a_d, *mult;
  hipMalloc(&v_d, N*sizeof(double));
  hipMalloc(&a_d, N*N*sizeof(double));
  hipHostMalloc(&mult, N*sizeof(double));
  hipMemcpyAsync(v_d, v, N*sizeof(double), hipMemcpyHostToDevice);
  hipMemcpyAsync(a_d, a, N*N*sizeof(double), hipMemcpyHostToDevice);
  hipDeviceSynchronize();
  // GPU dot product
  tt = omp_get_wtime();
  dot(v_d, v_d, N, dp);
  printf("GPU Bandwidth Dot Product = %f GB/s\n", 2*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
  // GPU matrix-vector product
  tt = omp_get_wtime();
  mvProd(a_d, v_d, N, mult);
  printf("GPU Bandwidth Matrix-Vector Product = %f GB/s\n", 2*N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
  // compare GPU results against the CPU references
  double errDP = fabs(dp_ref-dp);
  double errMV = 0;
  for (long i = 0; i < N; i++) errMV = ::max(errMV, fabs(mult[i]-mult_ref[i]));
  printf("Dot product Error: %f\n", errDP);
  printf("Matrix-Vector Product Error: %f\n", errMV);
  // free device and pinned host memory
  hipFree(v_d);
  hipFree(a_d);
  hipHostFree(v);
  hipHostFree(a);
  hipHostFree(mult_ref);
  hipHostFree(mult);  // BUGFIX: mult was allocated with hipHostMalloc but never freed
  // return
  return 0;
} | 01262264671519a044dd88c7a56837d9667eb71d.cu | #include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
// CPU reference dot product: dp = sum_i a[i]*b[i], parallelized with OpenMP.
void dp0(double* a, double* b, long N, double& dp) {
  double acc = 0;
  #pragma omp parallel for reduction(+:acc)
  for (long idx = 0; idx < N; ++idx) {
    acc += a[idx] * b[idx];
  }
  dp = acc;
}
// CPU reference matrix-vector product for a row-major N x N matrix:
// mult_ref = a * v.
void mv0(double* a, double* v, long N, double* mult_ref) {
  for (long row = 0; row < N; ++row) {
    double acc = 0;
    for (long col = 0; col < N; ++col) {
      acc += a[N*row + col] * v[col];
    }
    mult_ref[row] = acc;
  }
}
// Print `message` plus the CUDA error string and abort the process if the
// most recent CUDA API call reported an error; no-op otherwise.
void Check_CUDA_Error(const char *message){
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "ERROR: %s: %s\n", message, cudaGetErrorString(err));
    exit(-1);
  }
}
#define BLOCK_SIZE 1024
// Block-wise tree reduction: each block sums up to BLOCK_SIZE elements of `a`
// into sum[blockIdx.x]. Requires blockDim.x == BLOCK_SIZE (1024): the first
// step reads smem[threadIdx.x + 512], so smaller blocks would read
// uninitialized shared memory. All launch sites in this file use BLOCK_SIZE.
__global__ void reduction_kernel2(double* sum, const double* a, long N){
//reduction kernel for summing
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
// load one element per thread; out-of-range threads contribute 0
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
// halve the active range each step, with a block barrier between steps
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
// final 32 lanes: warp-level steps, ordered with __syncwarp()
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
// lane 0 folds the last pair and publishes the block's partial sum
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
// Elementwise product kernel: c[i] = a[i] * b[i] for every i < N.
__global__ void mult_kernel(double* a, double* b, double* c, long N) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i >= N) return;  // guard the grid tail
  c[i] = a[i] * b[i];
}
// Dot product of device vectors a and b (length N): multiply elementwise on
// the GPU, then fold the products with repeated block reductions; the final
// scalar is copied back into the host reference `dp`.
void dot(double* a, double* b, long N, double& dp) {
  // scratch buffer for the elementwise products
  double *c_d;
  cudaMalloc(&c_d, N*sizeof(double));
  mult_kernel<<<N/BLOCK_SIZE+1,BLOCK_SIZE>>>(a, b, c_d, N);
  // scratch buffer for the partial block sums
  double *y_d;
  cudaMalloc(&y_d, ((N+BLOCK_SIZE-1)/BLOCK_SIZE)*sizeof(double));
  double* sum_d = y_d;
  long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
  reduction_kernel2<<<Nb,BLOCK_SIZE>>>(sum_d, c_d, N);
  while (Nb > 1) {
    long N = Nb;
    Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
    // NOTE(review): each level writes its output at sum_d + Nb, inside the
    // region the previous level produced; this is only safe while the next
    // level fits in a single block -- TODO confirm for very large N.
    reduction_kernel2<<<Nb,BLOCK_SIZE>>>(sum_d + Nb, sum_d, N);
    sum_d += Nb;
  }
  // copy the single remaining value back to the host
  cudaMemcpyAsync(&dp, sum_d, 1*sizeof(double), cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize();
  // free device scratch buffers
  cudaFree(c_d);
  cudaFree(y_d);  // BUGFIX: y_d was previously leaked on every call
}
// Matrix-vector product kernel: one thread per output row; c = a * v for a
// row-major N x N matrix a.
__global__ void mvKernel(double* a, double* v, long N, double* c) {
  long row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row < N) {
    // accumulate in a register instead of read-modify-write on global memory
    double acc = 0;
    for (long i = 0; i < N; i++) {
      acc += a[row*N+i] * v[i];
    }
    c[row] = acc;
  }
  // the trailing __syncthreads() was removed: the kernel touches no shared
  // state and ends immediately afterwards, so the barrier did nothing
}
// Launch mvKernel to compute mult = a * v on the GPU and copy the N-element
// result back to the host buffer `mult`.
void mvProd(double* a, double* v, long N, double* mult) {
  // device buffer for the result vector
  double *out_d;
  cudaMalloc(&out_d, N*sizeof(double));
  mvKernel<<<N/BLOCK_SIZE+1,BLOCK_SIZE>>>(a, v, N, out_d);
  cudaMemcpyAsync(mult, out_d, N*sizeof(double), cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize();
  cudaFree(out_d);
}
int main() {
  long N = (1UL<<14);
  // pinned host input vector for fast async transfers
  double *v;
  cudaMallocHost((void **) &v, N * sizeof(double));
  //#pragma omp parallel for
  for (long i = 0; i < N; i++) {
    v[i] = 1.0/(i+1);
  }
  // pinned host input matrix (row-major N x N)
  double* a;
  cudaMallocHost((void **) &a, N*N*sizeof(double));
  #pragma omp parallel for schedule(static)
  for (long i = 0; i < N*N; i++) {
    a[i] = drand48();
  }
  // CPU reference dot product
  double dp;
  double dp_ref;
  double tt = omp_get_wtime();
  dp0(v,v,N,dp_ref);
  printf("CPU Bandwidth Dot Product = %f GB/s\n", 2*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
  // CPU reference matrix-vector product
  double* mult_ref;
  cudaMallocHost((void **) &mult_ref, N * sizeof(double));
  tt = omp_get_wtime();
  mv0(a, v, N, mult_ref);
  printf("CPU Bandwidth Matrix-Vector Product = %f GB/s\n", 2*N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
  // copy inputs to the GPU
  double *v_d, *a_d, *mult;
  cudaMalloc(&v_d, N*sizeof(double));
  cudaMalloc(&a_d, N*N*sizeof(double));
  cudaMallocHost(&mult, N*sizeof(double));
  cudaMemcpyAsync(v_d, v, N*sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpyAsync(a_d, a, N*N*sizeof(double), cudaMemcpyHostToDevice);
  cudaDeviceSynchronize();
  // GPU dot product
  tt = omp_get_wtime();
  dot(v_d, v_d, N, dp);
  printf("GPU Bandwidth Dot Product = %f GB/s\n", 2*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
  // GPU matrix-vector product
  tt = omp_get_wtime();
  mvProd(a_d, v_d, N, mult);
  printf("GPU Bandwidth Matrix-Vector Product = %f GB/s\n", 2*N*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
  // compare GPU results against the CPU references
  double errDP = fabs(dp_ref-dp);
  double errMV = 0;
  for (long i = 0; i < N; i++) errMV = std::max(errMV, fabs(mult[i]-mult_ref[i]));
  printf("Dot product Error: %f\n", errDP);
  printf("Matrix-Vector Product Error: %f\n", errMV);
  // free device and pinned host memory
  cudaFree(v_d);
  cudaFree(a_d);
  cudaFreeHost(v);
  cudaFreeHost(a);
  cudaFreeHost(mult_ref);
  cudaFreeHost(mult);  // BUGFIX: mult was allocated with cudaMallocHost but never freed
  // return
  return 0;
} |
783d07c7085c86bb6fcaa3b81e51b722b3a465c8.hip | // !!! This is a file automatically generated by hipify!!!
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "hip/hip_runtime.h"
#include "utility/src/csrmv.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
// V has to be int, or long long int
// Per-net maximum of pin coordinates: grid-stride loop over pins, folding each
// pin's coordinate into x_max[net] with atomicMax. V must be int or long long
// int (atomicMax has no floating-point overload here).
template <typename T, typename V>
__global__ void computeMax(
        const T* x,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        V* x_max
        )
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += blockDim.x * gridDim.x)
    {
        int net_id = pin2net_map[i];
        if (net_mask[net_id])
        {
            atomicMax(&x_max[net_id], (V)(x[i]));
            // BUGFIX: removed the __syncthreads() that sat inside this
            // divergent branch -- a barrier in divergent control flow is
            // undefined behavior, and the atomic needs no barrier.
        }
    }
}
// V has to be int, or long long int
// Per-net minimum of pin coordinates: grid-stride loop over pins, folding each
// pin's coordinate into x_min[net] with atomicMin. V must be int or long long
// int (atomicMin has no floating-point overload here).
template <typename T, typename V>
__global__ void computeMin(
        const T* x,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        V* x_min
        )
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += blockDim.x * gridDim.x)
    {
        int net_id = pin2net_map[i];
        if (net_mask[net_id])
        {
            atomicMin(&x_min[net_id], (V)(x[i]));
            // BUGFIX: removed the __syncthreads() that sat inside this
            // divergent branch -- a barrier in divergent control flow is
            // undefined behavior, and the atomic needs no barrier.
        }
    }
}
// Shifted exponential per pin: exp_x[i] = exp((x[i] - max of pin's net)/gamma)
// for pins whose net is enabled; subtracting the per-net max keeps exp stable.
template <typename T, typename V>
__global__ void computeExp(
        const T* x,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        const T* gamma,
        V* x_max,
        T* exp_x
        )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += stride)
    {
        const int net_id = pin2net_map[i];
        if (!net_mask[net_id]) continue;
        exp_x[i] = exp((x[i] - x_max[net_id]) / (*gamma));
    }
}
// Negated shifted exponential per pin:
// exp_nx[i] = exp(-(x[i] - min of pin's net)/gamma) for pins on enabled nets;
// subtracting the per-net min keeps the exponent non-positive and stable.
template <typename T, typename V>
__global__ void computeNegExp(
        const T* x,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        const T* gamma,
        V* x_min,
        T* exp_nx
        )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += stride)
    {
        const int net_id = pin2net_map[i];
        if (!net_mask[net_id]) continue;
        exp_nx[i] = exp(-(x[i] - x_min[net_id]) / (*gamma));
    }
}
// Per-net sum of exponential terms: grid-stride loop over pins, each pin's
// exp value is accumulated into its net's slot with atomicAdd.
template <typename T>
__global__ void computeExpSum(
        const T* exp_x,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        T* exp_x_sum
        )
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += blockDim.x * gridDim.x)
    {
        int net_id = pin2net_map[i];
        if (net_mask[net_id])
        {
            atomicAdd(&exp_x_sum[net_id], exp_x[i]);
            // BUGFIX: removed the __syncthreads() that sat inside this
            // divergent branch -- a barrier in divergent control flow is
            // undefined behavior, and atomicAdd needs no barrier.
        }
    }
}
// Per-net log-sum-exp term: partial_wl[net] = gamma*log(sum exp) + net max
// (the max was subtracted before exponentiation, so it is added back here).
template <typename T, typename V>
__global__ void computeLogSumExp(
        const T* exp_x_sum,
        const V* x_max,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        const T* gamma,
        T* partial_wl
        )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += stride)
    {
        if (net_mask[i])
        {
            partial_wl[i] = (*gamma) * log(exp_x_sum[i]) + (T)x_max[i];
        }
    }
}
// Per-net negated log-sum-exp term: partial_wl[net] = gamma*log(sum exp) - min
// (the min was subtracted inside the negated exponent, so it is removed here).
template <typename T, typename V>
__global__ void computeLogSumNegExp(
        const T* exp_nx_sum,
        const V* x_min,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        const T* gamma,
        T* partial_wl
        )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += stride)
    {
        if (net_mask[i])
        {
            partial_wl[i] = (*gamma) * log(exp_nx_sum[i]) - (T)x_min[i];
        }
    }
}
// Gradient of the log-sum-exp wirelength w.r.t. each pin coordinate:
// softmax weight minus soft-min weight, scaled by the upstream gradient.
template <typename T>
__global__ void computeLogSumExpWirelengthGrad(
        const T* exp_x, const T* exp_nx,
        const T* exp_x_sum, const T* exp_nx_sum,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        const T* gamma,
        const T* grad_tensor,
        T* grad_x_tensor
        )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += stride)
    {
        const int net_id = pin2net_map[i];
        if (!net_mask[net_id]) continue;
        grad_x_tensor[i] = (exp_x[i] / exp_x_sum[net_id] - exp_nx[i] / exp_nx_sum[net_id]) * (*grad_tensor);
    }
}
// Host-side launcher for the log-sum-exp wirelength model (HIP).
// Forward pass (grad_tensor == nullptr): for x and y, computes per-net
// max/min, shifted exponentials, their per-net sums, and per-net partial
// wirelength terms (final summation is done by the caller with ATen).
// Backward pass (grad_tensor != nullptr): computes per-pin gradients from the
// previously stored exponentials and sums.
// Work is spread over up to four HIP streams (x, -x, y, -y).
// NOTE(review): stream_nx_exp / stream_ny_exp are only created (and
// destroyed) in the forward branch; they stay unused in the gradient branch.
// Returns 0 on success, 1 on any stream API failure.
template <typename T, typename V>
int computeLogSumExpWirelengthCudaAtomicLauncher(
        const T* x, const T* y,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        const T* gamma,
        T* exp_xy, T* exp_nxy,
        T* exp_xy_sum, T* exp_nxy_sum,
        V* xy_max, V* xy_min,
        T* partial_wl, // wirelength of each net
        const T* grad_tensor,
        T* grad_x_tensor, T* grad_y_tensor // the gradient is partial total wirelength to partial pin position
        )
{
    int thread_count = 1024;
    int block_count = 32; // separate x and y
    hipError_t status;
    hipStream_t stream_x_exp;
    hipStream_t stream_nx_exp;
    hipStream_t stream_y_exp;
    hipStream_t stream_ny_exp;
    status = hipStreamCreate(&stream_x_exp);
    if (status != hipSuccess)
    {
        printf("hipStreamCreate failed for stream_x_exp\n");
        fflush(stdout);
        return 1;
    }
    status = hipStreamCreate(&stream_y_exp);
    if (status != hipSuccess)
    {
        printf("hipStreamCreate failed for stream_y_exp\n");
        fflush(stdout);
        return 1;
    }
    if (grad_tensor)
    {
        // backward: x gradients on one stream, y gradients on the other
        hipLaunchKernelGGL(( computeLogSumExpWirelengthGrad), dim3(block_count), dim3(thread_count), 0, stream_x_exp,
                exp_xy, exp_nxy,
                exp_xy_sum, exp_nxy_sum,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                gamma,
                grad_tensor,
                grad_x_tensor
                );
        hipLaunchKernelGGL(( computeLogSumExpWirelengthGrad), dim3(block_count), dim3(thread_count), 0, stream_y_exp,
                exp_xy+num_pins, exp_nxy+num_pins,
                exp_xy_sum+num_nets, exp_nxy_sum+num_nets,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                gamma,
                grad_tensor,
                grad_y_tensor
                );
    }
    else
    {
        status = hipStreamCreate(&stream_nx_exp);
        if (status != hipSuccess)
        {
            printf("hipStreamCreate failed for stream_nx_exp\n");
            fflush(stdout);
            return 1;
        }
        status = hipStreamCreate(&stream_ny_exp);
        if (status != hipSuccess)
        {
            printf("hipStreamCreate failed for stream_ny_exp\n");
            fflush(stdout);
            return 1;
        }
        // compute max/min
        hipLaunchKernelGGL(( computeMax), dim3(block_count), dim3(thread_count), 0, stream_x_exp,
                x,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                xy_max
                );
        hipLaunchKernelGGL(( computeMin), dim3(block_count), dim3(thread_count), 0, stream_nx_exp,
                x,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                xy_min
                );
        hipLaunchKernelGGL(( computeMax), dim3(block_count), dim3(thread_count), 0, stream_y_exp,
                y,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                xy_max+num_nets
                );
        hipLaunchKernelGGL(( computeMin), dim3(block_count), dim3(thread_count), 0, stream_ny_exp,
                y,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                xy_min+num_nets
                );
        // compute exp and negative exp
        hipLaunchKernelGGL(( computeExp), dim3(block_count), dim3(thread_count), 0, stream_x_exp,
                x,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                gamma,
                xy_max,
                exp_xy
                );
        hipLaunchKernelGGL(( computeNegExp), dim3(block_count), dim3(thread_count), 0, stream_nx_exp,
                x,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                gamma,
                xy_min,
                exp_nxy
                );
        hipLaunchKernelGGL(( computeExp), dim3(block_count), dim3(thread_count), 0, stream_y_exp,
                y,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                gamma,
                xy_max+num_nets,
                exp_xy+num_pins
                );
        hipLaunchKernelGGL(( computeNegExp), dim3(block_count), dim3(thread_count), 0, stream_ny_exp,
                y,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                gamma,
                xy_min+num_nets,
                exp_nxy+num_pins
                );
        // compute exp sum
        hipLaunchKernelGGL(( computeExpSum), dim3(block_count), dim3(thread_count), 0, stream_x_exp,
                exp_xy,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                exp_xy_sum
                );
        hipLaunchKernelGGL(( computeExpSum), dim3(block_count), dim3(thread_count), 0, stream_nx_exp,
                exp_nxy,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                exp_nxy_sum
                );
        hipLaunchKernelGGL(( computeExpSum), dim3(block_count), dim3(thread_count), 0, stream_y_exp,
                exp_xy+num_pins,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                exp_xy_sum+num_nets
                );
        hipLaunchKernelGGL(( computeExpSum), dim3(block_count), dim3(thread_count), 0, stream_ny_exp,
                exp_nxy+num_pins,
                pin2net_map,
                net_mask,
                num_nets,
                num_pins,
                exp_nxy_sum+num_nets
                );
        // compute log sum exp
        hipLaunchKernelGGL(( computeLogSumExp), dim3(block_count), dim3(thread_count), 0, stream_x_exp,
                exp_xy_sum,
                xy_max,
                pin2net_map,
                net_mask,
                num_nets,
                gamma,
                partial_wl
                );
        hipLaunchKernelGGL(( computeLogSumNegExp), dim3(block_count), dim3(thread_count), 0, stream_nx_exp,
                exp_nxy_sum,
                xy_min,
                pin2net_map,
                net_mask,
                num_nets,
                gamma,
                partial_wl+num_nets
                );
        hipLaunchKernelGGL(( computeLogSumExp), dim3(block_count), dim3(thread_count), 0, stream_y_exp,
                exp_xy_sum+num_nets,
                xy_max+num_nets,
                pin2net_map,
                net_mask,
                num_nets,
                gamma,
                partial_wl+2*num_nets
                );
        hipLaunchKernelGGL(( computeLogSumNegExp), dim3(block_count), dim3(thread_count), 0, stream_ny_exp,
                exp_nxy_sum+num_nets,
                xy_min+num_nets,
                pin2net_map,
                net_mask,
                num_nets,
                gamma,
                partial_wl+3*num_nets
                );
        // I move out the summation to use ATen
        // significant speedup is observed
        //sumArray<<<1, 1>>>(partial_wl, 2*num_nets, wl);
        status = hipStreamDestroy(stream_nx_exp);
        stream_nx_exp = 0;
        if (status != hipSuccess)
        {
            printf("stream_nx_exp destroy failed\n");
            fflush(stdout);
            return 1;
        }
        status = hipStreamDestroy(stream_ny_exp);
        stream_ny_exp = 0;
        if (status != hipSuccess)
        {
            printf("stream_ny_exp destroy failed\n");
            fflush(stdout);
            return 1;
        }
    }
    /* destroy stream */
    status = hipStreamDestroy(stream_x_exp);
    stream_x_exp = 0;
    if (status != hipSuccess)
    {
        printf("stream_x_exp destroy failed\n");
        fflush(stdout);
        return 1;
    }
    status = hipStreamDestroy(stream_y_exp);
    stream_y_exp = 0;
    if (status != hipSuccess)
    {
        printf("stream_y_exp destroy failed\n");
        fflush(stdout);
        return 1;
    }
    return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T, V) \
int instantiateComputeLogSumExpWirelengthAtomicLauncher(\
const T* x, const T* y, \
const int* pin2net_map, \
const unsigned char* net_mask, \
int num_nets, \
int num_pins, \
const T* gamma, \
T* exp_xy, T* exp_nxy, \
T* exp_xy_sum, T* exp_nxy_sum,\
V* xy_max, V* xy_min, \
T* partial_wl, \
const T* grad_tensor, \
T* grad_x_tensor, T* grad_y_tensor \
)\
{\
return computeLogSumExpWirelengthCudaAtomicLauncher(\
x, y, \
pin2net_map, \
net_mask, \
num_nets,\
num_pins,\
gamma, \
exp_xy, exp_nxy, \
exp_xy_sum, exp_nxy_sum, \
xy_max, xy_min, \
partial_wl, \
grad_tensor, \
grad_x_tensor, grad_y_tensor \
);\
}
REGISTER_KERNEL_LAUNCHER(float, int);
REGISTER_KERNEL_LAUNCHER(double, int);
DREAMPLACE_END_NAMESPACE
| 783d07c7085c86bb6fcaa3b81e51b722b3a465c8.cu | #include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "cuda_runtime.h"
#include "utility/src/csrmv.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
// V has to be int, or long long int
// Per-net maximum of pin coordinates: grid-stride loop over pins, folding each
// pin's coordinate into x_max[net] with atomicMax. V must be int or long long
// int (atomicMax has no floating-point overload here).
template <typename T, typename V>
__global__ void computeMax(
        const T* x,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        V* x_max
        )
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += blockDim.x * gridDim.x)
    {
        int net_id = pin2net_map[i];
        if (net_mask[net_id])
        {
            atomicMax(&x_max[net_id], (V)(x[i]));
            // BUGFIX: removed the __syncthreads() that sat inside this
            // divergent branch -- a barrier in divergent control flow is
            // undefined behavior, and the atomic needs no barrier.
        }
    }
}
// V has to be int, or long long int
// Per-net minimum of pin coordinates: grid-stride loop over pins, folding each
// pin's coordinate into x_min[net] with atomicMin. V must be int or long long
// int (atomicMin has no floating-point overload here).
template <typename T, typename V>
__global__ void computeMin(
        const T* x,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        V* x_min
        )
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += blockDim.x * gridDim.x)
    {
        int net_id = pin2net_map[i];
        if (net_mask[net_id])
        {
            atomicMin(&x_min[net_id], (V)(x[i]));
            // BUGFIX: removed the __syncthreads() that sat inside this
            // divergent branch -- a barrier in divergent control flow is
            // undefined behavior, and the atomic needs no barrier.
        }
    }
}
// Shifted exponential per pin: exp_x[i] = exp((x[i] - max of pin's net)/gamma)
// for pins whose net is enabled; subtracting the per-net max keeps exp stable.
template <typename T, typename V>
__global__ void computeExp(
        const T* x,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        const T* gamma,
        V* x_max,
        T* exp_x
        )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += stride)
    {
        const int net_id = pin2net_map[i];
        if (!net_mask[net_id]) continue;
        exp_x[i] = exp((x[i] - x_max[net_id]) / (*gamma));
    }
}
// Negated shifted exponential per pin:
// exp_nx[i] = exp(-(x[i] - min of pin's net)/gamma) for pins on enabled nets;
// subtracting the per-net min keeps the exponent non-positive and stable.
template <typename T, typename V>
__global__ void computeNegExp(
        const T* x,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        const T* gamma,
        V* x_min,
        T* exp_nx
        )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += stride)
    {
        const int net_id = pin2net_map[i];
        if (!net_mask[net_id]) continue;
        exp_nx[i] = exp(-(x[i] - x_min[net_id]) / (*gamma));
    }
}
// Per-net sum of exponential terms: grid-stride loop over pins, each pin's
// exp value is accumulated into its net's slot with atomicAdd.
template <typename T>
__global__ void computeExpSum(
        const T* exp_x,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        T* exp_x_sum
        )
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += blockDim.x * gridDim.x)
    {
        int net_id = pin2net_map[i];
        if (net_mask[net_id])
        {
            atomicAdd(&exp_x_sum[net_id], exp_x[i]);
            // BUGFIX: removed the __syncthreads() that sat inside this
            // divergent branch -- a barrier in divergent control flow is
            // undefined behavior, and atomicAdd needs no barrier.
        }
    }
}
// Per-net log-sum-exp term: partial_wl[net] = gamma*log(sum exp) + net max
// (the max was subtracted before exponentiation, so it is added back here).
template <typename T, typename V>
__global__ void computeLogSumExp(
        const T* exp_x_sum,
        const V* x_max,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        const T* gamma,
        T* partial_wl
        )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += stride)
    {
        if (net_mask[i])
        {
            partial_wl[i] = (*gamma) * log(exp_x_sum[i]) + (T)x_max[i];
        }
    }
}
// Per-net negated log-sum-exp term: partial_wl[net] = gamma*log(sum exp) - min
// (the min was subtracted inside the negated exponent, so it is removed here).
template <typename T, typename V>
__global__ void computeLogSumNegExp(
        const T* exp_nx_sum,
        const V* x_min,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        const T* gamma,
        T* partial_wl
        )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_nets; i += stride)
    {
        if (net_mask[i])
        {
            partial_wl[i] = (*gamma) * log(exp_nx_sum[i]) - (T)x_min[i];
        }
    }
}
// Gradient of the log-sum-exp wirelength w.r.t. each pin coordinate:
// softmax weight minus soft-min weight, scaled by the upstream gradient.
template <typename T>
__global__ void computeLogSumExpWirelengthGrad(
        const T* exp_x, const T* exp_nx,
        const T* exp_x_sum, const T* exp_nx_sum,
        const int* pin2net_map,
        const unsigned char* net_mask,
        int num_nets,
        int num_pins,
        const T* gamma,
        const T* grad_tensor,
        T* grad_x_tensor
        )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_pins; i += stride)
    {
        const int net_id = pin2net_map[i];
        if (!net_mask[net_id]) continue;
        grad_x_tensor[i] = (exp_x[i] / exp_x_sum[net_id] - exp_nx[i] / exp_nx_sum[net_id]) * (*grad_tensor);
    }
}
template <typename T, typename V>
int computeLogSumExpWirelengthCudaAtomicLauncher(
const T* x, const T* y,
const int* pin2net_map,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
T* exp_xy, T* exp_nxy,
T* exp_xy_sum, T* exp_nxy_sum,
V* xy_max, V* xy_min,
T* partial_wl, // wirelength of each net
const T* grad_tensor,
T* grad_x_tensor, T* grad_y_tensor // the gradient is partial total wirelength to partial pin position
)
{
int thread_count = 1024;
int block_count = 32; // separate x and y
cudaError_t status;
cudaStream_t stream_x_exp;
cudaStream_t stream_nx_exp;
cudaStream_t stream_y_exp;
cudaStream_t stream_ny_exp;
status = cudaStreamCreate(&stream_x_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_x_exp\n");
fflush(stdout);
return 1;
}
status = cudaStreamCreate(&stream_y_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_y_exp\n");
fflush(stdout);
return 1;
}
if (grad_tensor)
{
computeLogSumExpWirelengthGrad<<<block_count, thread_count, 0, stream_x_exp>>>(
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
grad_tensor,
grad_x_tensor
);
computeLogSumExpWirelengthGrad<<<block_count, thread_count, 0, stream_y_exp>>>(
exp_xy+num_pins, exp_nxy+num_pins,
exp_xy_sum+num_nets, exp_nxy_sum+num_nets,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
grad_tensor,
grad_y_tensor
);
}
else
{
status = cudaStreamCreate(&stream_nx_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_nx_exp\n");
fflush(stdout);
return 1;
}
status = cudaStreamCreate(&stream_ny_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_ny_exp\n");
fflush(stdout);
return 1;
}
// compute max/min
computeMax<<<block_count, thread_count, 0, stream_x_exp>>>(
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_max
);
computeMin<<<block_count, thread_count, 0, stream_nx_exp>>>(
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_min
);
computeMax<<<block_count, thread_count, 0, stream_y_exp>>>(
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_max+num_nets
);
computeMin<<<block_count, thread_count, 0, stream_ny_exp>>>(
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
xy_min+num_nets
);
// compute exp and negative exp
computeExp<<<block_count, thread_count, 0, stream_x_exp>>>(
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_max,
exp_xy
);
computeNegExp<<<block_count, thread_count, 0, stream_nx_exp>>>(
x,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_min,
exp_nxy
);
computeExp<<<block_count, thread_count, 0, stream_y_exp>>>(
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_max+num_nets,
exp_xy+num_pins
);
computeNegExp<<<block_count, thread_count, 0, stream_ny_exp>>>(
y,
pin2net_map,
net_mask,
num_nets,
num_pins,
gamma,
xy_min+num_nets,
exp_nxy+num_pins
);
// compute exp sum
computeExpSum<<<block_count, thread_count, 0, stream_x_exp>>>(
exp_xy,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_xy_sum
);
computeExpSum<<<block_count, thread_count, 0, stream_nx_exp>>>(
exp_nxy,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_nxy_sum
);
computeExpSum<<<block_count, thread_count, 0, stream_y_exp>>>(
exp_xy+num_pins,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_xy_sum+num_nets
);
computeExpSum<<<block_count, thread_count, 0, stream_ny_exp>>>(
exp_nxy+num_pins,
pin2net_map,
net_mask,
num_nets,
num_pins,
exp_nxy_sum+num_nets
);
// compute log sum exp
computeLogSumExp<<<block_count, thread_count, 0, stream_x_exp>>>(
exp_xy_sum,
xy_max,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl
);
computeLogSumNegExp<<<block_count, thread_count, 0, stream_nx_exp>>>(
exp_nxy_sum,
xy_min,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl+num_nets
);
computeLogSumExp<<<block_count, thread_count, 0, stream_y_exp>>>(
exp_xy_sum+num_nets,
xy_max+num_nets,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl+2*num_nets
);
computeLogSumNegExp<<<block_count, thread_count, 0, stream_ny_exp>>>(
exp_nxy_sum+num_nets,
xy_min+num_nets,
pin2net_map,
net_mask,
num_nets,
gamma,
partial_wl+3*num_nets
);
// I move out the summation to use ATen
// significant speedup is observed
//sumArray<<<1, 1>>>(partial_wl, 2*num_nets, wl);
status = cudaStreamDestroy(stream_nx_exp);
stream_nx_exp = 0;
if (status != cudaSuccess)
{
printf("stream_nx_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = cudaStreamDestroy(stream_ny_exp);
stream_ny_exp = 0;
if (status != cudaSuccess)
{
printf("stream_ny_exp destroy failed\n");
fflush(stdout);
return 1;
}
}
/* destroy stream */
status = cudaStreamDestroy(stream_x_exp);
stream_x_exp = 0;
if (status != cudaSuccess)
{
printf("stream_x_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = cudaStreamDestroy(stream_y_exp);
stream_y_exp = 0;
if (status != cudaSuccess)
{
printf("stream_y_exp destroy failed\n");
fflush(stdout);
return 1;
}
return 0;
}
// Stamps out a host-side launcher overload for scalar type T (pin
// coordinates / wirelength values) and type V (max/min buffers).  Each
// expansion defines an overload of
// instantiateComputeLogSumExpWirelengthAtomicLauncher that simply forwards
// every argument to computeLogSumExpWirelengthCudaAtomicLauncher; the
// overloads differ only in their pointer parameter types.
#define REGISTER_KERNEL_LAUNCHER(T, V) \
int instantiateComputeLogSumExpWirelengthAtomicLauncher(\
const T* x, const T* y, \
const int* pin2net_map, \
const unsigned char* net_mask, \
int num_nets, \
int num_pins, \
const T* gamma, \
T* exp_xy, T* exp_nxy, \
T* exp_xy_sum, T* exp_nxy_sum,\
V* xy_max, V* xy_min, \
T* partial_wl, \
const T* grad_tensor, \
T* grad_x_tensor, T* grad_y_tensor \
)\
{\
return computeLogSumExpWirelengthCudaAtomicLauncher(\
x, y, \
pin2net_map, \
net_mask, \
num_nets,\
num_pins,\
gamma, \
exp_xy, exp_nxy, \
exp_xy_sum, exp_nxy_sum, \
xy_max, xy_min, \
partial_wl, \
grad_tensor, \
grad_x_tensor, grad_y_tensor \
);\
}
// Explicit instantiations for the precisions exposed to callers.
REGISTER_KERNEL_LAUNCHER(float, int);
REGISTER_KERNEL_LAUNCHER(double, int);
DREAMPLACE_END_NAMESPACE
|
c8aa4926ad048fdf71e6e0429d5f433b8df8e1ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zgemvmdot.cu, normal z -> c, Sun Nov 20 20:20:41 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_c
// initialize arrays with zero
// Zero-fill k vectors of length n stored contiguously in d
// (vector j occupies d[j*n .. j*n + n-1]).  One thread clears one row
// index across all k vectors; threads with row >= n do nothing.
__global__ void
magma_cgpumemzero(
    magmaFloatComplex * d,
    int n,
    int k )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= n )
        return;
    for ( int col = 0; col < k; ++col )
        d[ row + col*n ] = MAGMA_C_MAKE( 0.0, 0.0 );
}
// dot product
// Block-wise partial dot product: each 256-thread block tree-reduces
// v[i]*r[i] over its tile in shared memory and block b writes its partial
// sum to vtmp[b].  Preconditions (not checked): blockDim.x == 256 and the
// dynamic shared memory allocation holds >= blockDim.x elements.
// Gs (grid size used by the follow-up reduction pass) is unused here.
__global__ void
magma_cdot_kernel(
int Gs,
int n,
magmaFloatComplex * v,
magmaFloatComplex * r,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
// out-of-range threads contribute an explicit zero to the reduction
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_C_MAKE( 0.0, 0.0);
__syncthreads();
// block-wide tree reduction down to 64 live entries
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside this divergent `Idx < 32` branch is
// reached only by warp 0; this relies on legacy warp-synchronous execution
// and is undefined under Volta+ independent thread scheduling -- confirm.
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
// dead branch in this _c file: PRECISION_c is defined above, not PRECISION_d
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// dead branch in this _c file (see PRECISION_c define)
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// thread 0 publishes this block's partial sum
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// dot product for multiple vectors
// Simultaneous partial dot products of k vectors v_0..v_{k-1} (stored
// contiguously with stride n) against r.  Each block tree-reduces its k
// tiles in shared memory and block b stores the j-th partial result at
// vtmp[ b + j*n ] (stride n between vectors).  Preconditions (not checked):
// blockDim.x == 256 and dynamic shared memory >= k * blockDim.x elements.
// Gs is unused here.
__global__ void
magma_cblockdot_kernel(
int Gs,
int n,
int k,
magmaFloatComplex * v,
magmaFloatComplex * r,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i): load products, zeros past the end of the vectors
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else {
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
// block-wide tree reduction, one blockDim.x-entry segment per vector
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside the divergent `Idx < 32` branch is
// reached only by warp 0; relies on legacy warp-synchronous execution and
// is undefined under Volta+ independent thread scheduling -- confirm.
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
// dead branch in this _c file (PRECISION_c is defined above)
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// dead branch in this _c file
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 publishes the k block-partial sums
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
// Reduces k vectors of partial sums stored in vtmp (stride n) by one tree
// level per block; block b writes its k results to vtmp2[ b + j*n ].
// Preconditions (not checked): blockDim.x == 256 and dynamic shared memory
// >= k * blockDim.x elements.  Gs is unused here.
// NOTE(review): appears to be referenced only from the commented-out
// reduction path in magma_cmdotc below -- confirm before removing.
__global__ void
magma_cblockreduce_kernel(
int Gs,
int n,
int k,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// load one element per vector, zeros past the end
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
// block-wide tree reduction, one blockDim.x-entry segment per vector
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside the divergent `Idx < 32` branch is
// undefined under Volta+ independent thread scheduling -- confirm.
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
// dead branch in this _c file (PRECISION_c is defined above)
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// dead branch in this _c file
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 publishes the k block-partial sums
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
// Accelerated reduction of one vector: each block accumulates a strided
// range of vtmp[0..Gs) two elements per pass (i and i+blockSize), then
// tree-reduces 128 shared entries; block b writes its result to vtmp2[b].
// Preconditions (not checked): blockDim.x == 128 (blockSize is hard-coded)
// and dynamic shared memory >= 128 elements.  n is unused here.
__global__ void
magma_creduce_kernel_fast( int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
// grid-strided accumulation: two input elements per iteration
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside this divergent `Idx < 32` branch is
// undefined under Volta+ independent thread scheduling -- confirm.
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
// dead branch in this _c file (PRECISION_c is defined above)
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// dead branch in this _c file
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// thread 0 publishes this block's partial sum
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated block reduction for multiple vectors
// Accelerated block reduction for k vectors: for each vector j, a block
// accumulates a strided range of vtmp[j*n .. j*n+Gs) two elements per pass,
// then tree-reduces 128 shared entries per vector; block b writes result j
// to vtmp2[ b + j*n ].  Preconditions (not checked): blockDim.x == 128
// (blockSize is hard-coded) and dynamic shared memory >= k * 128 elements.
__global__ void
magma_cblockreduce_kernel_fast(
int Gs,
int n,
int k,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
// grid-strided accumulation, one segment of blockSize entries per vector
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside the divergent `Idx < 32` branch is
// undefined under Volta+ independent thread scheduling -- confirm.
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
// dead branch in this _c file (PRECISION_c is defined above)
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
// dead branch in this _c file
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
// thread 0 publishes the k block-partial sums
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloatComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloatComplex_ptr
r
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
// Computes skp[j] = <v_j, r> for j = 0..k-1.  First pass produces per-block
// partials in d1; the reduction loop then ping-pongs between d1 and d2
// (tracked via aux1/aux2 and the parity flag b) until one partial per
// vector remains, which is copied asynchronously (stride n) into skp.
extern "C" magma_int_t
magma_cmdotc(
magma_int_t n,
magma_int_t k,
magmaFloatComplex_ptr v,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( magmaFloatComplex ); // k vecs
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
// b tracks which workspace currently holds the partial sums
int b = 1;
// first pass: per-block partial dot products into d1
if (k>1) {
hipLaunchKernelGGL(( magma_cblockdot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, k, v, r, d1 );
}
else {
hipLaunchKernelGGL(( magma_cdot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_cgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 );
magma_cgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 );
//magmablas_claset( MagmaFull, n, k, d1, n, UNKNOWN );
//magmablas_claset( MagmaFull, n, k, d2, n, UNKNOWN );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
magma_cblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_ccopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN );
}
*/
// fast reduction passes: the _fast kernels use 128 threads per block, so
// they are launched with Bs.x/2 threads and Ms/2 shared bytes; forcing
// Gs_next.x to 2 before halving guarantees at least one block.
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_cblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_creduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
// gather the k results (stored with stride n in the workspace) into skp
magma_ccopyvector_async( k, aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vecor product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloatComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloatComplex_ptr
r
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
// Chunked driver for magma_cmdotc: processes the k vectors in groups of
// chunk_size so each group's shared-memory footprint stays bounded.
extern "C" magma_int_t
magma_cgemvmdot(
magma_int_t n,
magma_int_t k,
magmaFloatComplex_ptr v,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int rows_left = k;
int offset = 0;
int chunk_size = 4;
// process in chunks of chunk_size (currently 4) - has to be adapted to
// hardware and precision
while( rows_left > (chunk_size) ) {
magma_cmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
offset = offset + chunk_size;
rows_left = rows_left-chunk_size;
}
// process rest (at most chunk_size vectors remain here)
magma_cmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
return MAGMA_SUCCESS;
}
| c8aa4926ad048fdf71e6e0429d5f433b8df8e1ae.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zgemvmdot.cu, normal z -> c, Sun Nov 20 20:20:41 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_c
// initialize arrays with zero
// Zero-fill k vectors of length n stored contiguously in d
// (vector j occupies d[j*n .. j*n + n-1]).  One thread clears one row
// index across all k vectors; threads with row >= n do nothing.
__global__ void
magma_cgpumemzero(
    magmaFloatComplex * d,
    int n,
    int k )
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= n )
        return;
    for ( int col = 0; col < k; ++col )
        d[ row + col*n ] = MAGMA_C_MAKE( 0.0, 0.0 );
}
// dot product
// Block-wise partial dot product: each 256-thread block tree-reduces
// v[i]*r[i] over its tile in shared memory and block b writes its partial
// sum to vtmp[b].  Preconditions (not checked): blockDim.x == 256 and the
// dynamic shared memory allocation holds >= blockDim.x elements.
// Gs (grid size used by the follow-up reduction pass) is unused here.
__global__ void
magma_cdot_kernel(
int Gs,
int n,
magmaFloatComplex * v,
magmaFloatComplex * r,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
// out-of-range threads contribute an explicit zero to the reduction
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_C_MAKE( 0.0, 0.0);
__syncthreads();
// block-wide tree reduction down to 64 live entries
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside this divergent `Idx < 32` branch is
// reached only by warp 0; this relies on legacy warp-synchronous execution
// and is undefined under Volta+ independent thread scheduling -- confirm.
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
// dead branch in this _c file: PRECISION_c is defined above, not PRECISION_d
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// dead branch in this _c file (see PRECISION_c define)
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// thread 0 publishes this block's partial sum
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// dot product for multiple vectors
// Simultaneous partial dot products of k vectors v_0..v_{k-1} (stored
// contiguously with stride n) against r.  Each block tree-reduces its k
// tiles in shared memory and block b stores the j-th partial result at
// vtmp[ b + j*n ] (stride n between vectors).  Preconditions (not checked):
// blockDim.x == 256 and dynamic shared memory >= k * blockDim.x elements.
// Gs is unused here.
__global__ void
magma_cblockdot_kernel(
int Gs,
int n,
int k,
magmaFloatComplex * v,
magmaFloatComplex * r,
magmaFloatComplex * vtmp)
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// k vectors v(i): load products, zeros past the end of the vectors
if (i<n){
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
}
else {
for( j=0; j<k; j++)
temp[Idx+j*blockDim.x] = MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
// block-wide tree reduction, one blockDim.x-entry segment per vector
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside the divergent `Idx < 32` branch is
// reached only by warp 0; relies on legacy warp-synchronous execution and
// is undefined under Volta+ independent thread scheduling -- confirm.
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
// dead branch in this _c file (PRECISION_c is defined above)
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// dead branch in this _c file
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 publishes the k block-partial sums
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// block reduction for multiple vectors
// Reduces k vectors of partial sums stored in vtmp (stride n) by one tree
// level per block; block b writes its k results to vtmp2[ b + j*n ].
// Preconditions (not checked): blockDim.x == 256 and dynamic shared memory
// >= k * blockDim.x elements.  Gs is unused here.
// NOTE(review): appears to be referenced only from the commented-out
// reduction path in magma_cmdotc below -- confirm before removing.
__global__ void
magma_cblockreduce_kernel(
int Gs,
int n,
int k,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
// load one element per vector, zeros past the end
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
: MAGMA_C_MAKE( 0.0, 0.0);
}
__syncthreads();
// block-wide tree reduction, one blockDim.x-entry segment per vector
if ( Idx < 128 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside the divergent `Idx < 32` branch is
// undefined under Volta+ independent thread scheduling -- confirm.
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
// dead branch in this _c file (PRECISION_c is defined above)
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// dead branch in this _c file
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
// thread 0 publishes the k block-partial sums
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
// accelerated reduction for one vector
// Accelerated reduction of one vector: each block accumulates a strided
// range of vtmp[0..Gs) two elements per pass (i and i+blockSize), then
// tree-reduces 128 shared entries; block b writes its result to vtmp2[b].
// Preconditions (not checked): blockDim.x == 128 (blockSize is hard-coded)
// and dynamic shared memory >= 128 elements.  n is unused here.
__global__ void
magma_creduce_kernel_fast( int Gs,
int n,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
// grid-strided accumulation: two input elements per iteration
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside this divergent `Idx < 32` branch is
// undefined under Volta+ independent thread scheduling -- confirm.
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
// dead branch in this _c file (PRECISION_c is defined above)
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// dead branch in this _c file
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
// thread 0 publishes this block's partial sum
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// accelerated block reduction for multiple vectors
// Accelerated block reduction for k vectors: for each vector j, a block
// accumulates a strided range of vtmp[j*n .. j*n+Gs) two elements per pass,
// then tree-reduces 128 shared entries per vector; block b writes result j
// to vtmp2[ b + j*n ].  Preconditions (not checked): blockDim.x == 128
// (blockSize is hard-coded) and dynamic shared memory >= k * 128 elements.
__global__ void
magma_cblockreduce_kernel_fast(
int Gs,
int n,
int k,
magmaFloatComplex * vtmp,
magmaFloatComplex * vtmp2 )
{
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
// grid-strided accumulation, one segment of blockSize entries per vector
for( j=0; j<k; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_C_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<k; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
// NOTE(review): __syncthreads() inside the divergent `Idx < 32` branch is
// undefined under Volta+ independent thread scheduling -- confirm.
if( Idx < 32 ){
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<k; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
// dead branch in this _c file (PRECISION_c is defined above)
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
// dead branch in this _c file
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<k; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
// thread 0 publishes the k block-partial sums
if ( Idx == 0 ){
for( j=0; j<k; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloatComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloatComplex_ptr
r
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
// Computes skp[j] = <v_j, r> for j = 0..k-1.  First pass produces per-block
// partials in d1; the reduction loop then ping-pongs between d1 and d2
// (tracked via aux1/aux2 and the parity flag b) until one partial per
// vector remains, which is copied asynchronously (stride n) into skp.
extern "C" magma_int_t
magma_cmdotc(
magma_int_t n,
magma_int_t k,
magmaFloatComplex_ptr v,
magmaFloatComplex_ptr r,
magmaFloatComplex_ptr d1,
magmaFloatComplex_ptr d2,
magmaFloatComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = (k)* (local_block_size) * sizeof( magmaFloatComplex ); // k vecs
magmaFloatComplex_ptr aux1 = d1, aux2 = d2;
// b tracks which workspace currently holds the partial sums
int b = 1;
// first pass: per-block partial dot products into d1
if (k>1) {
magma_cblockdot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, k, v, r, d1 );
}
else {
magma_cdot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, v, r, d1 );
}
/*
// not necessary to zero GPU mem
magma_cgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 );
magma_cgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 );
//magmablas_claset( MagmaFull, n, k, d1, n, UNKNOWN );
//magmablas_claset( MagmaFull, n, k, d2, n, UNKNOWN );
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
magma_cblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>>
( Gs.x, n, k, aux1, aux2 );
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
for( int j=0; j<k; j++) {
magma_ccopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN );
}
*/
// fast reduction passes: the _fast kernels use 128 threads per block, so
// they are launched with Bs.x/2 threads and Ms/2 shared bytes; forcing
// Gs_next.x to 2 before halving guarantees at least one block.
if ( k>1) {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_cblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, k, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
else {
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_creduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
}
// gather the k results (stored with stride n in the workspace) into skp
magma_ccopyvector_async( k, aux1, n, skp, 1, queue );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vecor product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaFloatComplex_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaFloatComplex_ptr
r
@param[in]
d1 magmaFloatComplex_ptr
workspace
@param[in]
d2 magmaFloatComplex_ptr
workspace
@param[out]
skp magmaFloatComplex_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_c
********************************************************************/
extern "C" magma_int_t
magma_cgemvmdot(
    magma_int_t n,
    magma_int_t k,
    magmaFloatComplex_ptr v,
    magmaFloatComplex_ptr r,
    magmaFloatComplex_ptr d1,
    magmaFloatComplex_ptr d2,
    magmaFloatComplex_ptr skp,
    magma_queue_t queue )
{
    // Process the k vectors in fixed-size chunks so that each
    // magma_cmdotc call works on a set of vectors small enough to stay
    // cache-resident; the chunk size has to be tuned to hardware and
    // precision.
    const int chunk = 4;
    int done = 0;
    int remaining = k;
    for ( ; remaining > chunk; remaining -= chunk, done += chunk ) {
        magma_cmdotc( n, chunk, v + done*n, r, d1, d2, skp + done, queue );
    }
    // Final (possibly partial) chunk: remaining is in [k mod chunk .. chunk].
    magma_cmdotc( n, remaining, v + done*n, r, d1, d2, skp + done, queue );
    return MAGMA_SUCCESS;
}
|
e455bfd01f20e1749a521d999bf9192c3225ea8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Naive matrix multiply C = A * B with A (m x k), B (k x n), C (m x n),
// all row-major, double precision. Contains no thread indexing: it is
// meant to be launched with a single thread (<<<1,1>>>); launching more
// threads would make every thread redundantly recompute the same result.
__global__ void kernel1(int m, int n, int k, double *d_A, double *d_B, double *d_C){
// Zero the output matrix first, since the product is accumulated below.
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
d_C[i*n + j] = 0.0;
}
}
//mkn
// Loop order m-k-n: the innermost j loop walks row s of B and row i of C
// contiguously.
for(int i = 0; i < m; i++){
for(int s = 0; s < k; s++){
for(int j = 0; j < n; j++){
d_C[i*n + j] += d_A[i*k + s] * d_B[s*n + j]; }
}
}
}
extern "C" {
// Host wrapper: computes C = A * B on the GPU using the fully serial
// kernel1. A is m x k, B is k x n, C is m x n, all row-major doubles in
// host memory. Allocates device buffers, copies A and B over, launches
// the kernel with a single thread, copies C back and frees device memory.
// NOTE(review): no return codes of hipMalloc/hipMemcpy or the launch are
// checked, so a failure would silently leave C unchanged or garbage.
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C) {
double *d_A, *d_B, *d_C; //variable on device
int size_matrix_A = m * k * sizeof(double);
hipMalloc((void**)&d_A, size_matrix_A); // allocate memory on GPU
int size_matrix_B = k * n * sizeof(double);
hipMalloc((void**)&d_B, size_matrix_B);
int size_matrix_C = m * n * sizeof(double);
hipMalloc((void**)&d_C, size_matrix_C);
//copy A and B to GPU
hipMemcpy(d_A, A, size_matrix_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, size_matrix_B, hipMemcpyHostToDevice);
// Launch kernel using 1 thread per block and 1 block
hipLaunchKernelGGL(( kernel1), dim3(1),dim3(1), 0, 0, m, n, k, d_A, d_B, d_C);
// Block until the kernel has finished before reading the result back.
hipDeviceSynchronize();
//transfer C back to CPU
hipMemcpy(C, d_C, size_matrix_C, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C); }
}
| e455bfd01f20e1749a521d999bf9192c3225ea8e.cu | __global__ void kernel1(int m, int n, int k, double *d_A, double *d_B, double *d_C){
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
d_C[i*n + j] = 0.0;
}
}
//mkn
for(int i = 0; i < m; i++){
for(int s = 0; s < k; s++){
for(int j = 0; j < n; j++){
d_C[i*n + j] += d_A[i*k + s] * d_B[s*n + j]; }
}
}
}
extern "C" {
// Host wrapper: computes C = A * B on the GPU using the fully serial
// kernel1. A is m x k, B is k x n, C is m x n, all row-major doubles in
// host memory. Allocates device buffers, copies A and B over, launches
// the kernel with a single thread, copies C back and frees device memory.
// NOTE(review): no return codes of cudaMalloc/cudaMemcpy or the launch
// are checked, so a failure would silently leave C unchanged or garbage.
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C) {
double *d_A, *d_B, *d_C; //variable on device
int size_matrix_A = m * k * sizeof(double);
cudaMalloc((void**)&d_A, size_matrix_A); // allocate memory on GPU
int size_matrix_B = k * n * sizeof(double);
cudaMalloc((void**)&d_B, size_matrix_B);
int size_matrix_C = m * n * sizeof(double);
cudaMalloc((void**)&d_C, size_matrix_C);
//copy A and B to GPU
cudaMemcpy(d_A, A, size_matrix_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, size_matrix_B, cudaMemcpyHostToDevice);
// Launch kernel using 1 thread per block and 1 block
kernel1<<<1,1>>>(m, n, k, d_A, d_B, d_C);
// Block until the kernel has finished before reading the result back.
cudaDeviceSynchronize();
//transfer C back to CPU
cudaMemcpy(C, d_C, size_matrix_C, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C); }
}
|
98c47dc959f701cfc128bf0b65fdbb8c9d877b9a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cmath>
#include <ctime>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define THREAD 1024
#define POWER 25
#define k 16
using namespace std;
// Kernel function to add the elements of two arrays
// Vector add y = x + y over n elements. Each thread owns a contiguous
// chunk of k elements (k is the file-level macro, 16), starting at
// k * its global thread id; the per-element bounds check handles the
// tail when n is not a multiple of k.
__global__
void add(int n, float *x, float *y)
{
int i = k*(blockIdx.x*blockDim.x + threadIdx.x);
for (int j = i; j < i + k; j++) {
if (j < n) y[j] = x[j] + y[j];
}
}
int main(void)
{
int n = (1 << POWER) - 7364;
float *x, *y, *d_x, *d_y;
cout << "Size of input: " << n << endl;
unsigned int blocks = ((n + k*THREAD - 1) / (k*THREAD));
cout << "Number of blocks: " << blocks << "\t\tNumber of threads per block: " << THREAD << "\t\tNumber of Indies per thread: " << k << endl;
//Allocate memory on CPU
x = (float*)malloc(n * sizeof(float));
y = (float*)malloc(n * sizeof(float));
//Allocate memory on GPU
hipMalloc(&d_x, n * sizeof(float));
hipMalloc(&d_y, n * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < n; i++) {
x[i] = 3.0f;
y[i] = 2.0f;
}
//Sequential add vectors
clock_t begin = clock();
for (int i = 0; i < n; i++) {
y[i] = x[i] + y[i];
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000;
cout << "The running time for sequential addtition is " << time_spent << " miliseconds." << endl;
// initialize x and y arrays on the host
for (int i = 0; i < n; i++) {
x[i] = 3.0f;
y[i] = 2.0f;
}
begin = clock();
//Copy memory from CPU to GPU
hipMemcpy(d_x, x, n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, n * sizeof(float), hipMemcpyHostToDevice);
// Perform Addition on GPU
add << < blocks, THREAD >> >(n, d_x, d_y);
//Copy memory from GPU to CPU
hipMemcpy(y, d_y, n * sizeof(float), hipMemcpyDeviceToHost);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000;
//Verify Results
bool isCorrect = true;
for (int i = 0; i < n; i++){
if (y[i] != 5.0) {
cout << "Incorrect Result at " << i << " = " << y[i] << endl;
isCorrect = false;
break;
}
}
if (isCorrect) cout << "The running time for parallel addtition is " << time_spent << " miliseconds." << endl;
//Free memory on GPU
hipFree(d_x);
hipFree(d_y);
//Free memory on CPU
free(x);
free(y);
return 0;
} | 98c47dc959f701cfc128bf0b65fdbb8c9d877b9a.cu | #include <iostream>
#include <cmath>
#include <ctime>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define THREAD 1024
#define POWER 25
#define k 16
using namespace std;
// Kernel function to add the elements of two arrays
// Vector add y = x + y over n elements. Each thread handles a
// contiguous chunk of k elements (k is the file-level macro) starting
// at k * its global thread id; the bounds check covers the tail when
// n is not a multiple of k.
__global__
void add(int n, float *x, float *y)
{
	const int base = k * (blockIdx.x * blockDim.x + threadIdx.x);
	for (int offs = 0; offs < k; ++offs) {
		const int idx = base + offs;
		if (idx < n) y[idx] = x[idx] + y[idx];
	}
}
int main(void)
{
int n = (1 << POWER) - 7364;
float *x, *y, *d_x, *d_y;
cout << "Size of input: " << n << endl;
unsigned int blocks = ((n + k*THREAD - 1) / (k*THREAD));
cout << "Number of blocks: " << blocks << "\t\tNumber of threads per block: " << THREAD << "\t\tNumber of Indies per thread: " << k << endl;
//Allocate memory on CPU
x = (float*)malloc(n * sizeof(float));
y = (float*)malloc(n * sizeof(float));
//Allocate memory on GPU
cudaMalloc(&d_x, n * sizeof(float));
cudaMalloc(&d_y, n * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < n; i++) {
x[i] = 3.0f;
y[i] = 2.0f;
}
//Sequential add vectors
clock_t begin = clock();
for (int i = 0; i < n; i++) {
y[i] = x[i] + y[i];
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000;
cout << "The running time for sequential addtition is " << time_spent << " miliseconds." << endl;
// initialize x and y arrays on the host
for (int i = 0; i < n; i++) {
x[i] = 3.0f;
y[i] = 2.0f;
}
begin = clock();
//Copy memory from CPU to GPU
cudaMemcpy(d_x, x, n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, n * sizeof(float), cudaMemcpyHostToDevice);
// Perform Addition on GPU
add << < blocks, THREAD >> >(n, d_x, d_y);
//Copy memory from GPU to CPU
cudaMemcpy(y, d_y, n * sizeof(float), cudaMemcpyDeviceToHost);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC * 1000;
//Verify Results
bool isCorrect = true;
for (int i = 0; i < n; i++){
if (y[i] != 5.0) {
cout << "Incorrect Result at " << i << " = " << y[i] << endl;
isCorrect = false;
break;
}
}
if (isCorrect) cout << "The running time for parallel addtition is " << time_spent << " miliseconds." << endl;
//Free memory on GPU
cudaFree(d_x);
cudaFree(d_y);
//Free memory on CPU
free(x);
free(y);
return 0;
} |
870e88b8599b6e14c5b9aec8eb90f8d1874d192b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <iostream>
#include <set>
#include "utils.hpp"
#include "options.hpp"
#include <vector>
#include <tuple>
/////////////////////////////
/////////////////////////////
namespace chrono = std::chrono;
using clock_type = chrono::high_resolution_clock;
/////////////////////////////
/////////////////////////////
#define NUM_THREADS_PER_BLOCK 32
#define THREADS_PER_VECTOR 4
#define MAX_NUM_VECTORS_PER_BLOCK (1024 / THREADS_PER_VECTOR)
// CSR sparse matrix-vector product res = A * vec: one row per thread,
// grid-stride loop over the num_rows rows. ptr holds CSR row offsets,
// idx the column indices, val the values; num_nnz is unused here.
// NOTE(review): val is declared const int* although its entries are
// multiplied with floats, and the sibling kernels take float* d_val --
// looks like a typo for const float*; confirm against callers before use.
extern "C" __global__ void spmv(const int *ptr, const int *idx, const int *val, const float *vec, float *res, int num_rows, int num_nnz) {
for(int n = blockIdx.x * blockDim.x + threadIdx.x; n < num_rows; n += blockDim.x * gridDim.x) {
float sum = 0;
for (int i = ptr[n]; i < ptr[n + 1]; i++) {
sum += val[i] * vec[idx[i]];
}
res[n] = sum;
}
}
// CSR SpMV d_out = A * d_vector for an N-row matrix (d_ptr row offsets,
// d_cols column indices, d_val values). THREADS_PER_VECTOR threads
// cooperate on each row ("vector" strategy); rows are handed out
// dynamically by atomically bumping *cudaRowCounter, which the caller
// must zero before every launch. Uses mask-synchronized warp shuffles
// for the row-index broadcast and the intra-vector reduction.
extern "C" __global__ void spmv2(int* cudaRowCounter, int* d_ptr, int* d_cols, float* d_val, float* d_vector, float* d_out, int N) {
int i;
int thread_per_vector = THREADS_PER_VECTOR;
float sum;
int row;
int rowStart, rowEnd;
int laneId = threadIdx.x % thread_per_vector; //lane index in the vector
int vectorId = threadIdx.x / thread_per_vector; //vector index in the thread block
int warpLaneId = threadIdx.x & 31; //lane index in the warp
int warpVectorId = warpLaneId / thread_per_vector; //vector index in the warp
__shared__ volatile int space[MAX_NUM_VECTORS_PER_BLOCK][2];
// Get the row index
if (warpLaneId == 0) {
row = atomicAdd(cudaRowCounter, 32 / thread_per_vector);
}
// Broadcast the value to other threads in the same warp and compute the row index of each vector
// (only lane 0's `row` is initialized here; the shuffle broadcasts it to the rest of the warp).
row = __shfl_sync(0xffffffff, row, 0) + warpVectorId;
while (row < N) {
// Use two threads to fetch the row offset
if (laneId < 2) {
space[vectorId][laneId] = d_ptr[row + laneId];
}
// NOTE(review): the shared-memory write above is read below without an
// explicit __syncwarp(); this relies on implicit lockstep within the
// sub-warp vector group -- verify on architectures with independent
// thread scheduling.
rowStart = space[vectorId][0];
rowEnd = space[vectorId][1];
sum = 0;
// Compute dot product
if (thread_per_vector == 32) {
// Ensure aligned memory access
i = rowStart - (rowStart & (thread_per_vector - 1)) + laneId;
// Process the unaligned part
if (i >= rowStart && i < rowEnd) {
sum += d_val[i] * d_vector[d_cols[i]];
}
// Process the aligned part
for (i += thread_per_vector; i < rowEnd; i += thread_per_vector) {
sum += d_val[i] * d_vector[d_cols[i]];
}
} else {
for (i = rowStart + laneId; i < rowEnd; i += thread_per_vector) {
sum += d_val[i] * d_vector[d_cols[i]];
}
}
// Intra-vector reduction
for (i = thread_per_vector >> 1; i > 0; i >>= 1) {
sum += __shfl_down_sync(0xffffffff,sum, i);
}
// Save the results
if (laneId == 0) {
d_out[row] = sum;
}
// Get a new row index
if(warpLaneId == 0) {
row = atomicAdd(cudaRowCounter, 32 / thread_per_vector);
}
// Broadcast the row index to the other threads in the same warp and compute the row index of each vector
row = __shfl_sync(0xffffffff,row, 0) + warpVectorId;
}
}
// Compute d_out = y + alpha * A * d_vector;
// Same row-per-vector CSR SpMV as spmv2 (THREADS_PER_VECTOR threads per
// row, dynamic row assignment via *cudaRowCounter, which must be zeroed
// before launch), fused with the axpy d_out[row] = y[row] + alpha * sum.
extern "C" __global__ void spmv_full(int* cudaRowCounter, int* d_ptr, int* d_cols, float* d_val, float* d_vector, float* d_out, int N, float alpha, float* y) {
int i;
int thread_per_vector = THREADS_PER_VECTOR;
float sum;
int row;
int rowStart, rowEnd;
int laneId = threadIdx.x % thread_per_vector; //lane index in the vector
int vectorId = threadIdx.x / thread_per_vector; //vector index in the thread block
int warpLaneId = threadIdx.x & 31; //lane index in the warp
int warpVectorId = warpLaneId / thread_per_vector; //vector index in the warp
__shared__ volatile int space[MAX_NUM_VECTORS_PER_BLOCK][2];
// Get the row index
if (warpLaneId == 0) {
row = atomicAdd(cudaRowCounter, 32 / thread_per_vector);
}
// Broadcast the value to other threads in the same warp and compute the row index of each vector
// (only lane 0's `row` is initialized here; the shuffle broadcasts it).
row = __shfl_sync(0xffffffff, row, 0) + warpVectorId;
while (row < N) {
// Use two threads to fetch the row offset
if (laneId < 2) {
space[vectorId][laneId] = d_ptr[row + laneId];
}
rowStart = space[vectorId][0];
rowEnd = space[vectorId][1];
sum = 0;
// Compute dot product
if (thread_per_vector == 32) {
// Ensure aligned memory access
i = rowStart - (rowStart & (thread_per_vector - 1)) + laneId;
// Process the unaligned part
if (i >= rowStart && i < rowEnd) {
sum += d_val[i] * d_vector[d_cols[i]];
}
// Process the aligned part
for (i += thread_per_vector; i < rowEnd; i += thread_per_vector) {
sum += d_val[i] * d_vector[d_cols[i]];
}
} else {
for (i = rowStart + laneId; i < rowEnd; i += thread_per_vector) {
sum += d_val[i] * d_vector[d_cols[i]];
}
}
// Intra-vector reduction
for (i = thread_per_vector >> 1; i > 0; i >>= 1) {
sum += __shfl_down_sync(0xffffffff,sum, i);
}
// Save the results
if (laneId == 0) {
d_out[row] = y[row] + alpha * sum;
}
// Get a new row index
if(warpLaneId == 0) {
row = atomicAdd(cudaRowCounter, 32 / thread_per_vector);
}
// Broadcast the row index to the other threads in the same warp and compute the row index of each vector
row = __shfl_sync(0xffffffff,row, 0) + warpVectorId;
}
}
// Tree reduction over the 32 lanes of the calling warp: after the loop,
// lane 0 holds the sum of all lanes' `val` (other lanes hold partials).
__inline__ __device__ float warp_reduce(float val) {
    #pragma unroll
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xFFFFFFFF, val, offset);
    return val;
}
// Accumulates the squared L2 norm of x, i.e. dot(x, x), into *z.
// *z must be zeroed before launch: each warp atomically adds its
// partial sum into the single output scalar.
extern "C" __global__ void vector_norm(const float *x, float* z, int N) {
int warp_size = 32;
float sum = float(0);
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
sum += x[i] * x[i];
}
sum = warp_reduce(sum); // Obtain the sum of values in the current warp;
if ((threadIdx.x & (warp_size - 1)) == 0) // Same as (threadIdx.x % warp_size) == 0 but faster
atomicAdd(z, sum); // The first thread in the warp updates the output;
}
// Accumulates dot(x, y) into *z; *z must be zeroed before launch.
// Grid-stride accumulation, warp-level shuffle reduction, then a single
// atomicAdd per warp to limit contention on the output scalar.
extern "C" __global__ void dot_product(const float *x, const float *y, float* z, int N) {
    const int lane = threadIdx.x & 31;              // lane index within the warp
    const int stride = blockDim.x * gridDim.x;
    float partial = 0.0f;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride) {
        partial += x[idx] * y[idx];
    }
    partial = warp_reduce(partial);                 // per-warp total (valid in lane 0)
    if (lane == 0)
        atomicAdd(z, partial);                      // one global update per warp
}
// Element-wise y = val + alpha * x over n entries (grid-stride loop).
extern "C" __global__ void saxpy(float* y, float *val, float *x, float alpha, int n) {
    const int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step) {
        y[idx] = val[idx] + alpha * x[idx];
    }
}
// Element-wise copy y[i] = x[i] for i in [0, n) (grid-stride loop).
extern "C" __global__ void cpy(float *y, const float *x, int n) {
    const int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step) {
        y[idx] = x[idx];
    }
}
/////////////////////////////
/////////////////////////////
// Reinitialize solver state between benchmark iterations:
// fill the N-element solution vector x with 1, and zero the two scalar
// accumulators and the two kernel row counters.
void reset(float *x, float *t1, float *t2, int *row_cnt_1, int *row_cnt_2, int N) {
    for (float *p = x; p != x + N; ++p) {
        *p = 1.0f;
    }
    *t1 = 0.0f;
    *t2 = 0.0f;
    *row_cnt_1 = 0;
    *row_cnt_2 = 0;
}
// Fill b[0..N) with uniform pseudo-random values in [0, 1] drawn from
// rand(); seed with srand() beforehand for reproducibility.
void init(float *b, int N) {
    for (int idx = 0; idx < N; ++idx) {
        b[idx] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    }
}
// Build a random symmetric sparse matrix in COO format.
// For each row i, `degree` distinct off-diagonal column indices are
// drawn uniformly at random; every edge (i, e) is emitted twice,
// as (i, e, v) and (e, i, v) with the same value v in [0, 1], so
// exactly 2 * N * degree entries are written to x / y / val (the
// caller must size those arrays accordingly).
// Preconditions: 0 <= degree < N, otherwise the rejection loop that
// draws distinct non-diagonal columns cannot terminate.
// Uses rand(); seed with srand() for reproducibility.
template<typename I, typename T>
void random_coo(I* x, I *y, T *val, int N, int degree) {
    // Create random matrix entries;
    std::vector<std::tuple<I, I, T>> t;
    for (int i = 0; i < N; i++) {
        std::set<I> edges;
        // Cast to int: avoids the signed/unsigned comparison between
        // size_t and int (and a runaway loop if degree were negative).
        while ((int)edges.size() < degree) {
            I edge = (I) rand() % N;
            if (i != edge) {
                edges.insert(edge);
            }
        }
        for (auto e = edges.begin(); e != edges.end(); e++) {
            T tmp = (T)(rand()) / (T)(RAND_MAX);
            // Emit the entry and its transpose back-to-back so the
            // resulting matrix is symmetric.
            t.push_back(std::make_tuple(i, *e, tmp));
            t.push_back(std::make_tuple(*e, i, tmp));
        }
    }
    // Scatter the collected triplets into the output COO arrays.
    int i = 0;
    for (auto t_i = t.begin(); t_i != t.end(); t_i++, i++) {
        x[i] = std::get<0>(*t_i);
        y[i] = std::get<1>(*t_i);
        val[i] = std::get<2>(*t_i);
    }
}
/////////////////////////////
/////////////////////////////
// Benchmark driver: runs `num_executions` timed repetitions of a fixed
// 10-iteration conjugate-gradient-style solve on a random symmetric CSR
// matrix (N rows, 2 random edges per row, symmetrized). Work is split
// across two HIP streams (s1: SpMV/saxpy on x and p, s2: norms on r)
// with an event making s2 wait for the initial residual. All solver
// buffers live in managed (unified) memory, so the host reads/writes
// *t1, *t2 and the row counters directly between stream synchronizations.
int main(int argc, char *argv[]) {
srand(time(0));
Options options = Options(argc, argv);
int debug = options.debug;
int num_executions = options.num_iter;
int N = options.N;
int max_degree = 2;
int iterations = 10;
int num_blocks = options.num_blocks;
int block_size_1d = options.block_size_1d;
int block_size_2d = options.block_size_2d;
int skip_iterations = options.skip_iterations;
int err = 0;
if (debug) {
std::cout << "running b9 default" << std::endl;
std::cout << "N=" << N << std::endl;
std::cout << "num executions=" << num_executions << std::endl;
std::cout << "block size 1d=" << block_size_1d << std::endl;
std::cout << "block size 2d=" << block_size_2d << std::endl;
std::cout << "num blocks=" << num_blocks << std::endl;
std::cout << "skip iteration time=" << skip_iterations << std::endl;
}
auto start = clock_type::now();
int nnz = N * max_degree * 2;
int *ptr, *idx, *rowCounter1, *rowCounter2;
float *x, *b, *val, *p, *r, *t1, *t2, *y;
// Managed allocations shared by host and device.
// NOTE(review): `err` is overwritten by every call and only printed
// twice below in debug mode -- most allocation failures go unnoticed.
err = hipMallocManaged(&ptr, sizeof(int) * (N + 1));
err = hipMallocManaged(&idx, sizeof(int) * nnz);
err = hipMallocManaged(&val, sizeof(float) * nnz);
err = hipMallocManaged(&rowCounter1, sizeof(int));
err = hipMallocManaged(&rowCounter2, sizeof(int));
err = hipMallocManaged(&x, sizeof(float) * N);
err = hipMallocManaged(&b, sizeof(float) * N);
err = hipMallocManaged(&p, sizeof(float) * N);
err = hipMallocManaged(&r, sizeof(float) * N);
err = hipMallocManaged(&y, sizeof(float) * N);
err = hipMallocManaged(&t1, sizeof(float));
err = hipMallocManaged(&t2, sizeof(float));
// Create streams;
hipStream_t s1, s2;
err = hipStreamCreate(&s1);
err = hipStreamCreate(&s2);
if (debug && err) std::cout << err << std::endl;
if (debug && err) std::cout << err << std::endl;
// Initialize arrays: random symmetric COO matrix, converted to CSR,
// and a random right-hand side b.
start = clock_type::now();
int *x_coo = (int*) malloc(nnz * sizeof(int));
int *y_coo = (int*) malloc(nnz * sizeof(int));
float *v_coo = (float*) malloc(nnz * sizeof(float));
random_coo(x_coo, y_coo, v_coo, N, max_degree);
coo2csr(ptr, idx, val, x_coo, y_coo, v_coo, N, N, nnz);
init(b, N);
auto end = clock_type::now();
if (debug) std::cout << "init=" << (float) chrono::duration_cast<chrono::microseconds>(end - start).count() / 1000 << " ms" << std::endl;
// Print header;
if (!debug) std::cout << "num_iter,gpu_result,total_time_sec,overhead_sec,computation_sec" << std::endl;
float tot = 0;
for (int i = 0; i < num_executions; i++) {
if (debug) std::cout << "\n-- iter=" << i << std::endl;
auto start_tmp = clock_type::now();
reset(x, t1, t2, rowCounter1, rowCounter2, N);
auto end_tmp = clock_type::now();
auto reset_time = chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count();
if (debug) std::cout << " reset=" << (float) reset_time / 1000 << " ms" << std::endl;
// nb = enough blocks for one thread per matrix row in the SpMV kernels.
int nb = ceil(N / ((float) block_size_1d));
start = clock_type::now();
hipStreamAttachMemAsync(s2, t1, 0);
// Initial residual r = b - A*x (alpha = -1), then search direction p = r.
hipLaunchKernelGGL(( spmv_full), dim3(nb), dim3(block_size_1d), block_size_1d * sizeof(float), s1, rowCounter1, ptr, idx, val, x, r, N, -1.0, b);
hipLaunchKernelGGL(( cpy), dim3(num_blocks), dim3(block_size_1d), 0, s1, p, r, N);
// NOTE(review): a new event is created on every outer iteration but
// never destroyed (no hipEventDestroy) -- leaks one event per execution.
hipEvent_t e1;
hipEventCreate(&e1);
hipEventRecord(e1, s1);
hipStreamWaitEvent(s2, e1, 0);
// *t1 accumulates ||r||^2 on stream s2, overlapping s1's work.
hipLaunchKernelGGL(( vector_norm), dim3(num_blocks), dim3(block_size_1d), 0, s2, r, t1, N);
for (int iter = 0; iter < iterations; iter++) {
hipLaunchKernelGGL(( spmv2), dim3(nb), dim3(block_size_1d), block_size_1d * sizeof(float), s1, rowCounter2, ptr, idx, val, p, y, N);
hipLaunchKernelGGL(( dot_product), dim3(num_blocks), dim3(block_size_1d), 0, s1, p, y, t2, N);
// Both streams must be idle before the host reads/writes the managed scalars.
hipStreamSynchronize(s1);
hipStreamSynchronize(s2);
float alpha = *t1 / *t2;
float old_t1 = *t1;
*t1 = 0.0;
*rowCounter1 = 0;
*rowCounter2 = 0;
hipLaunchKernelGGL(( saxpy), dim3(num_blocks), dim3(block_size_1d), 0, s1, x, x, p, alpha, N);
hipLaunchKernelGGL(( saxpy), dim3(num_blocks), dim3(block_size_1d), 0, s2, r, r, y, -1.0 * alpha, N);
hipLaunchKernelGGL(( vector_norm), dim3(num_blocks), dim3(block_size_1d), 0, s2, r, t1, N);
hipStreamSynchronize(s2);
float beta = *t1 / old_t1;
hipLaunchKernelGGL(( saxpy), dim3(num_blocks), dim3(block_size_1d), 0, s1, p, r, p, beta, N);
}
hipStreamSynchronize(s1);
end = clock_type::now();
auto tmp = chrono::duration_cast<chrono::microseconds>(end - start).count();
tot += tmp;
if (debug) {
std::cout << " gpu result=[";
for (int j = 0; j < 10; j++) {
std::cout << x[j] << ", ";
}
std::cout << ", ...]; time=" << (float) tmp / 1000 << " ms" << std::endl;
} else {
std::cout << i << "," << 0.0 << "," << (float) (reset_time + tmp) / 1e6 << "," << (float) reset_time / 1e6 << "," << (float) tmp / 1e6 << std::endl;
}
}
// Print;
// NOTE(review): device buffers, streams and the COO host arrays are
// never freed -- acceptable at process exit, but worth noting.
hipDeviceSynchronize();
if (debug) std::cout << "\nmean exec time=" << (float) tot / (1000 * num_executions) << " ms" << std::endl;
}
| 870e88b8599b6e14c5b9aec8eb90f8d1874d192b.cu | #include <chrono>
#include <cstdlib>
#include <ctime>
#include <math.h>
#include <iostream>
#include <set>
#include "utils.hpp"
#include "options.hpp"
#include <vector>
#include <tuple>
/////////////////////////////
/////////////////////////////
namespace chrono = std::chrono;
using clock_type = chrono::high_resolution_clock;
/////////////////////////////
/////////////////////////////
#define NUM_THREADS_PER_BLOCK 32
#define THREADS_PER_VECTOR 4
#define MAX_NUM_VECTORS_PER_BLOCK (1024 / THREADS_PER_VECTOR)
extern "C" __global__ void spmv(const int *ptr, const int *idx, const int *val, const float *vec, float *res, int num_rows, int num_nnz) {
for(int n = blockIdx.x * blockDim.x + threadIdx.x; n < num_rows; n += blockDim.x * gridDim.x) {
float sum = 0;
for (int i = ptr[n]; i < ptr[n + 1]; i++) {
sum += val[i] * vec[idx[i]];
}
res[n] = sum;
}
}
extern "C" __global__ void spmv2(int* cudaRowCounter, int* d_ptr, int* d_cols, float* d_val, float* d_vector, float* d_out, int N) {
int i;
int thread_per_vector = THREADS_PER_VECTOR;
float sum;
int row;
int rowStart, rowEnd;
int laneId = threadIdx.x % thread_per_vector; //lane index in the vector
int vectorId = threadIdx.x / thread_per_vector; //vector index in the thread block
int warpLaneId = threadIdx.x & 31; //lane index in the warp
int warpVectorId = warpLaneId / thread_per_vector; //vector index in the warp
__shared__ volatile int space[MAX_NUM_VECTORS_PER_BLOCK][2];
// Get the row index
if (warpLaneId == 0) {
row = atomicAdd(cudaRowCounter, 32 / thread_per_vector);
}
// Broadcast the value to other threads in the same warp and compute the row index of each vector
row = __shfl_sync(0xffffffff, row, 0) + warpVectorId;
while (row < N) {
// Use two threads to fetch the row offset
if (laneId < 2) {
space[vectorId][laneId] = d_ptr[row + laneId];
}
rowStart = space[vectorId][0];
rowEnd = space[vectorId][1];
sum = 0;
// Compute dot product
if (thread_per_vector == 32) {
// Ensure aligned memory access
i = rowStart - (rowStart & (thread_per_vector - 1)) + laneId;
// Process the unaligned part
if (i >= rowStart && i < rowEnd) {
sum += d_val[i] * d_vector[d_cols[i]];
}
// Process the aligned part
for (i += thread_per_vector; i < rowEnd; i += thread_per_vector) {
sum += d_val[i] * d_vector[d_cols[i]];
}
} else {
for (i = rowStart + laneId; i < rowEnd; i += thread_per_vector) {
sum += d_val[i] * d_vector[d_cols[i]];
}
}
// Intra-vector reduction
for (i = thread_per_vector >> 1; i > 0; i >>= 1) {
sum += __shfl_down_sync(0xffffffff,sum, i);
}
// Save the results
if (laneId == 0) {
d_out[row] = sum;
}
// Get a new row index
if(warpLaneId == 0) {
row = atomicAdd(cudaRowCounter, 32 / thread_per_vector);
}
// Broadcast the row index to the other threads in the same warp and compute the row index of each vector
row = __shfl_sync(0xffffffff,row, 0) + warpVectorId;
}
}
// Compute d_out = y + alpha * A * d_vector;
extern "C" __global__ void spmv_full(int* cudaRowCounter, int* d_ptr, int* d_cols, float* d_val, float* d_vector, float* d_out, int N, float alpha, float* y) {
int i;
int thread_per_vector = THREADS_PER_VECTOR;
float sum;
int row;
int rowStart, rowEnd;
int laneId = threadIdx.x % thread_per_vector; //lane index in the vector
int vectorId = threadIdx.x / thread_per_vector; //vector index in the thread block
int warpLaneId = threadIdx.x & 31; //lane index in the warp
int warpVectorId = warpLaneId / thread_per_vector; //vector index in the warp
__shared__ volatile int space[MAX_NUM_VECTORS_PER_BLOCK][2];
// Get the row index
if (warpLaneId == 0) {
row = atomicAdd(cudaRowCounter, 32 / thread_per_vector);
}
// Broadcast the value to other threads in the same warp and compute the row index of each vector
row = __shfl_sync(0xffffffff, row, 0) + warpVectorId;
while (row < N) {
// Use two threads to fetch the row offset
if (laneId < 2) {
space[vectorId][laneId] = d_ptr[row + laneId];
}
rowStart = space[vectorId][0];
rowEnd = space[vectorId][1];
sum = 0;
// Compute dot product
if (thread_per_vector == 32) {
// Ensure aligned memory access
i = rowStart - (rowStart & (thread_per_vector - 1)) + laneId;
// Process the unaligned part
if (i >= rowStart && i < rowEnd) {
sum += d_val[i] * d_vector[d_cols[i]];
}
// Process the aligned part
for (i += thread_per_vector; i < rowEnd; i += thread_per_vector) {
sum += d_val[i] * d_vector[d_cols[i]];
}
} else {
for (i = rowStart + laneId; i < rowEnd; i += thread_per_vector) {
sum += d_val[i] * d_vector[d_cols[i]];
}
}
// Intra-vector reduction
for (i = thread_per_vector >> 1; i > 0; i >>= 1) {
sum += __shfl_down_sync(0xffffffff,sum, i);
}
// Save the results
if (laneId == 0) {
d_out[row] = y[row] + alpha * sum;
}
// Get a new row index
if(warpLaneId == 0) {
row = atomicAdd(cudaRowCounter, 32 / thread_per_vector);
}
// Broadcast the row index to the other threads in the same warp and compute the row index of each vector
row = __shfl_sync(0xffffffff,row, 0) + warpVectorId;
}
}
__inline__ __device__ float warp_reduce(float val) {
int warp_size = 32;
for (int offset = warp_size / 2; offset > 0; offset /= 2)
val += __shfl_down_sync(0xFFFFFFFF, val, offset);
return val;
}
extern "C" __global__ void vector_norm(const float *x, float* z, int N) {
int warp_size = 32;
float sum = float(0);
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
sum += x[i] * x[i];
}
sum = warp_reduce(sum); // Obtain the sum of values in the current warp;
if ((threadIdx.x & (warp_size - 1)) == 0) // Same as (threadIdx.x % warp_size) == 0 but faster
atomicAdd(z, sum); // The first thread in the warp updates the output;
}
extern "C" __global__ void dot_product(const float *x, const float *y, float* z, int N) {
int warp_size = 32;
float sum = float(0);
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
sum += x[i] * y[i];
}
sum = warp_reduce(sum); // Obtain the sum of values in the current warp;
if ((threadIdx.x & (warp_size - 1)) == 0) // Same as (threadIdx.x % warp_size) == 0 but faster
atomicAdd(z, sum); // The first thread in the warp updates the output;
}
// Compute y = val + alpha * x;
extern "C" __global__ void saxpy(float* y, float *val, float *x, float alpha, int n) {
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
y[i] = val[i] + alpha * x[i];
}
}
extern "C" __global__ void cpy(float *y, const float *x, int n) {
for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
y[i] = x[i];
}
}
/////////////////////////////
/////////////////////////////
void reset(float *x, float *t1, float *t2, int *row_cnt_1, int *row_cnt_2, int N) {
for (int i = 0; i < N; i++) {
x[i] = 1.0;
}
*t1 = 0.0;
*t2 = 0.0;
*row_cnt_1 = 0;
*row_cnt_2 = 0;
}
void init(float *b, int N) {
for (int i = 0; i < N; i++) {
b[i] = (float)(rand()) / (float)(RAND_MAX);
}
}
template<typename I, typename T>
void random_coo(I* x, I *y, T *val, int N, int degree) {
// Create random matrix entries;
std::vector<std::tuple<I, I, T>> t;
for (int i = 0; i < N; i++) {
std::set<I> edges;
while (edges.size() < degree) {
I edge = (I) rand() % N;
if (i != edge) {
edges.insert(edge);
}
}
for (auto e = edges.begin(); e != edges.end(); e++) {
T tmp = (T)(rand()) / (T)(RAND_MAX);
auto tuple1 = std::make_tuple(i, *e, tmp);
auto tuple2 = std::make_tuple(*e, i, tmp);
t.push_back(tuple1);
t.push_back(tuple2);
}
}
int i = 0;
for (auto t_i = t.begin(); t_i != t.end(); t_i++, i++) {
x[i] = std::get<0>(*t_i);
y[i] = std::get<1>(*t_i);
val[i] = std::get<2>(*t_i);
}
}
/////////////////////////////
/////////////////////////////
int main(int argc, char *argv[]) {
srand(time(0));
Options options = Options(argc, argv);
int debug = options.debug;
int num_executions = options.num_iter;
int N = options.N;
int max_degree = 2;
int iterations = 10;
int num_blocks = options.num_blocks;
int block_size_1d = options.block_size_1d;
int block_size_2d = options.block_size_2d;
int skip_iterations = options.skip_iterations;
int err = 0;
if (debug) {
std::cout << "running b9 default" << std::endl;
std::cout << "N=" << N << std::endl;
std::cout << "num executions=" << num_executions << std::endl;
std::cout << "block size 1d=" << block_size_1d << std::endl;
std::cout << "block size 2d=" << block_size_2d << std::endl;
std::cout << "num blocks=" << num_blocks << std::endl;
std::cout << "skip iteration time=" << skip_iterations << std::endl;
}
auto start = clock_type::now();
int nnz = N * max_degree * 2;
int *ptr, *idx, *rowCounter1, *rowCounter2;
float *x, *b, *val, *p, *r, *t1, *t2, *y;
err = cudaMallocManaged(&ptr, sizeof(int) * (N + 1));
err = cudaMallocManaged(&idx, sizeof(int) * nnz);
err = cudaMallocManaged(&val, sizeof(float) * nnz);
err = cudaMallocManaged(&rowCounter1, sizeof(int));
err = cudaMallocManaged(&rowCounter2, sizeof(int));
err = cudaMallocManaged(&x, sizeof(float) * N);
err = cudaMallocManaged(&b, sizeof(float) * N);
err = cudaMallocManaged(&p, sizeof(float) * N);
err = cudaMallocManaged(&r, sizeof(float) * N);
err = cudaMallocManaged(&y, sizeof(float) * N);
err = cudaMallocManaged(&t1, sizeof(float));
err = cudaMallocManaged(&t2, sizeof(float));
// Create streams;
cudaStream_t s1, s2;
err = cudaStreamCreate(&s1);
err = cudaStreamCreate(&s2);
if (debug && err) std::cout << err << std::endl;
if (debug && err) std::cout << err << std::endl;
// Initialze arrays;
start = clock_type::now();
int *x_coo = (int*) malloc(nnz * sizeof(int));
int *y_coo = (int*) malloc(nnz * sizeof(int));
float *v_coo = (float*) malloc(nnz * sizeof(float));
random_coo(x_coo, y_coo, v_coo, N, max_degree);
coo2csr(ptr, idx, val, x_coo, y_coo, v_coo, N, N, nnz);
init(b, N);
auto end = clock_type::now();
if (debug) std::cout << "init=" << (float) chrono::duration_cast<chrono::microseconds>(end - start).count() / 1000 << " ms" << std::endl;
// Print header;
if (!debug) std::cout << "num_iter,gpu_result,total_time_sec,overhead_sec,computation_sec" << std::endl;
float tot = 0;
for (int i = 0; i < num_executions; i++) {
if (debug) std::cout << "\n-- iter=" << i << std::endl;
auto start_tmp = clock_type::now();
reset(x, t1, t2, rowCounter1, rowCounter2, N);
auto end_tmp = clock_type::now();
auto reset_time = chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count();
if (debug) std::cout << " reset=" << (float) reset_time / 1000 << " ms" << std::endl;
int nb = ceil(N / ((float) block_size_1d));
start = clock_type::now();
cudaStreamAttachMemAsync(s2, t1, 0);
spmv_full<<<nb, block_size_1d, block_size_1d * sizeof(float), s1>>>(rowCounter1, ptr, idx, val, x, r, N, -1.0, b);
cpy<<<num_blocks, block_size_1d, 0, s1>>>(p, r, N);
cudaEvent_t e1;
cudaEventCreate(&e1);
cudaEventRecord(e1, s1);
cudaStreamWaitEvent(s2, e1, 0);
vector_norm<<<num_blocks, block_size_1d, 0, s2>>>(r, t1, N);
for (int iter = 0; iter < iterations; iter++) {
spmv2<<<nb, block_size_1d, block_size_1d * sizeof(float), s1>>>(rowCounter2, ptr, idx, val, p, y, N);
dot_product<<<num_blocks, block_size_1d, 0, s1>>>(p, y, t2, N);
cudaStreamSynchronize(s1);
cudaStreamSynchronize(s2);
float alpha = *t1 / *t2;
float old_t1 = *t1;
*t1 = 0.0;
*rowCounter1 = 0;
*rowCounter2 = 0;
saxpy<<<num_blocks, block_size_1d, 0, s1>>>(x, x, p, alpha, N);
saxpy<<<num_blocks, block_size_1d, 0, s2>>>(r, r, y, -1.0 * alpha, N);
vector_norm<<<num_blocks, block_size_1d, 0, s2>>>(r, t1, N);
cudaStreamSynchronize(s2);
float beta = *t1 / old_t1;
saxpy<<<num_blocks, block_size_1d, 0, s1>>>(p, r, p, beta, N);
}
cudaStreamSynchronize(s1);
end = clock_type::now();
auto tmp = chrono::duration_cast<chrono::microseconds>(end - start).count();
tot += tmp;
if (debug) {
std::cout << " gpu result=[";
for (int j = 0; j < 10; j++) {
std::cout << x[j] << ", ";
}
std::cout << ", ...]; time=" << (float) tmp / 1000 << " ms" << std::endl;
} else {
std::cout << i << "," << 0.0 << "," << (float) (reset_time + tmp) / 1e6 << "," << (float) reset_time / 1e6 << "," << (float) tmp / 1e6 << std::endl;
}
}
// Print;
cudaDeviceSynchronize();
if (debug) std::cout << "\nmean exec time=" << (float) tot / (1000 * num_executions) << " ms" << std::endl;
}
|
c2f5ec9f227b4ce8d3c524c54eed5b64538502d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
// image size
int rows = 1224, cols = 1624;
int imgSize = rows*cols;
// iterations for stereo matching algorithm
int iteration = 1;
// disparity range
int Dmin = 1;
int Dmax = 80;
int Drange = Dmax - Dmin + 1;
//int winRadius = 9;
// device image pointer
float* dLImgPtr_f = NULL;
float* dRImgPtr_f = NULL;
size_t lPitch, rPitch;
// texture memory for stereo image pair <Type, Dim, ReadMode>
texture<float, 2, hipReadModeElementType> lTex;
texture<float, 2, hipReadModeElementType> rTex;
// timing arrays
const int nt = 2;
double start[nt], end[nt];
double random_start[nt], random_end[nt];
double main_start[nt], main_end[nt];
// evaluate window-based disimilarity
__global__ void init(unsigned int seed, hiprandState_t* states, int cols)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int idx = y*cols+x;
hiprand_init(seed, idx, 0, &states[idx]);
} | c2f5ec9f227b4ce8d3c524c54eed5b64538502d1.cu | #include "includes.h"
using namespace std;
// image size
int rows = 1224, cols = 1624;
int imgSize = rows*cols;
// iterations for stereo matching algorithm
int iteration = 1;
// disparity range
int Dmin = 1;
int Dmax = 80;
int Drange = Dmax - Dmin + 1;
//int winRadius = 9;
// device image pointer
float* dLImgPtr_f = NULL;
float* dRImgPtr_f = NULL;
size_t lPitch, rPitch;
// texture memory for stereo image pair <Type, Dim, ReadMode>
texture<float, 2, cudaReadModeElementType> lTex;
texture<float, 2, cudaReadModeElementType> rTex;
// timing arrays
const int nt = 2;
double start[nt], end[nt];
double random_start[nt], random_end[nt];
double main_start[nt], main_end[nt];
// evaluate window-based disimilarity
__global__ void init(unsigned int seed, curandState_t* states, int cols)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int idx = y*cols+x;
curand_init(seed, idx, 0, &states[idx]);
} |
3c40e18db14b6626ea7a7f32378514090cfe7f9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "NewtonSolverCusp.h"
// general cuda includes
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// cusp and thurst includes
#include <cusp/coo_matrix.h>
#include <cusp/print.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/zip_iterator.h>
#include <cusp/krylov/cg.h>
#include <cusp/monitor.h>
#include <thrust/copy.h>
std::vector<float> NewtonSolverCusp::step(std::vector<int> &stdI, std::vector<int> &stdJ, std::vector<float> &stdV, std::vector<float> &stdForce)
{
int num_triplets = stdI.size();
cusp::array1d<int, cusp::device_memory> I(num_triplets); // row indices
cusp::array1d<int, cusp::device_memory> J(num_triplets); // column indices
cusp::array1d<float, cusp::device_memory> V(num_triplets); // value
thrust::copy(stdI.begin(), stdI.end(), I.begin());
thrust::copy(stdJ.begin(), stdJ.end(), J.begin());
thrust::copy(stdV.begin(), stdV.end(), V.begin());
thrust::stable_sort_by_key(J.begin(), J.end(), thrust::make_zip_iterator(thrust::make_tuple(I.begin(), V.begin())));
thrust::stable_sort_by_key(I.begin(), I.end(), thrust::make_zip_iterator(thrust::make_tuple(J.begin(), V.begin())));
int num_entries = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1,
thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1,
int(0),
thrust::plus<int>(),
thrust::not_equal_to< thrust::tuple<int,int> >()) + 1;
int num_rows = stdForce.size();
int num_cols = stdForce.size();
cusp::coo_matrix<int, float, cusp::device_memory> K(num_rows, num_cols, num_entries);
thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())),
V.begin(),
thrust::make_zip_iterator(thrust::make_tuple(K.row_indices.begin(), K.column_indices.begin())),
K.values.begin(),
thrust::equal_to< thrust::tuple<int,int> >(),
thrust::plus<float>());
cusp::array1d<float, cusp::device_memory> x(K.num_rows, 0);
cusp::array1d<float, cusp::device_memory> f(K.num_rows);
thrust::copy(stdForce.begin(), stdForce.end(), f.begin());
// set stopping criteria:
// iteration_limit = 100
// relative_tolerance = 1e-3
//cusp::verbose_monitor<float> monitor(f, 100, 1e-3);
// set preconditioner (identity)
cusp::identity_operator<float, cusp::device_memory> M(K.num_rows, K.num_rows);
cusp::krylov::cg(K,x,f);
std::vector<float> deltaX(K.num_rows);
thrust::copy(x.begin(), x.end(), deltaX.begin());
return deltaX;
}
//NewtonMethodStepper::NewtonMethodStepper(ElementMesh * mesh) : BaseStepper(mesh)
//{
/*
totalExternalForce = Eigen::Vector3f::Zero();
Eigen::Vector3f force(0,-0.1,0);
mesh->externalForcesPerVertex.push_back(force);
for (int i = 0; i < mesh->externalForcesPerVertex.size(); ++i)
{
totalExternalForce += mesh->externalForcesPerVertex[i];
}
*/
//}
//void NewtonMethodStepper::step()
//{
//std::cout << "Taking Newton's Method step" << std::endl;
//
//float stepSize = 0.01f; // TODO: Needs to be adaptively determined
//int numNonFixedVertices = 0;
//std::vector<int> nonFixedIndexes;
//Eigen::VectorXf totalForceVector(3*mesh->coords.size());
//totalForceVector.setZero();
//// DENSE MATRIX CODE
////Eigen::MatrixXf K(3*mesh->coords.size(), 3*mesh->coords.size());
////K.setZero();
//
//// SPARSE MATRIX CODE
//std::vector<Triplet> tripletsK;
//for (int sharedCoordI = 0; sharedCoordI < mesh->coords.size(); ++sharedCoordI)
//{
// if (mesh->sharedIndexBase.count(sharedCoordI) == 0)
// {
// ++numNonFixedVertices;
// nonFixedIndexes.push_back(sharedCoordI);
// if (sharedCoordI >= 72)//11*11*39 )//25*16)
// {
// totalForceVector.block(3*sharedCoordI, 0, 3, 1) = totalExternalForce; // TODO
// }
// }
// else
// {
// totalForceVector.block(3*sharedCoordI, 0, 3, 1) = Eigen::Vector3f::Zero();
// }
//}
//for (int elementI = 0; elementI < mesh->elements.size(); ++elementI)
//{
// HexElement * elem = (HexElement*) mesh->elements[elementI];
// std::vector<Eigen::Vector3f> elemDeformedCoords;
// for (int ii = 0; ii < elem->vertices.size(); ++ii)
// {
// elemDeformedCoords.push_back(mesh->coords[elem->vertices[ii]]);
// }
//
// for (int ii = 0; ii < elem->vertices.size(); ++ii)
// {
// int sharedCoordIndex = elem->vertices[ii];
//
// if (mesh->sharedIndexBase.count(sharedCoordIndex) > 0)
// {
// continue;
// }
// Eigen::Vector3f forceOnVertex = elem->getForce(elemDeformedCoords, ii);
// totalForceVector.block(3*sharedCoordIndex, 0, 3, 1) = totalForceVector.block(3*sharedCoordIndex, 0, 3, 1) + forceOnVertex;
// }
// //std::cout << "Total Force Vector: " << totalForceVector << std::endl;
// // put element K into total K
// Eigen::MatrixXf elementK = elem->stiffnessMatrix(elemDeformedCoords);
// for (int rowI = 0; rowI < elem->vertices.size(); ++rowI) // rows
// {
// int rowSharedCoordIndex = elem->vertices[rowI];
// for (int colI = 0; colI < elem->vertices.size(); ++colI) // columns
// {
// int colSharedCoordIndex = elem->vertices[colI];
// Eigen::Matrix3f elementKBlock = elementK.block(3*rowI, 3*colI, 3, 3);
//
// // DENSE MATRIX CODE
// //K.block(3*rowSharedCoordIndex, 3*colSharedCoordIndex, 3, 3) += elementKBlock;
// // SPARSE MATRIX CODE
// for (int r = 0; r < 3; ++r)
// {
// for (int c = 0; c < 3; ++c)
// {
// if (elementKBlock(r,c) != 0.0f)
// {
// tripletsK.push_back( Triplet(3*rowSharedCoordIndex + r, 3*colSharedCoordIndex + c, elementKBlock(r,c)) );
// }
// }
// }
// }
// }
//}
//
//// SPARSE MATRIC C
//SparseMatrix K(3*mesh->coords.size(), 3*mesh->coords.size());
//K.setFromTriplets(tripletsK.begin(), tripletsK.end());
//tripletsK.clear();
//Eigen::MatrixXf newK(3*numNonFixedVertices, 3*numNonFixedVertices);
//newK.setZero();
////cusp::array1d<int, cusp::device_memory> I(); // row indices
// //cusp::array1d<int, cusp::device_memory> J(); // column indices
// //cusp::array1d<float, cusp::device_memory> V(); // values
//Eigen::VectorXf newForce = Eigen::VectorXf::Zero(3*numNonFixedVertices);
//int nRowsNonFixed = 0;
//for (int rowI = 0; rowI < mesh->coords.size(); ++rowI)
//{
// // row fixed
// if (mesh->sharedIndexBase.count(rowI) > 0)
// {
// continue;
// }
//
// int nColsNonFixed = 0;
// for (int colI = 0; colI < mesh->coords.size(); ++colI)
// {
// if (mesh->sharedIndexBase.count(colI) > 0)
// {
// continue;
// }
// // DENSE MATRIX CODE
// // newK.block(3*nRowsNonFixed, 3*nColsNonFixed, 3, 3) += K.block(3*rowI, 3*colI, 3, 3);
//
// for (int r = 0; r < 3; ++r)
// {
// for (int c = 0; c < 3; ++c)
// {
// //I.
// //.push_back( Triplet(3*nRowsNonFixed + r, 3*nColsNonFixed + c, K.coeff(3*rowI + r, 3*colI + c)) );
// }
// }
//
// ++nColsNonFixed;
// }
//
// ++nRowsNonFixed;
//}
//
///*
//SparseMatrix newK(3*numNonFixedVertices, 3*numNonFixedVertices);;
//newK.setFromTriplets(tripletsNewK.begin(), tripletsNewK.end());
//tripletsNewK.clear();
//*/
//int nonFixedCount = 0;
//for (int ii = 0; ii < mesh->coords.size(); ++ii)
//{
// if (mesh->sharedIndexBase.count(ii) > 0)
// {
// continue;
// }
// newForce.block(3*nonFixedCount, 0, 3, 1) += totalForceVector.block(3*ii, 0, 3, 1);
// ++nonFixedCount;
//}
//
////Eigen::ConjugateGradient<SparseMatrix> cg;
////std::cout << "newK: " << newK;
////cg.compute(newK);
///*
//Eigen::VectorXf deltaX = Eigen::VectorXf::Random(3*numNonFixedVertices);
//cg.setMaxIterations(1);
//int i = 0;
//do
//{
// deltaX = cg.solveWithGuess(newForce,deltaX);
// std::cout << i << " : " << cg.error() << std::endl;
// ++i;
//} while (cg.info()!=Eigen::Success && i<100);
//*/
////Eigen::VectorXf deltaX(3*numNonFixedVertices);
////deltaX = cg.solve(newForce);
////std::cout << "Error: " << cg.error() << std::endl;
////const Eigen::VectorXf deltaX = chol.solve(newForce);
////std::cout << "newK: " << newK << std::endl;
////std::cout << "newForce: " << newForce << std::endl;
////std::cout << "deltaX: " << deltaX << std::endl;
//Eigen::VectorXf deltaX = newK.colPivHouseholderQr().solve(newForce);
//for (int ii = 0; ii < numNonFixedVertices; ++ii)
//{
// int sharedCoordIndex = nonFixedIndexes[ii];
// mesh->coords[sharedCoordIndex] += stepSize * deltaX.block(3*ii, 0, 3, 1);
//}
//}
| 3c40e18db14b6626ea7a7f32378514090cfe7f9d.cu | #include "NewtonSolverCusp.h"
// general cuda includes
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// cusp and thurst includes
#include <cusp/coo_matrix.h>
#include <cusp/print.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/zip_iterator.h>
#include <cusp/krylov/cg.h>
#include <cusp/monitor.h>
#include <thrust/copy.h>
std::vector<float> NewtonSolverCusp::step(std::vector<int> &stdI, std::vector<int> &stdJ, std::vector<float> &stdV, std::vector<float> &stdForce)
{
int num_triplets = stdI.size();
cusp::array1d<int, cusp::device_memory> I(num_triplets); // row indices
cusp::array1d<int, cusp::device_memory> J(num_triplets); // column indices
cusp::array1d<float, cusp::device_memory> V(num_triplets); // value
thrust::copy(stdI.begin(), stdI.end(), I.begin());
thrust::copy(stdJ.begin(), stdJ.end(), J.begin());
thrust::copy(stdV.begin(), stdV.end(), V.begin());
thrust::stable_sort_by_key(J.begin(), J.end(), thrust::make_zip_iterator(thrust::make_tuple(I.begin(), V.begin())));
thrust::stable_sort_by_key(I.begin(), I.end(), thrust::make_zip_iterator(thrust::make_tuple(J.begin(), V.begin())));
int num_entries = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1,
thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1,
int(0),
thrust::plus<int>(),
thrust::not_equal_to< thrust::tuple<int,int> >()) + 1;
int num_rows = stdForce.size();
int num_cols = stdForce.size();
cusp::coo_matrix<int, float, cusp::device_memory> K(num_rows, num_cols, num_entries);
thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())),
V.begin(),
thrust::make_zip_iterator(thrust::make_tuple(K.row_indices.begin(), K.column_indices.begin())),
K.values.begin(),
thrust::equal_to< thrust::tuple<int,int> >(),
thrust::plus<float>());
cusp::array1d<float, cusp::device_memory> x(K.num_rows, 0);
cusp::array1d<float, cusp::device_memory> f(K.num_rows);
thrust::copy(stdForce.begin(), stdForce.end(), f.begin());
// set stopping criteria:
// iteration_limit = 100
// relative_tolerance = 1e-3
//cusp::verbose_monitor<float> monitor(f, 100, 1e-3);
// set preconditioner (identity)
cusp::identity_operator<float, cusp::device_memory> M(K.num_rows, K.num_rows);
cusp::krylov::cg(K,x,f);
std::vector<float> deltaX(K.num_rows);
thrust::copy(x.begin(), x.end(), deltaX.begin());
return deltaX;
}
//NewtonMethodStepper::NewtonMethodStepper(ElementMesh * mesh) : BaseStepper(mesh)
//{
/*
totalExternalForce = Eigen::Vector3f::Zero();
Eigen::Vector3f force(0,-0.1,0);
mesh->externalForcesPerVertex.push_back(force);
for (int i = 0; i < mesh->externalForcesPerVertex.size(); ++i)
{
totalExternalForce += mesh->externalForcesPerVertex[i];
}
*/
//}
//void NewtonMethodStepper::step()
//{
//std::cout << "Taking Newton's Method step" << std::endl;
//
//float stepSize = 0.01f; // TODO: Needs to be adaptively determined
//int numNonFixedVertices = 0;
//std::vector<int> nonFixedIndexes;
//Eigen::VectorXf totalForceVector(3*mesh->coords.size());
//totalForceVector.setZero();
//// DENSE MATRIX CODE
////Eigen::MatrixXf K(3*mesh->coords.size(), 3*mesh->coords.size());
////K.setZero();
//
//// SPARSE MATRIX CODE
//std::vector<Triplet> tripletsK;
//for (int sharedCoordI = 0; sharedCoordI < mesh->coords.size(); ++sharedCoordI)
//{
// if (mesh->sharedIndexBase.count(sharedCoordI) == 0)
// {
// ++numNonFixedVertices;
// nonFixedIndexes.push_back(sharedCoordI);
// if (sharedCoordI >= 72)//11*11*39 )//25*16)
// {
// totalForceVector.block(3*sharedCoordI, 0, 3, 1) = totalExternalForce; // TODO
// }
// }
// else
// {
// totalForceVector.block(3*sharedCoordI, 0, 3, 1) = Eigen::Vector3f::Zero();
// }
//}
//for (int elementI = 0; elementI < mesh->elements.size(); ++elementI)
//{
// HexElement * elem = (HexElement*) mesh->elements[elementI];
// std::vector<Eigen::Vector3f> elemDeformedCoords;
// for (int ii = 0; ii < elem->vertices.size(); ++ii)
// {
// elemDeformedCoords.push_back(mesh->coords[elem->vertices[ii]]);
// }
//
// for (int ii = 0; ii < elem->vertices.size(); ++ii)
// {
// int sharedCoordIndex = elem->vertices[ii];
//
// if (mesh->sharedIndexBase.count(sharedCoordIndex) > 0)
// {
// continue;
// }
// Eigen::Vector3f forceOnVertex = elem->getForce(elemDeformedCoords, ii);
// totalForceVector.block(3*sharedCoordIndex, 0, 3, 1) = totalForceVector.block(3*sharedCoordIndex, 0, 3, 1) + forceOnVertex;
// }
// //std::cout << "Total Force Vector: " << totalForceVector << std::endl;
// // put element K into total K
// Eigen::MatrixXf elementK = elem->stiffnessMatrix(elemDeformedCoords);
// for (int rowI = 0; rowI < elem->vertices.size(); ++rowI) // rows
// {
// int rowSharedCoordIndex = elem->vertices[rowI];
// for (int colI = 0; colI < elem->vertices.size(); ++colI) // columns
// {
// int colSharedCoordIndex = elem->vertices[colI];
// Eigen::Matrix3f elementKBlock = elementK.block(3*rowI, 3*colI, 3, 3);
//
// // DENSE MATRIX CODE
// //K.block(3*rowSharedCoordIndex, 3*colSharedCoordIndex, 3, 3) += elementKBlock;
// // SPARSE MATRIX CODE
// for (int r = 0; r < 3; ++r)
// {
// for (int c = 0; c < 3; ++c)
// {
// if (elementKBlock(r,c) != 0.0f)
// {
// tripletsK.push_back( Triplet(3*rowSharedCoordIndex + r, 3*colSharedCoordIndex + c, elementKBlock(r,c)) );
// }
// }
// }
// }
// }
//}
//
//// SPARSE MATRIC C
//SparseMatrix K(3*mesh->coords.size(), 3*mesh->coords.size());
//K.setFromTriplets(tripletsK.begin(), tripletsK.end());
//tripletsK.clear();
//Eigen::MatrixXf newK(3*numNonFixedVertices, 3*numNonFixedVertices);
//newK.setZero();
////cusp::array1d<int, cusp::device_memory> I(); // row indices
// //cusp::array1d<int, cusp::device_memory> J(); // column indices
// //cusp::array1d<float, cusp::device_memory> V(); // values
//Eigen::VectorXf newForce = Eigen::VectorXf::Zero(3*numNonFixedVertices);
//int nRowsNonFixed = 0;
//for (int rowI = 0; rowI < mesh->coords.size(); ++rowI)
//{
// // row fixed
// if (mesh->sharedIndexBase.count(rowI) > 0)
// {
// continue;
// }
//
// int nColsNonFixed = 0;
// for (int colI = 0; colI < mesh->coords.size(); ++colI)
// {
// if (mesh->sharedIndexBase.count(colI) > 0)
// {
// continue;
// }
// // DENSE MATRIX CODE
// // newK.block(3*nRowsNonFixed, 3*nColsNonFixed, 3, 3) += K.block(3*rowI, 3*colI, 3, 3);
//
// for (int r = 0; r < 3; ++r)
// {
// for (int c = 0; c < 3; ++c)
// {
// //I.
// //.push_back( Triplet(3*nRowsNonFixed + r, 3*nColsNonFixed + c, K.coeff(3*rowI + r, 3*colI + c)) );
// }
// }
//
// ++nColsNonFixed;
// }
//
// ++nRowsNonFixed;
//}
//
///*
//SparseMatrix newK(3*numNonFixedVertices, 3*numNonFixedVertices);;
//newK.setFromTriplets(tripletsNewK.begin(), tripletsNewK.end());
//tripletsNewK.clear();
//*/
//int nonFixedCount = 0;
//for (int ii = 0; ii < mesh->coords.size(); ++ii)
//{
// if (mesh->sharedIndexBase.count(ii) > 0)
// {
// continue;
// }
// newForce.block(3*nonFixedCount, 0, 3, 1) += totalForceVector.block(3*ii, 0, 3, 1);
// ++nonFixedCount;
//}
//
////Eigen::ConjugateGradient<SparseMatrix> cg;
////std::cout << "newK: " << newK;
////cg.compute(newK);
///*
//Eigen::VectorXf deltaX = Eigen::VectorXf::Random(3*numNonFixedVertices);
//cg.setMaxIterations(1);
//int i = 0;
//do
//{
// deltaX = cg.solveWithGuess(newForce,deltaX);
// std::cout << i << " : " << cg.error() << std::endl;
// ++i;
//} while (cg.info()!=Eigen::Success && i<100);
//*/
////Eigen::VectorXf deltaX(3*numNonFixedVertices);
////deltaX = cg.solve(newForce);
////std::cout << "Error: " << cg.error() << std::endl;
////const Eigen::VectorXf deltaX = chol.solve(newForce);
////std::cout << "newK: " << newK << std::endl;
////std::cout << "newForce: " << newForce << std::endl;
////std::cout << "deltaX: " << deltaX << std::endl;
//Eigen::VectorXf deltaX = newK.colPivHouseholderQr().solve(newForce);
//for (int ii = 0; ii < numNonFixedVertices; ++ii)
//{
// int sharedCoordIndex = nonFixedIndexes[ii];
// mesh->coords[sharedCoordIndex] += stepSize * deltaX.block(3*ii, 0, 3, 1);
//}
//}
|
b4dd842203f1a1f7e84733933af91441ccc09391.hip | // !!! This is a file automatically generated by hipify!!!
///sta programa calcula la versin paralelizada del algoritmo FFT_DIF_DIT_TD (VARIABLES TIPO DOUBLE)
///(21/01/2017)
///sta versin sirve para graficar en matlab los errores absolutos y relativos Caso: N^20, Li=524,000, Lo=N
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,hipDoubleComplex *x,hipDoubleComplex *W,hipDoubleComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,hipDoubleComplex *z,hipDoubleComplex *W,hipDoubleComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
hipDoubleComplex *x_host;
hipDoubleComplex *W_host;
///hipDoubleComplex *y_host;
///hipDoubleComplex *z_host;
hipDoubleComplex *X_host;
hipDoubleComplex *x_device;
hipDoubleComplex *W_device;
hipDoubleComplex *y_device;
hipDoubleComplex *z_device;
hipDoubleComplex *X_device;
hipfftDoubleComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Nmero de elementos del vector de entrada
/// Li >>> Nmero de elementos de entrada diferentes de cero
/// Lo >>> Nmero de elementos de salida requeridos
/// loop >>> Nmero de iteraciones
/// muestras >>> Nmero de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el nmero de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de N_max
const int N_max = 20;
///Ingrese el valor de Li_max
const int Li_max = 524000;
///Ingrese el valor de Lo_max
const int Lo_max = 1048576;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Funcin principal
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
hipSetDevice(0);
hipGetDevice(&device);
if(device == 1)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
}
if(device == 0)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom,m;
double *parte_real;
double *parte_imag;
//float suma;
//float promedio[N_max];
FILE *da,*db;
//da = fopen("Tiempos_N20_LiN_LoVARIA_CUDA.bin","a+b"); //Crea o sobre escribe archivo
da = fopen("Resultados_N20_Li524000_LoN_real_CUDA_DO.bin","a+b"); //Crea o sobre escribe archivo
db = fopen("Resultados_N20_Li524000_LoN_imag_CUDA_DO.bin","a+b"); //Crea o sobre escribe archivo
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = N_max;i_N <= N_max;i_N++)
{
N = (int )pow(2,i_N);
//N = N_max;
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(k_res=Lo_max;k_res <= Lo_max;k_res++)
{
Lo=k_res;
printf("\n Li = %d Lo = %d",Li,Lo);
//////////////////////////////////////////////////////////
parte_real = (double*) malloc(Lo*sizeof(double));
parte_imag = (double*) malloc(Lo*sizeof(double));
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = 0.0;
parte_imag[m] = 0.0;
}
///Se abre el archivo binario
db_open = fopen("Entrada_real_N20_C.bin","rb");
dc_open = fopen("Entrada_imag_N20_C.bin","rb");
//suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li);
///Se genera el arreglo W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
hipEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Clculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Funcin auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Funcin auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Funcin auxiliar del host para ejecutar la etapa de salida
etapa_salida();
///printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
///printf("\n Li = %d Lo = %d",Li,Lo);
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
///SUMATORIAS
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = parte_real[m] + cuCreal(X_host[m]);
parte_imag[m] = parte_imag[m] + cuCimag(X_host[m]);
//printf("\n X[%d] = %.4f + (%.4f)",m,creal(X[m]),cimag(X[m]));
//fprintf(dc,"%f %f\n",creal(X[m]),cimag(X[m]));
}
////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
//suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
hipEventDestroy(start_app);
hipEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
}
///////////////////////////////////
///PROMEDIO DE ERRORES
for(m=0;m<=Lo-1;m++)
{
parte_real[m] = parte_real[m]/loop;
parte_imag[m] = parte_imag[m] /loop;
}
//////////////////////////////////
///Se imprimen los resultados en los archivos binarios
fwrite(parte_real,sizeof(double),Lo,da);
fwrite(parte_imag,sizeof(double),Lo,db);
//promedio[k_res-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
free(parte_real);
free(parte_imag);
}
}
}
//fwrite(promedio,sizeof(float),N_max,da);
fclose(da);
fclose(db);
return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//sta funcin genera el vector de entrada x[n]
void vector_entrada_xn(int Li)
{
//Declaracin de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuDoubleComplex((double)buffer_real[k],(double)buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCreal(x_host[k]),cuCimag(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//sta funcin genera el arreglo W
void arreglo_W(int N)
{
//Declaracin de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuDoubleComplex((double)cos((2*CUDART_PI*n)/N),(double)(-1)*sin((2*CUDART_PI*n)/N));
/*
if(n == 255)
{
printf("\nW[%d] = %f + %f",n-1,cuCrealf(W_host[n-1]),cuCimagf(W_host[n-1]));
}
*/
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCreal(W_host[n]),cuCimag(W_host[n]));
}
*/
}
//sta funcin genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaracin de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el nmero de factores de "N"
factor(N);
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//sta funcin encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//sta funcin encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Funcin auxiliar del host para calcular la etapa de entrada en el device
//Host-side helper that runs the input stage on the device.
//Allocates x_device, W_device and y_device, uploads x[n] and the twiddle
//table W, picks a 2-D launch configuration covering (P*Dop) x Dip threads,
//and launches inputStage_kernel.  The result is left in y_device.
//NOTE(review): hipMalloc/hipMemcpy/launch return codes are not checked;
//an allocation failure here only surfaces as garbage output later.
void etapa_entrada(void)
{
    //////////////////////////////////////////////////////////////////////////
    ////////////////////////////INPUT STAGE///////////////////////////////////
    //////////////////////////////////////////////////////////////////////////
    //Local variables (only used by the commented-out debug printout below)
    int k1,n1,n2;
    //Device memory for the "x_device" array
    hipMalloc((void**)&x_device,Li*sizeof(hipDoubleComplex));
    //Device memory for the "W_device" array
    hipMalloc((void**)&W_device,N*sizeof(hipDoubleComplex));
    //Device memory for the "y" array (P x Dip x Dop elements)
    hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(hipDoubleComplex));
    //Copy x_host into x_device
    hipMemcpy(x_device,x_host,Li*sizeof(hipDoubleComplex),hipMemcpyHostToDevice);
    //Send the W table to device global memory
    hipMemcpy(W_device,W_host,N*sizeof(hipDoubleComplex),hipMemcpyHostToDevice);
    //Host memory for "y" (debug only)
    //y_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*P*Dip*Dop);
    //Grid dimensioning for the "inputStage" kernel
    dim3 gridDim(1,1,1);
    //Block dimensioning
    dim3 blockDim(1,1,1);
    if((P*Dop) < 32 && (Dip) < 32)
    {
        //Small problem: a single block shaped exactly (P*Dop) x Dip
        blockDim.x = (P*Dop);
        blockDim.y = (Dip);
        gridDim.x = 1;
        gridDim.y = 1;
    }
    else
    {
        //General case: 32x32 blocks, grid rounded up to cover the domain
        blockDim.x = 32;
        blockDim.y = 32;
        gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
        gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
    }
    //Launch "inputStage_kernel"
    hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
    //Wait until the kernel has completely finished
    hipDeviceSynchronize();
    /*
    //Copia del arreglo "y" del device hacia el host
    hipMemcpy(y_host,y_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
    //Se imprimen los valores de "y"
    printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
    for(k1 = 0;k1 < Dip;k1++)
    {
        for(n1 = 0;n1 < Dop;n1++)
        {
            for(n2 = 0;n2 < P;n2++)
            {
                printf(" (%f) + (%f) ",cuCreal(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimag(y_host[(k1*Dop*P)+(n1*P)+n2]));
            }
            printf("\n");
        }
        printf("\n\n");
    }
    printf("\n");
    */
}
//Kernel function that executes the input stage on the device
//Input-stage kernel: maps x[n] into the 3-D working array
//  y(k1, n1, n2) = W^(n*k1) * x[n],  with n = n1 + Dop*n2,
//and zero-pads the entries with Li <= n <= P*Dop-1.
//Launch layout: 2-D grid, one thread per (n, k1) with
//0 <= n < P*Dop and 0 <= k1 < Dip.
//W holds W[j] = exp(-i*2*pi*(j+1)/N), so exponent e maps to index
//(e + N - 1) % N.  For the launch parameters used here n*k1 < N always
//holds (n < N/Dip, k1 < Dip), so this index equals n*k1 - 1 and behavior
//matches the original ((n*k1)%N)-1 form; unlike the original it cannot
//produce the out-of-bounds index -1 if the kernel is ever driven with a
//larger domain where n*k1 is a multiple of N.
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,hipDoubleComplex *x,hipDoubleComplex *W,hipDoubleComplex *y)
{
    int n1,n2;
    hipDoubleComplex t1;
    //Global thread coordinates
    int n = blockDim.x *blockIdx.x + threadIdx.x;
    int k1 = blockDim.y *blockIdx.y + threadIdx.y;
    if( (n < (P*Dop)) && (k1 < Dip))
    {
        //Decompose the flat index n into (n1, n2) with n = n1 + Dop*n2
        //(plain integer division; the original floorf() was a no-op here)
        n2 = n/Dop;
        n1 = n - (Dop*n2);
        //The n == 0 element depends only on x[0]
        if(n == 0)
        {
            y[(k1*Dop*P)+(0*P)+ 0] = x[0];
        }
        //Map x[n] onto the inputs of the first set of Dop DFT's
        if((n >= 1) && (n <= (Li-1)))
        {
            t1 = x[n];
            if(k1 == 0)
            {
                //k1 == 0: twiddle factor is W^0 = 1
                y[(0*Dop*P)+(n1*P)+ n2] = t1;
            }
            if(k1 >= 1)
            {
                //Twiddle index ((n*k1)+N-1)%N == (n*k1 - 1) mod N; safe
                //even when n*k1 is a multiple of N (selects W[N-1] == 1)
                y[(k1*Dop*P)+(n1*P)+ n2] = cuCmul(W[((n*k1)+N-1)%N],t1);
            }
        }
        //Zero-fill y for Li <= n <= (P*Dop)-1
        if((n >= Li) && (n <= (P*Dop)-1))
        {
            y[(k1*Dop*P)+(n1*P)+ n2] = make_cuDoubleComplex(0.0,0.0);
        }
    }
}
//Host helper function that computes the intermediate stage on the device
//Host-side helper that runs the intermediate stage on the device:
//performs Dip*Dop batched forward FFTs of length P over "y" using hipFFT,
//leaving the transformed data in z_device.
//NOTE(review): hipFFT/hipMemcpy return codes are not checked.
void etapa_intermedia(void)
{
    //////////////////////////////////////////////////////////////////////////
    ////////////////////////////INTERMEDIATE STAGE////////////////////////////
    //////////////////////////////////////////////////////////////////////////
    //Local variables (k1,k2,n1 only used by the commented-out debug code)
    int k1,k2,n1;
    //Batched-FFT geometry: each transform is length P, stored contiguously
    int n[1] = {P};
    int inembed[1] = {P};
    int onembed[1] = {P};
    //Device memory for "z"
    hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(hipDoubleComplex));
    //Host memory for "z" (debug only)
    //z_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*P*Dip*Dop);
    //Device memory for "in" and "out"
    hipMalloc((void**)&in,sizeof(hipfftDoubleComplex)*P*Dip*Dop);
    hipMalloc((void**)&out,sizeof(hipfftDoubleComplex)*P*Dip*Dop);
    //Copy the "y" array into "in"
    hipMemcpy(in,y_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
    //Create a plan: Dip*Dop batched 1-D Z2Z transforms of length P,
    //unit element stride, consecutive batches P elements apart
    hipfftHandle plan;
    hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_Z2Z,Dip*Dop);
    //Execute the plan
    hipfftExecZ2Z(plan,in,out,HIPFFT_FORWARD);
    //Wait until the FFT work has completely finished
    hipDeviceSynchronize();
    //Copy "out" into "z_device"
    hipMemcpy(z_device,out,sizeof(hipfftDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
    //Destroy the plan
    hipfftDestroy(plan);
    //Release the "in" and "out" arrays
    hipFree(in);
    hipFree(out);
    /*
    //Se copian los datos del arreglo "z_device" al arreglo "z_host"
    hipMemcpy(z_host,z_device,sizeof(hipDoubleComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
    ///Se imprimen los valores de z(n1,k2,k1)
    printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
    for(k1 = 0;k1 < Dip;k1++)
    {
        for(n1 = 0;n1 < Dop;n1++)
        {
            for(k2 = 0;k2 < P;k2++)
            {
                printf(" (%f) + (%f) ",cuCreal(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimag(z_host[(k1*Dop*P)+(n1*P)+k2]));
            }
            printf("\n");
        }
        printf("\n\n");
    }
    printf("\n");
    */
}
//Host helper function that computes the output stage on the device
//Host-side helper that runs the output stage on the device: allocates
//X_device/X_host, launches outputStage_kernel with one thread per output
//bin (Lo threads total), and copies the final spectrum X[k] back to the
//host.  NOTE(review): API return codes are not checked.
void etapa_salida(void)
{
    //////////////////////////////////////////////////////////////////////////
    ////////////////////////////OUTPUT STAGE//////////////////////////////////
    //////////////////////////////////////////////////////////////////////////
    //Local variable (only used by the commented-out debug printout)
    int m;
    //Device memory for "X"
    hipMalloc((void**)&X_device,Lo*sizeof(hipDoubleComplex));
    //Host memory for "X"
    X_host = (hipDoubleComplex*)malloc(sizeof(hipDoubleComplex)*Lo);
    //Grid dimensioning for the "outputStage" kernel
    dim3 gridDim(1,1,1);
    //Block dimensioning
    dim3 blockDim(1,1,1);
    if((Lo) < 1024)
    {
        //Small output: one block of exactly Lo threads
        blockDim.x = Lo;
        gridDim.x = 1;
    }
    else
    {
        //General case: 1024-thread blocks, grid rounded up
        blockDim.x = 1024;
        gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
    }
    //Launch "outputStage_kernel"
    hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
    //Wait until the kernel has completely finished
    hipDeviceSynchronize();
    //Copy the "X" array from device to host
    hipMemcpy(X_host,X_device,sizeof(hipDoubleComplex)*Lo,hipMemcpyDeviceToHost);
    /*
    //Se imprimen los valores de "X_host"
    ///Imprimir X[k]
    printf("\n\n--- ARREGLO X[k] ---\n\n");
    for(m=0;m<=Lo-1;m++)
    {
        printf("\n X[%d] = %f + (%f)",m,cuCreal(X_host[m]),cuCimag(X_host[m]));
        //fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
    }
    */
}
//Kernel function that executes the output stage on the device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,hipDoubleComplex *z,hipDoubleComplex *W,hipDoubleComplex *X)
{
//Declaracin de variables locales
int n1,k_aux,k1,k2,a,b;
hipDoubleComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Clculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Clculo de X(k) para 0<=k<=Dip-1.
//En la descomposicin k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el mtodo directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
//printf("\nk = %d,k_aux = %d,k2 = %d,k1 = %d",k,k_aux,k2,k1);
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCadd(X[k],cuCmul(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el mtodo filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
/*
if(k == 256)
{
printf("\nW = %d, k = %d,k_aux = %d,k2 = %d,k1 = %d, b= %d,z= %d",(((k2+(P*(b)))*Dip)%N)-1,k,k_aux,k2,k1,b,(k1*Dop*P)+((Dop-1)*P)+ (k2%P));
}
*/
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCadd(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
t4 = cuCsub(t3,t2);
/*
if(k == 256)
{
printf("\nW= %d",(((k2+(P*(b)))*Dip)%N)-1);
}
*/
}
if(n1 == (Dop-1))
{
t5 = cuCadd(z[(k1*Dop*P)+(k2%P)],t4);
X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
t5 = cuCadd(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
} | b4dd842203f1a1f7e84733933af91441ccc09391.cu | ///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD (VARIABLES TIPO DOUBLE)
///(21/01/2017)
///Ésta versión sirve para graficar en matlab los errores absolutos y relativos Caso: N^20, Li=524,000, Lo=N
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuDoubleComplex *x,cuDoubleComplex *W,cuDoubleComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuDoubleComplex *z,cuDoubleComplex *W,cuDoubleComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuDoubleComplex *x_host;
cuDoubleComplex *W_host;
///cuDoubleComplex *y_host;
///cuDoubleComplex *z_host;
cuDoubleComplex *X_host;
cuDoubleComplex *x_device;
cuDoubleComplex *W_device;
cuDoubleComplex *y_device;
cuDoubleComplex *z_device;
cuDoubleComplex *X_device;
cufftDoubleComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de N_max
const int N_max = 20;
///Ingrese el valor de Li_max
const int Li_max = 524000;
///Ingrese el valor de Lo_max
const int Lo_max = 1048576;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
//Program entry point: for the configured (N, Li, Lo) it runs the parallel
//FFT_DIF_DIT_TD pipeline "loop" times, accumulates the per-bin real and
//imaginary parts of X[k], averages them, and appends the averages to two
//binary output files (used later to plot absolute/relative errors).
//NOTE(review): with the current constants every for-loop over i_N/j_res/
//k_res executes exactly once; they are kept for parameter sweeps.
int main()
{
	//////////////////////////////////////////////////////////////////////////
	//////////////////////////DEVICE SELECTION////////////////////////////////
	//////////////////////////////////////////////////////////////////////////
	int device;
	cudaSetDevice(0);
	cudaGetDevice(&device);
	//NOTE(review): the device-id -> GPU-name mapping below is hard-coded
	//for one specific machine.
	if(device == 1)
	{
		printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
	}
	if(device == 0)
	{
		printf("\n\n---DEVICE = TESLA K20---\n\n");
	}
	//////////////////////////////////////////////////////////////////////////
	int i,j,i_N,j_res,k_res,cont,i_prom,m;
	double *parte_real;   //running sum (then mean) of Re{X[m]} over "loop" runs
	double *parte_imag;   //running sum (then mean) of Im{X[m]} over "loop" runs
	//float suma;
	//float promedio[N_max];
	FILE *da,*db;
	//da = fopen("Tiempos_N20_LiN_LoVARIA_CUDA.bin","a+b"); //Crea o sobre escribe archivo
	da = fopen("Resultados_N20_Li524000_LoN_real_CUDA_DO.bin","a+b"); //Creates or appends to the file
	db = fopen("Resultados_N20_Li524000_LoN_imag_CUDA_DO.bin","a+b"); //Creates or appends to the file
	//Pause until the user presses a key
	printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
	getchar();
	for(i_N = N_max;i_N <= N_max;i_N++)
	{
		N = (int )pow(2,i_N);
		//N = N_max;
		printf("\n N = %d \n",N);
		for(j_res=Li_max;j_res <= Li_max;j_res++)
		{
			Li=j_res;
			for(k_res=Lo_max;k_res <= Lo_max;k_res++)
			{
				Lo=k_res;
				printf("\n Li = %d Lo = %d",Li,Lo);
				//////////////////////////////////////////////////////////
				//Accumulators for the averaged spectrum, zero-initialised
				parte_real = (double*) malloc(Lo*sizeof(double));
				parte_imag = (double*) malloc(Lo*sizeof(double));
				for(m=0;m<=Lo-1;m++)
				{
					parte_real[m] = 0.0;
					parte_imag[m] = 0.0;
				}
				///Open the binary input files (real and imaginary parts)
				db_open = fopen("Entrada_real_N20_C.bin","rb");
				dc_open = fopen("Entrada_imag_N20_C.bin","rb");
				//suma=0.0;
				for(j=0;j<loop;j++)
				{
					//CUDA events used to time one run of the pipeline
					float elapsedTime_app;
					cudaEvent_t start_app, stop_app;
					cudaEventCreate(&start_app);
					cudaEventCreate(&stop_app);
					//Generate the host-side input vector x[n]
					vector_entrada_xn(Li);
					///Generate the twiddle-factor table W[N]
					arreglo_W(N);
					//---------------------------------------------------------------------------------------------
					//Start timing the application
					cudaEventRecord(start_app,0);
					//Compute the Dip and Dop factors on the host
					asign_rap(N,Li,Lo);
					//Compute the P factor on the host
					P = N/(Dip*Dop);
					//printf("\n\n FACTOR P:\n\n");
					//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
					//Run the input stage on the device
					etapa_entrada();
					//Run the intermediate stage on the device
					etapa_intermedia();
					//Run the output stage on the device
					etapa_salida();
					///printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
					///printf("\n Li = %d Lo = %d",Li,Lo);
					////////////////////////////////////////////////////////////////////////////////////////////
					////////////////////////////////////////////////////////////////////////////////////////////
					///Accumulate this run's spectrum into the running sums
					for(m=0;m<=Lo-1;m++)
					{
						parte_real[m] = parte_real[m] + cuCreal(X_host[m]);
						parte_imag[m] = parte_imag[m] + cuCimag(X_host[m]);
						//printf("\n X[%d] = %.4f + (%.4f)",m,creal(X[m]),cimag(X[m]));
						//fprintf(dc,"%f %f\n",creal(X[m]),cimag(X[m]));
					}
					////////////////////////////////////////////////////////////////////////////////////////////
					////////////////////////////////////////////////////////////////////////////////////////////
					//---------------------------------------------------------------------------------------------
					//Stop timing the application
					cudaEventRecord(stop_app,0);
					cudaEventSynchronize(stop_app);
					cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
					//Accumulate the elapsed times
					//suma = suma + elapsedTime_app;
					//Destroy the timing events
					cudaEventDestroy(start_app);
					cudaEventDestroy(stop_app);
					//Release host and device memory before the next run
					free(x_host);
					free(W_host);
					free(X_host);
					cudaFree(x_device);
					cudaFree(W_device);
					cudaFree(y_device);
					cudaFree(z_device);
					cudaFree(X_device);
				}
				///////////////////////////////////
				///Turn the sums into averages over "loop" runs
				for(m=0;m<=Lo-1;m++)
				{
					parte_real[m] = parte_real[m]/loop;
					parte_imag[m] = parte_imag[m] /loop;
				}
				//////////////////////////////////
				///Write the averaged results to the binary output files
				fwrite(parte_real,sizeof(double),Lo,da);
				fwrite(parte_imag,sizeof(double),Lo,db);
				//promedio[k_res-1] = suma/(float)loop;
				fclose(db_open);
				fclose(dc_open);
				free(parte_real);
				free(parte_imag);
			}
		}
	}
	//fwrite(promedio,sizeof(float),N_max,da);
	fclose(da);
	fclose(db);
	return EXIT_SUCCESS;
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
//Generates the host-side input vector x[n]: reads N single-precision
//samples (real and imaginary parts) from the already-open binary files
//db_open/dc_open and stores the first Li of them in the global x_host as
//double-precision complex values.
//NOTE(review): assumes Li <= N and that both files hold at least N floats;
//the fread return values are not checked — TODO confirm.
void vector_entrada_xn(int Li)
{
	//Local variables
	int k;
	float *buffer_real,*buffer_imag;
	//Host memory for x_host (freed by main after each run)
	x_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*Li);
	buffer_real = (float*)malloc(sizeof(float)*N);
	buffer_imag = (float*)malloc(sizeof(float)*N);
	///Read the input vector from the binary files
	fread(buffer_real,sizeof(float),N,db_open);
	fread(buffer_imag,sizeof(float),N,dc_open);
	//Fill x[n], promoting float to double
	for(k = 0;k < Li; k++)
	{
		//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
		//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
		x_host[k] = make_cuDoubleComplex((double)buffer_real[k],(double)buffer_imag[k]);
	}
	/*
	//Se imprimen los valores de entrada x[n]
	printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
	for(k=0;k<Li;k++)
	{
		printf(" %d-> (%f) + (%f)\n",k+1,cuCreal(x_host[k]),cuCimag(x_host[k]));
	}
	*/
	free(buffer_real);
	free(buffer_imag);
}
//Ésta función genera el arreglo W
// Builds the twiddle-factor table in the global W_host:
//   W_host[j] = exp(-i * 2*pi * (j+1) / N),  j = 0 .. N-1
// Note the one-based exponent: W_host[0] corresponds to W^1 and
// W_host[N-1] to W^N = 1.  Memory is malloc'd here and freed by main().
void arreglo_W(int N)
{
	W_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*N);
	for (int j = 0; j < N; j++)
	{
		double angle = (2*CUDART_PI*(j + 1))/N;
		W_host[j] = make_cuDoubleComplex(cos(angle), -sin(angle));
	}
}
//Ésta función genera los factores Dip y Dop
//Chooses the decomposition factors Dip and Dop (globals): among all divisor
//pairs of N it picks the pair closest (Euclidean distance) to the ideal
//ratios N/Li and N/Lo, subject to the constraint Dip <= N/Li.
//Inputs: N (transform length), Li (non-zero inputs), Lo (outputs wanted).
//Side effects: resets svF, fills the globals vF/Prod/a via factor() and
//product(), and writes the chosen factors into the globals Dip and Dop.
void asign_rap(int N,int Li,int Lo)
{
	//Local variables
	float NLi,NLo,Diprapt,Doprapt;
	int Nh[500];               //distinct prime factors of N
	int k[500];                //multiplicity of each distinct prime
	int G;                     //index of the last distinct prime
	int g,i,t,ta;
	int Dipt[500],Dopt[500];   //candidate divisors for Dip and Dop
	float distrapt,distrap;
	int Pos,h,Poss;
	int nk[500];               //prime multiplicities of the co-factor N/Dipt[g-1]
	int r;
	//Initialisation
	G = 0;
	svF = 0;
	//Ideal Dip and Dop values
	NLi=(float)N/(float)Li;
	NLo=(float)N/(float)Lo;
	Diprapt=NLi;
	Doprapt=NLo;
	//Find the prime factors of "N":
	//vF holds the factors, svF how many there are
	factor(N);
	/*
	Collect in Nh the distinct values found in vF, and in k how many times
	each one repeats (its multiplicity).
	*/
	Nh[0] = vF[0];
	k[0]=1;
	for(g=1;g<=svF-1;g=g+1)
	{
		if(vF[g]!=vF[g-1])
		{
			G=G+1;
			Nh[G]=vF[g];
			k[G]=1;
		}
		else
		{
			k[G]=k[G]+1;
		}
	}
	/*
	Enumerate in Prod every divisor of N; t holds how many there are.
	*/
	product(Nh,k,G);
	t = a;
	for(i=0;i<t;i=i+1)
	{
		Dipt[i]=Prod[i];
	}
	distrapt=inf;
	for(g=1;g<=t;g=g+1)
	{
		if(Dipt[g-1]<=NLi)
		{
			//Decode the mixed-radix position of this divisor to obtain
			//the prime exponents nk[] of its co-factor N/Dipt[g-1]
			Pos=g-1;
			for(h=0;h<=G;h=h+1)
			{
				Poss=floor(Pos/(k[h]+1));
				nk[h]=k[h]+Poss*(k[h]+1)-Pos;
				Pos=Poss;
			}
			//Enumerate the divisors of the co-factor: candidates for Dop
			product(Nh,nk,G);
			ta=a;
			for(i=0;i<ta;i=i+1)
			{
				Dopt[i]=Prod[i];
			}
			////////////////////////////////////////////
			//int j;
			//for(j=0;j<ta;j++)
			//{
			//	printf(" %d ",Dopt[j]);
			//}
			//printf("\n\n ta=%d\n\n",ta);
			///////////////////////////////////////////
			//Keep the (Dip, Dop) pair closest to the ideal point
			for(r=0;r<ta;r=r+1)
			{
				distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
				if(distrap<distrapt)
				{
					distrapt=distrap;
					Dip=Dipt[g-1];
					Dop=Dopt[r];
				}
			}
		}
	}
	/*
	printf("\n\n FACTOR Dip :\n\n");
	printf(" %d ",Dip);
	printf("\n\n FACTOR Dop:\n\n");
	printf(" %d ",Dop);
	*/
}
//Ésta función encuentra los factores de "N"
// Computes the prime factorization of N by trial division starting at 2.
// Each factor found is appended to the global array vF, and the global
// counter svF is advanced to record how many factors have been stored.
void factor(int N)
{
	long remaining = N;
	for (int divisor = 2; divisor <= remaining; divisor++)
	{
		while ((remaining % divisor) == 0)
		{
			vF[svF] = divisor;
			svF++;
			remaining = remaining / divisor;
		}
	}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
//Enumerates every number that can be formed as a product of the distinct
//primes vector_1[0..valor] raised to powers up to their multiplicities
//vector_2[0..valor] (i.e. all divisors of the number they factorize).
//Results are written to the global array Prod; the global counter "a" is
//left holding the number of products generated.
//NOTE(review): assumes the total number of divisors fits in Prod[500] —
//TODO confirm for large inputs.
void product(int vector_1[500],int vector_2[500],int valor)
{
	int d,e,s,pNh,i;
	int cont=0;
	Prod[0]=1;   //the empty product (divisor 1) seeds the list
	a=1;         //current number of entries in Prod
	for(d=0;d<=valor;d=d+1)
	{
		s=a;     //list size before appending powers of this prime
		pNh=1;
		for(e=1;e<=vector_2[d];e=e+1)
		{
			pNh=pNh*vector_1[d];   //pNh = vector_1[d]^e
			//Append pNh times each of the first s existing entries
			for(i=(s*e+1);i<=(s*e+s);i=i+1)
			{
				Prod[i-1]=pNh*Prod[cont];
				cont=cont+1;
			}
			a=a+s;
			cont=0;
		}
	}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
//Host-side helper that runs the input stage on the device.
//Allocates x_device, W_device and y_device, uploads x[n] and the twiddle
//table W, picks a 2-D launch configuration covering (P*Dop) x Dip threads,
//and launches inputStage_kernel.  The result is left in y_device.
//NOTE(review): cudaMalloc/cudaMemcpy/launch return codes are not checked;
//an allocation failure here only surfaces as garbage output later.
void etapa_entrada(void)
{
	//////////////////////////////////////////////////////////////////////////
	////////////////////////////INPUT STAGE///////////////////////////////////
	//////////////////////////////////////////////////////////////////////////
	//Local variables (only used by the commented-out debug printout below)
	int k1,n1,n2;
	//Device memory for the "x_device" array
	cudaMalloc((void**)&x_device,Li*sizeof(cuDoubleComplex));
	//Device memory for the "W_device" array
	cudaMalloc((void**)&W_device,N*sizeof(cuDoubleComplex));
	//Device memory for the "y" array (P x Dip x Dop elements)
	cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuDoubleComplex));
	//Copy x_host into x_device
	cudaMemcpy(x_device,x_host,Li*sizeof(cuDoubleComplex),cudaMemcpyHostToDevice);
	//Send the W table to device global memory
	cudaMemcpy(W_device,W_host,N*sizeof(cuDoubleComplex),cudaMemcpyHostToDevice);
	//Host memory for "y" (debug only)
	//y_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*P*Dip*Dop);
	//Grid dimensioning for the "inputStage" kernel
	dim3 gridDim(1,1,1);
	//Block dimensioning
	dim3 blockDim(1,1,1);
	if((P*Dop) < 32 && (Dip) < 32)
	{
		//Small problem: a single block shaped exactly (P*Dop) x Dip
		blockDim.x = (P*Dop);
		blockDim.y = (Dip);
		gridDim.x = 1;
		gridDim.y = 1;
	}
	else
	{
		//General case: 32x32 blocks, grid rounded up to cover the domain
		blockDim.x = 32;
		blockDim.y = 32;
		gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
		gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
	}
	//Launch "inputStage_kernel"
	inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
	//Wait until the kernel has completely finished
	cudaDeviceSynchronize();
	/*
	//Copia del arreglo "y" del device hacia el host
	cudaMemcpy(y_host,y_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
	//Se imprimen los valores de "y"
	printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
	for(k1 = 0;k1 < Dip;k1++)
	{
		for(n1 = 0;n1 < Dop;n1++)
		{
			for(n2 = 0;n2 < P;n2++)
			{
				printf(" (%f) + (%f) ",cuCreal(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimag(y_host[(k1*Dop*P)+(n1*P)+n2]));
			}
			printf("\n");
		}
		printf("\n\n");
	}
	printf("\n");
	*/
}
//función kernel que ejecuta la etapa de entrada en el device
//Input-stage kernel: maps x[n] into the 3-D working array
//  y(k1, n1, n2) = W^(n*k1) * x[n],  with n = n1 + Dop*n2,
//and zero-pads the entries with Li <= n <= P*Dop-1.
//Launch layout: 2-D grid, one thread per (n, k1) with
//0 <= n < P*Dop and 0 <= k1 < Dip.
//W holds W[j] = exp(-i*2*pi*(j+1)/N), so exponent e maps to index
//(e + N - 1) % N.  For the launch parameters used here n*k1 < N always
//holds (n < N/Dip, k1 < Dip), so this index equals n*k1 - 1 and behavior
//matches the original ((n*k1)%N)-1 form; unlike the original it cannot
//produce the out-of-bounds index -1 if the kernel is ever driven with a
//larger domain where n*k1 is a multiple of N.
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuDoubleComplex *x,cuDoubleComplex *W,cuDoubleComplex *y)
{
	int n1,n2;
	cuDoubleComplex t1;
	//Global thread coordinates
	int n = blockDim.x *blockIdx.x + threadIdx.x;
	int k1 = blockDim.y *blockIdx.y + threadIdx.y;
	if( (n < (P*Dop)) && (k1 < Dip))
	{
		//Decompose the flat index n into (n1, n2) with n = n1 + Dop*n2
		//(plain integer division; the original floorf() was a no-op here)
		n2 = n/Dop;
		n1 = n - (Dop*n2);
		//The n == 0 element depends only on x[0]
		if(n == 0)
		{
			y[(k1*Dop*P)+(0*P)+ 0] = x[0];
		}
		//Map x[n] onto the inputs of the first set of Dop DFT's
		if((n >= 1) && (n <= (Li-1)))
		{
			t1 = x[n];
			if(k1 == 0)
			{
				//k1 == 0: twiddle factor is W^0 = 1
				y[(0*Dop*P)+(n1*P)+ n2] = t1;
			}
			if(k1 >= 1)
			{
				//Twiddle index ((n*k1)+N-1)%N == (n*k1 - 1) mod N; safe
				//even when n*k1 is a multiple of N (selects W[N-1] == 1)
				y[(k1*Dop*P)+(n1*P)+ n2] = cuCmul(W[((n*k1)+N-1)%N],t1);
			}
		}
		//Zero-fill y for Li <= n <= (P*Dop)-1
		if((n >= Li) && (n <= (P*Dop)-1))
		{
			y[(k1*Dop*P)+(n1*P)+ n2] = make_cuDoubleComplex(0.0,0.0);
		}
	}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
//Host-side helper that runs the intermediate stage on the device:
//performs Dip*Dop batched forward FFTs of length P over "y" using cuFFT,
//leaving the transformed data in z_device.
//NOTE(review): cuFFT/cudaMemcpy return codes are not checked.
void etapa_intermedia(void)
{
	//////////////////////////////////////////////////////////////////////////
	////////////////////////////INTERMEDIATE STAGE////////////////////////////
	//////////////////////////////////////////////////////////////////////////
	//Local variables (k1,k2,n1 only used by the commented-out debug code)
	int k1,k2,n1;
	//Batched-FFT geometry: each transform is length P, stored contiguously
	int n[1] = {P};
	int inembed[1] = {P};
	int onembed[1] = {P};
	//Device memory for "z"
	cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuDoubleComplex));
	//Host memory for "z" (debug only)
	//z_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*P*Dip*Dop);
	//Device memory for "in" and "out"
	cudaMalloc((void**)&in,sizeof(cufftDoubleComplex)*P*Dip*Dop);
	cudaMalloc((void**)&out,sizeof(cufftDoubleComplex)*P*Dip*Dop);
	//Copy the "y" array into "in"
	cudaMemcpy(in,y_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
	//Create a plan: Dip*Dop batched 1-D Z2Z transforms of length P,
	//unit element stride, consecutive batches P elements apart
	cufftHandle plan;
	cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_Z2Z,Dip*Dop);
	//Execute the plan
	cufftExecZ2Z(plan,in,out,CUFFT_FORWARD);
	//Wait until the FFT work has completely finished
	cudaDeviceSynchronize();
	//Copy "out" into "z_device"
	cudaMemcpy(z_device,out,sizeof(cufftDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
	//Destroy the plan
	cufftDestroy(plan);
	//Release the "in" and "out" arrays
	cudaFree(in);
	cudaFree(out);
	/*
	//Se copian los datos del arreglo "z_device" al arreglo "z_host"
	cudaMemcpy(z_host,z_device,sizeof(cuDoubleComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
	///Se imprimen los valores de z(n1,k2,k1)
	printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
	for(k1 = 0;k1 < Dip;k1++)
	{
		for(n1 = 0;n1 < Dop;n1++)
		{
			for(k2 = 0;k2 < P;k2++)
			{
				printf(" (%f) + (%f) ",cuCreal(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimag(z_host[(k1*Dop*P)+(n1*P)+k2]));
			}
			printf("\n");
		}
		printf("\n\n");
	}
	printf("\n");
	*/
}
//Función auxiliar del host para calcular la etapa de salida en el device
//Host-side helper that runs the output stage on the device: allocates
//X_device/X_host, launches outputStage_kernel with one thread per output
//bin (Lo threads total), and copies the final spectrum X[k] back to the
//host.  NOTE(review): API return codes are not checked.
void etapa_salida(void)
{
	//////////////////////////////////////////////////////////////////////////
	////////////////////////////OUTPUT STAGE//////////////////////////////////
	//////////////////////////////////////////////////////////////////////////
	//Local variable (only used by the commented-out debug printout)
	int m;
	//Device memory for "X"
	cudaMalloc((void**)&X_device,Lo*sizeof(cuDoubleComplex));
	//Host memory for "X"
	X_host = (cuDoubleComplex*)malloc(sizeof(cuDoubleComplex)*Lo);
	//Grid dimensioning for the "outputStage" kernel
	dim3 gridDim(1,1,1);
	//Block dimensioning
	dim3 blockDim(1,1,1);
	if((Lo) < 1024)
	{
		//Small output: one block of exactly Lo threads
		blockDim.x = Lo;
		gridDim.x = 1;
	}
	else
	{
		//General case: 1024-thread blocks, grid rounded up
		blockDim.x = 1024;
		gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
	}
	//Launch "outputStage_kernel"
	outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
	//Wait until the kernel has completely finished
	cudaDeviceSynchronize();
	//Copy the "X" array from device to host
	cudaMemcpy(X_host,X_device,sizeof(cuDoubleComplex)*Lo,cudaMemcpyDeviceToHost);
	/*
	//Se imprimen los valores de "X_host"
	///Imprimir X[k]
	printf("\n\n--- ARREGLO X[k] ---\n\n");
	for(m=0;m<=Lo-1;m++)
	{
		printf("\n X[%d] = %f + (%f)",m,cuCreal(X_host[m]),cuCimag(X_host[m]));
		//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
	}
	*/
}
//función kernel que ejecuta la etapa de salida en el device
//Output-stage kernel: one thread per output bin k (0 <= k < Lo) accumulates
//X[k] from the intermediate array z and the twiddle table W, looping over
//n1 = 0 .. Dop-1.  Branches:
//  * Lo <= Dip          : plain sum over n1 (k = k1, k2 = 0)
//  * k < Dip            : same plain sum for the first Dip bins
//  * else, Dop <= 4     : direct twiddled summation
//  * else               : two-term ("2BF") filtering recurrence
//W holds W[j] = exp(-i*2*pi*(j+1)/N); exponent e is indexed as (e%N)-1.
//NOTE(review): in the direct-method branch the index
//((n1*(k2+P*a)*Dip)%N)-1 evaluates to -1 (out-of-bounds read of W)
//whenever the modulo is 0, which is reachable there since n1 >= 1
//multiplies the exponent — presumably W[N-1] (== 1) was intended; TODO
//confirm and fix as done in inputStage_kernel.
//NOTE(review): the n1 >= 1 branches reuse k1/k2/b/t1..t4 computed in the
//n1 == 0 iteration of the same thread's loop (loop-carried state).
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuDoubleComplex *z,cuDoubleComplex *W,cuDoubleComplex *X)
{
	//Local variables
	int n1,k_aux,k1,k2,a,b;
	cuDoubleComplex t1,t2,t3,t4,t5;
	//Global output index handled by this thread
	int k = blockDim.x *blockIdx.x + threadIdx.x;
	//Reset the debug flags (disabled)
	//flag_outputstage_1_d[0] = 0;
	//flag_outputstage_2_d[0] = 0;
	//flag_outputstage_3_d[0] = 0;
	if(k < Lo)
	{
		for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
		{
			if(Lo <= Dip)
			{
				//Compute X(k) for 0<=k<=Lo-1.
				//printf("\n--- Caso (Lo <= Dip) ---\n");
				//In the decomposition k = k1 + Dip*k2; k2 = 0, hence k = k1
				if(n1 == 0) //First term: guarantees X[k] gets initialised
				{
					X[k] = z[(k*Dop*P)+(0*P) + 0];
					///Flag
					//flag_outputstage_1_d[0] = 1;
				}
				else
				{
					if(n1 == 1)
					{
						X[k] = z[(k*Dop*P)+(0*P) + 0];
					}
					X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
					///Flag
					//flag_outputstage_1_d[0] = 1;
				}
			}
			else
			{
				if((k >= 0) && (k <= (Dip-1)))
				{
					//Compute X(k) for 0<=k<=Dip-1.
					//In the decomposition k = k1 + Dip*k2; k2 = 0, hence k = k1
					if(n1 == 0) //First term: guarantees X[k] gets initialised
					{
						X[k] = z[(k*Dop*P)+(0*P) + 0];
					}
					else
					{
						if(n1 == 1)
						{
							X[k] = z[(k*Dop*P)+(0*P) + 0];
						}
						X[k] = cuCadd(z[(k*Dop*P)+(n1*P) + 0],X[k]);
					}
				}
				else
				{
					if(Dop <= 4)
					{
						//Direct method (small Dop)
						//printf("\n--- Caso (Metodo directo) ---\n");
						if(n1 == 0) //First term: guarantees X[k] gets initialised
						{
							//Decompose k: k_aux = k mod (Dip*P), then (k1,k2)
							k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
							k2 = floorf(k_aux/Dip);
							k1 = k_aux-(Dip*k2);
							X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
							//printf("\nk = %d,k_aux = %d,k2 = %d,k1 = %d",k,k_aux,k2,k1);
							///Flag
							//flag_outputstage_2_d[0] = 1;
						}
						else
						{
							if(n1 == 1)
							{
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
							}
							a = floorf(k/(Dip*P));
							//NOTE(review): W index is -1 when the modulo is 0 (see header)
							X[k] = cuCadd(X[k],cuCmul(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
							///Flag
							//flag_outputstage_2_d[0] = 1;
						}
					}
					else
					{
						//Filtering ("2BF") method
						//printf("\n--- Caso (Filtro 2BF) ---\n");
						if((Dop-2) >= 1)
						{
							if(n1 == 0)
							{
								//Initialise the recurrence with the last slice of z
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
								b = floorf(k/(Dip*P));
								t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
								/*
								if(k == 256)
								{
									printf("\nW = %d, k = %d,k_aux = %d,k2 = %d,k1 = %d, b= %d,z= %d",(((k2+(P*(b)))*Dip)%N)-1,k,k_aux,k2,k1,b,(k1*Dop*P)+((Dop-1)*P)+ (k2%P));
								}
								*/
								///Flag
								//flag_outputstage_3_d[0] = 1;
							}
							if((n1 >= 1) && (n1 <= (Dop-2)))
							{
								//Recurrence step; t1/t4 carry state across iterations
								t2 = t1;
								t1 = cuCadd(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
								t3 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
								t4 = cuCsub(t3,t2);
								/*
								if(k == 256)
								{
									printf("\nW= %d",(((k2+(P*(b)))*Dip)%N)-1);
								}
								*/
							}
							if(n1 == (Dop-1))
							{
								//Final step of the recurrence produces X[k]
								t5 = cuCadd(z[(k1*Dop*P)+(k2%P)],t4);
								X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
							}
						}
						else
						{
							//NOTE(review): with Dop > 4 guaranteed above, this
							//whole branch looks unreachable — kept as-is.
							if(Dop == 1)
							{
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
								X[k] = t1;
								///Flag
								//flag_outputstage_3_d[0] = 1;
							}
							else
							{
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
								b = floorf(k/(Dip*P));
								t4 = cuCmul(t1,make_cuDoubleComplex(2*cuCreal(W[(((k2+(P*(b)))*Dip)%N)-1]),0.0));
								t5 = cuCadd(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
								X[k] = cuCsub(t5,cuCmul(t1,cuConj(W[(((k2+(P*(b)))*Dip)%N)-1])));
								///Flag
								//flag_outputstage_3_d[0] = 1;
							}
						}
					}
				}
			}
		}
	}
}
46876f220bc8afa4a5e7aefee76b871983ac2bd1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <fstream>
using namespace std;
// AES forward S-box (SubBytes / SubWord lookup table, FIPS-197).
unsigned char s_box[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16};
// GF(2^8) multiply-by-2 (xtime) lookup table.
// NOTE(review): not referenced by the code visible in this file — presumably
// intended for a MixColumns step elsewhere; confirm before removing.
unsigned char mul2[256] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5};
// GF(2^8) multiply-by-3 lookup table (mul3[x] == mul2[x] ^ x).
// NOTE(review): also unreferenced in this file — see mul2 above.
unsigned char mul3[256] = {
0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a};
// AES key-schedule round constants (Rcon), with the standard 0x8d at index 0
// and the 51-entry cycle repeated to fill all 256 indices.
unsigned char rcon[256] = {
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39,
0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef,
0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b,
0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94,
0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20,
0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f,
0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63,
0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd,
0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d};
// Kernel function to add the elements of two arrays
/*__global__ void add(int n, unsigned char *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = s_box[i];
}*/
// Applies the AES key-schedule core to one 4-byte word in place:
// RotWord (rotate left one byte), SubWord (S-box each byte), then XOR the
// round constant rcon[i] into the first byte.
void KeyExpansionCore(unsigned char *in, unsigned char i)
{
    // RotWord, done byte-wise. The previous version reinterpreted the buffer
    // as an unsigned int and bit-rotated it, which violates strict aliasing
    // and only produces a byte rotation on little-endian hosts; this form is
    // endian-independent and behaves identically on little-endian.
    unsigned char first = in[0];
    in[0] = in[1];
    in[1] = in[2];
    in[2] = in[3];
    in[3] = first;
    // SubWord: S-box substitution on all 4 bytes
    in[0] = s_box[in[0]];
    in[1] = s_box[in[1]];
    in[2] = s_box[in[2]];
    in[3] = s_box[in[3]];
    // XOR the round constant into the first byte
    in[0] ^= rcon[i];
}
// AES key expansion: derives the full round-key schedule from the original
// key on the host. `nivel` is the key size in bits (128/192/256); `expKeys`
// must hold 176/208/240 bytes respectively.
// Original note: "Nu functioneaza 256" (256-bit mode was reported broken).
void expandareCheie(unsigned char *cheieOriginala, unsigned char *expKeys, unsigned nivel)
{
    unsigned lungimeCheie = nivel / 8;
    unsigned lungimeCheieExpandata = (nivel == 128) ? 176 : (nivel == 192) ? 208 : (nivel == 256) ? 240 : 0;
    // The first round key is the original key itself.
    memcpy(expKeys, cheieOriginala, lungimeCheie);
    unsigned bytesGenerati = lungimeCheie;
    unsigned iteratii_rcon = 1;
    unsigned char temporar[4];
    while (bytesGenerati < lungimeCheieExpandata)
    {
        // Previous 4-byte word of the schedule.
        temporar[0] = expKeys[bytesGenerati - 4];
        temporar[1] = expKeys[bytesGenerati - 3];
        temporar[2] = expKeys[bytesGenerati - 2];
        temporar[3] = expKeys[bytesGenerati - 1];
        // Every key-length boundary gets the RotWord/SubWord/Rcon core.
        if (bytesGenerati % lungimeCheie == 0)
        {
            KeyExpansionCore(temporar, iteratii_rcon++);
        }
        // AES-256 only: extra SubWord on every fourth word.
        if (bytesGenerati % 32 == 16 && nivel == 256)
        {
            temporar[0] = s_box[temporar[0]];
            temporar[1] = s_box[temporar[1]];
            temporar[2] = s_box[temporar[2]];
            temporar[3] = s_box[temporar[3]];
        }
        // New word = word one key-length back XOR temporar. The original wrote
        //   expKeys[bytesGenerati++] = expKeys[bytesGenerati - lungimeCheie] ^ ...
        // which reads and modifies bytesGenerati in the same expression —
        // undefined behavior before C++17 — so the index is advanced separately.
        for (int k = 0; k < 4; ++k)
        {
            expKeys[bytesGenerati] = expKeys[bytesGenerati - lungimeCheie] ^ temporar[k];
            ++bytesGenerati;
        }
    }
    // NOTE(review): expKeys is not null-terminated, so streaming it as a C
    // string can read past the buffer; kept as-is to preserve output behavior.
    cout << expKeys;
}
// Demo driver: expands a hard-coded AES key into the full round-key schedule
// (host-side computation on managed memory) and writes both to "da.txt".
int main(void)
{
ofstream g("da.txt");
//int N = 256;
//unsigned char *y;
// key size in bits; hard-coded to 128, so the 192/256 branches below are dead
unsigned nivel = 128;
unsigned char *cheie = nullptr;
unsigned char *expKeys = nullptr;
//int blockSize = 256;
//int numBlocks = (N + blockSize - 1) / blockSize;
// Allocate Unified Memory accessible from CPU or GPU
//hipMallocManaged(&y, N*sizeof(unsigned char));
if (nivel == 128)
{
// 16-byte key + 176-byte expanded schedule.
// NOTE(review): hip* return codes are never checked anywhere in main.
hipMallocManaged(&cheie, 16*sizeof(unsigned char));
hipMemcpy(cheie, "u43x2l6gjng24edf", 16*sizeof(unsigned char), hipMemcpyHostToDevice);
// NOTE(review): cheie holds 16 bytes with no null terminator; operator<< on
// unsigned char* reads until a zero byte and may read past the allocation.
g<<cheie<<endl;
hipMallocManaged(&expKeys, 176*sizeof(unsigned char));
}
if (nivel == 192)
{
hipMallocManaged(&cheie, 24*sizeof(unsigned char));
hipMemcpy(cheie, "pyehxfiikibqunkkbwyydlqq", 24*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMallocManaged(&expKeys, 208*sizeof(unsigned char));
}
if (nivel == 256)
{
hipMallocManaged(&cheie, 32*sizeof(unsigned char));
hipMemcpy(cheie, "bstipsymvkpascpmdqahvtdwusnhzexv", 32*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMallocManaged(&expKeys, 240*sizeof(unsigned char));
}
// Run kernel on 1M elements on the GPU
// (comment kept from template — expandareCheie actually runs on the host)
expandareCheie(cheie, expKeys, nivel);
// NOTE(review): expKeys is likewise not null-terminated — see above.
g<<expKeys;
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
g.close();
// Free memory
hipFree(cheie);
hipFree(expKeys);
return 0;
} | 46876f220bc8afa4a5e7aefee76b871983ac2bd1.cu | #include <iostream>
#include <math.h>
#include <fstream>
using namespace std;
// AES forward S-box (SubBytes / SubWord lookup table, FIPS-197).
unsigned char s_box[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16};
// GF(2^8) multiply-by-2 (xtime) lookup table.
// NOTE(review): not referenced by the code visible in this file — presumably
// intended for a MixColumns step elsewhere; confirm before removing.
unsigned char mul2[256] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5};
// GF(2^8) multiply-by-3 lookup table (mul3[x] == mul2[x] ^ x).
// NOTE(review): also unreferenced in this file — see mul2 above.
unsigned char mul3[256] = {
0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a};
// AES key-schedule round constants (Rcon), with the standard 0x8d at index 0
// and the 51-entry cycle repeated to fill all 256 indices.
unsigned char rcon[256] = {
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39,
0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef,
0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b,
0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94,
0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20,
0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f,
0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63,
0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd,
0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d};
// Kernel function to add the elements of two arrays
/*__global__ void add(int n, unsigned char *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = s_box[i];
}*/
// Applies the AES key-schedule core to one 4-byte word in place:
// RotWord (rotate left one byte), SubWord (S-box each byte), then XOR the
// round constant rcon[i] into the first byte.
void KeyExpansionCore(unsigned char *in, unsigned char i)
{
    // RotWord, done byte-wise. The previous version reinterpreted the buffer
    // as an unsigned int and bit-rotated it, which violates strict aliasing
    // and only produces a byte rotation on little-endian hosts; this form is
    // endian-independent and behaves identically on little-endian.
    unsigned char first = in[0];
    in[0] = in[1];
    in[1] = in[2];
    in[2] = in[3];
    in[3] = first;
    // SubWord: S-box substitution on all 4 bytes
    in[0] = s_box[in[0]];
    in[1] = s_box[in[1]];
    in[2] = s_box[in[2]];
    in[3] = s_box[in[3]];
    // XOR the round constant into the first byte
    in[0] ^= rcon[i];
}
// AES key expansion: derives the full round-key schedule from the original
// key on the host. `nivel` is the key size in bits (128/192/256); `expKeys`
// must hold 176/208/240 bytes respectively.
// Original note: "Nu functioneaza 256" (256-bit mode was reported broken).
void expandareCheie(unsigned char *cheieOriginala, unsigned char *expKeys, unsigned nivel)
{
    unsigned lungimeCheie = nivel / 8;
    unsigned lungimeCheieExpandata = (nivel == 128) ? 176 : (nivel == 192) ? 208 : (nivel == 256) ? 240 : 0;
    // The first round key is the original key itself.
    memcpy(expKeys, cheieOriginala, lungimeCheie);
    unsigned bytesGenerati = lungimeCheie;
    unsigned iteratii_rcon = 1;
    unsigned char temporar[4];
    while (bytesGenerati < lungimeCheieExpandata)
    {
        // Previous 4-byte word of the schedule.
        temporar[0] = expKeys[bytesGenerati - 4];
        temporar[1] = expKeys[bytesGenerati - 3];
        temporar[2] = expKeys[bytesGenerati - 2];
        temporar[3] = expKeys[bytesGenerati - 1];
        // Every key-length boundary gets the RotWord/SubWord/Rcon core.
        if (bytesGenerati % lungimeCheie == 0)
        {
            KeyExpansionCore(temporar, iteratii_rcon++);
        }
        // AES-256 only: extra SubWord on every fourth word.
        if (bytesGenerati % 32 == 16 && nivel == 256)
        {
            temporar[0] = s_box[temporar[0]];
            temporar[1] = s_box[temporar[1]];
            temporar[2] = s_box[temporar[2]];
            temporar[3] = s_box[temporar[3]];
        }
        // New word = word one key-length back XOR temporar. The original wrote
        //   expKeys[bytesGenerati++] = expKeys[bytesGenerati - lungimeCheie] ^ ...
        // which reads and modifies bytesGenerati in the same expression —
        // undefined behavior before C++17 — so the index is advanced separately.
        for (int k = 0; k < 4; ++k)
        {
            expKeys[bytesGenerati] = expKeys[bytesGenerati - lungimeCheie] ^ temporar[k];
            ++bytesGenerati;
        }
    }
    // NOTE(review): expKeys is not null-terminated, so streaming it as a C
    // string can read past the buffer; kept as-is to preserve output behavior.
    cout << expKeys;
}
// Demo driver: expands a hard-coded AES key into the full round-key schedule
// (host-side computation on managed memory) and writes both to "da.txt".
int main(void)
{
ofstream g("da.txt");
//int N = 256;
//unsigned char *y;
// key size in bits; hard-coded to 128, so the 192/256 branches below are dead
unsigned nivel = 128;
unsigned char *cheie = nullptr;
unsigned char *expKeys = nullptr;
//int blockSize = 256;
//int numBlocks = (N + blockSize - 1) / blockSize;
// Allocate Unified Memory – accessible from CPU or GPU
//cudaMallocManaged(&y, N*sizeof(unsigned char));
if (nivel == 128)
{
// 16-byte key + 176-byte expanded schedule.
// NOTE(review): cuda* return codes are never checked anywhere in main.
cudaMallocManaged(&cheie, 16*sizeof(unsigned char));
cudaMemcpy(cheie, "u43x2l6gjng24edf", 16*sizeof(unsigned char), cudaMemcpyHostToDevice);
// NOTE(review): cheie holds 16 bytes with no null terminator; operator<< on
// unsigned char* reads until a zero byte and may read past the allocation.
g<<cheie<<endl;
cudaMallocManaged(&expKeys, 176*sizeof(unsigned char));
}
if (nivel == 192)
{
cudaMallocManaged(&cheie, 24*sizeof(unsigned char));
cudaMemcpy(cheie, "pyehxfiikibqunkkbwyydlqq", 24*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMallocManaged(&expKeys, 208*sizeof(unsigned char));
}
if (nivel == 256)
{
cudaMallocManaged(&cheie, 32*sizeof(unsigned char));
cudaMemcpy(cheie, "bstipsymvkpascpmdqahvtdwusnhzexv", 32*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMallocManaged(&expKeys, 240*sizeof(unsigned char));
}
// Run kernel on 1M elements on the GPU
// (comment kept from template — expandareCheie actually runs on the host)
expandareCheie(cheie, expKeys, nivel);
// NOTE(review): expKeys is likewise not null-terminated — see above.
g<<expKeys;
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
g.close();
// Free memory
cudaFree(cheie);
cudaFree(expKeys);
return 0;
} |
d3e85b1b603306cb4dd29d466d57df11f855222f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <metrics/adjusted_rand_index.cuh>
#include <metrics/contingencyMatrix.cuh>
#include <raft/cudart_utils.h>
#include <random>
namespace MLCommon {
namespace Metrics {
// Parameters for one adjusted-Rand-index test case.
struct adjustedRandIndexParam {
// number of label elements in each cluster array
int nElements;
// inclusive range of random label values
int lowerLabelRange;
int upperLabelRange;
// when true, both cluster arrays hold identical labels (ARI should be 1)
bool sameArrays;
// absolute tolerance used by ASSERT_NEAR when comparing ARI values
double tolerance;
// if this is true, then it is assumed that `sameArrays` is also true
// further it also assumes `lowerLabelRange` and `upperLabelRange` are 0
bool testZeroArray;
};
// Test fixture: generates two labelings, computes a host-side reference ARI
// from the contingency matrix, and compares it with the device result.
template <typename T, typename MathT = int>
class adjustedRandIndexTest : public ::testing::TestWithParam<adjustedRandIndexParam> {
 protected:
  // `stream` is declared before the uvector members (see member list below),
  // so it is already initialized to the default stream when these member
  // initializers read it.
  adjustedRandIndexTest() : firstClusterArray(0, stream), secondClusterArray(0, stream) {}
  void SetUp() override
  {
    RAFT_CUDA_TRY(hipStreamCreate(&stream));
    params    = ::testing::TestWithParam<adjustedRandIndexParam>::GetParam();
    nElements = params.nElements;
    firstClusterArray.resize(nElements, stream);
    secondClusterArray.resize(nElements, stream);
    RAFT_CUDA_TRY(
      hipMemsetAsync(firstClusterArray.data(), 0, firstClusterArray.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(
      hipMemsetAsync(secondClusterArray.data(), 0, secondClusterArray.size() * sizeof(T), stream));
    if (!params.testZeroArray) {
      SetUpDifferentArrays();
    } else {
      SetupZeroArray();
    }
    // allocating and initializing memory to the GPU
    computed_adjusted_rand_index = compute_adjusted_rand_index<T, MathT>(
      firstClusterArray.data(), secondClusterArray.data(), nElements, stream);
  }
  void TearDown() override { RAFT_CUDA_TRY(hipStreamDestroy(stream)); }
  // Generates two random labelings (identical when params.sameArrays),
  // computes the reference ARI on the host, and uploads the labels.
  void SetUpDifferentArrays()
  {
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }
    // golden output: host-side contingency matrix of the two labelings
    int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
    size_t sizeOfMat     = numUniqueClasses * numUniqueClasses * sizeof(int);
    int* hGoldenOutput   = (int*)malloc(sizeOfMat);
    memset(hGoldenOutput, 0, sizeOfMat);
    for (int i = 0; i < nElements; i++) {
      int row    = arr1[i] - lowerLabelRange;
      int column = arr2[i] - lowerLabelRange;
      hGoldenOutput[row * numUniqueClasses + column] += 1;
    }
    int sumOfNijCTwo = 0;
    int* a           = (int*)malloc(numUniqueClasses * sizeof(int));
    int* b           = (int*)malloc(numUniqueClasses * sizeof(int));
    memset(a, 0, numUniqueClasses * sizeof(int));
    memset(b, 0, numUniqueClasses * sizeof(int));
    int sumOfAiCTwo = 0;
    int sumOfBiCTwo = 0;
    // sum of C(Nij,2) over all cells plus row (a) and column (b) marginals
    for (int i = 0; i < numUniqueClasses; ++i) {
      for (int j = 0; j < numUniqueClasses; ++j) {
        int Nij = hGoldenOutput[i * numUniqueClasses + j];
        sumOfNijCTwo += ((Nij) * (Nij - 1)) / 2;
        a[i] += hGoldenOutput[i * numUniqueClasses + j];
        b[i] += hGoldenOutput[j * numUniqueClasses + i];
      }
    }
    // sum of C(a_i,2) and C(b_i,2) over the marginals
    for (int i = 0; i < numUniqueClasses; ++i) {
      sumOfAiCTwo += ((a[i]) * (a[i] - 1)) / 2;
      sumOfBiCTwo += ((b[i]) * (b[i] - 1)) / 2;
    }
    // ARI = (index - expected) / (max - expected), guarding division by zero
    double nCTwo         = double(nElements) * double(nElements - 1) / 2.0;
    double expectedIndex = (double(sumOfBiCTwo) * double(sumOfAiCTwo)) / double(nCTwo);
    double maxIndex      = (double(sumOfAiCTwo) + double(sumOfBiCTwo)) / 2.0;
    double index         = (double)sumOfNijCTwo;
    if (maxIndex - expectedIndex)
      truth_adjusted_rand_index = (index - expectedIndex) / (maxIndex - expectedIndex);
    else
      truth_adjusted_rand_index = 0;
    // the original version leaked these host-side scratch buffers
    free(hGoldenOutput);
    free(a);
    free(b);
    raft::update_device(firstClusterArray.data(), &arr1[0], nElements, stream);
    raft::update_device(secondClusterArray.data(), &arr2[0], nElements, stream);
  }
  // Degenerate all-zero-label case: both labelings are identical, ARI is 1.
  void SetupZeroArray()
  {
    lowerLabelRange           = 0;
    upperLabelRange           = 0;
    truth_adjusted_rand_index = 1.0;
  }
  adjustedRandIndexParam params;
  T lowerLabelRange, upperLabelRange;
  // Declared before the uvectors: non-static data members are initialized in
  // declaration order, and the constructor's member initializers pass
  // `stream` to the uvectors. Previously this was declared last, so the
  // uvectors were constructed from an uninitialized stream handle.
  hipStream_t stream = 0;
  rmm::device_uvector<T> firstClusterArray;
  rmm::device_uvector<T> secondClusterArray;
  int nElements                       = 0;
  double truth_adjusted_rand_index    = 0;
  double computed_adjusted_rand_index = 0;
};
// Test matrix: {nElements, lowerLabelRange, upperLabelRange, sameArrays,
// tolerance, testZeroArray}.
const std::vector<adjustedRandIndexParam> inputs = {
{199, 1, 10, false, 0.000001, false},
{200, 15, 100, false, 0.000001, false},
{100, 1, 20, false, 0.000001, false},
{10, 1, 10, false, 0.000001, false},
{198, 1, 100, false, 0.000001, false},
{300, 3, 99, false, 0.000001, false},
{199, 1, 10, true, 0.000001, false},
{200, 15, 100, true, 0.000001, false},
{100, 1, 20, true, 0.000001, false},
// FIXME: disabled temporarily due to flaky test
// {10, 1, 10, true, 0.000001, false},
{198, 1, 100, true, 0.000001, false},
{300, 3, 99, true, 0.000001, false},
{199, 0, 0, false, 0.000001, true},
{200, 0, 0, false, 0.000001, true},
{100, 0, 0, false, 0.000001, true},
{10, 0, 0, false, 0.000001, true},
{198, 0, 0, false, 0.000001, true},
{300, 0, 0, false, 0.000001, true},
{199, 0, 0, true, 0.000001, true},
{200, 0, 0, true, 0.000001, true},
{100, 0, 0, true, 0.000001, true},
{10, 0, 0, true, 0.000001, true},
{198, 0, 0, true, 0.000001, true},
{300, 0, 0, true, 0.000001, true},
};
// Large cases (2M elements); only instantiated for the 64-bit math variant
// below (ARI_il).
const std::vector<adjustedRandIndexParam> large_inputs = {
{2000000, 1, 1000, false, 0.000001, false},
{2000000, 1, 1000, true, 0.000001, false},
{2000000, 0, 0, false, 0.000001, true},
{2000000, 0, 0, true, 0.000001, true},
};
// ARI with 32-bit intermediate math (MathT = int).
typedef adjustedRandIndexTest<int, int> ARI_ii;
TEST_P(ARI_ii, Result)
{
ASSERT_NEAR(computed_adjusted_rand_index, truth_adjusted_rand_index, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(adjusted_rand_index, ARI_ii, ::testing::ValuesIn(inputs));
// ARI with 64-bit intermediate math (MathT = unsigned long long) —
// presumably to avoid overflow; it is the only variant given large_inputs.
typedef adjustedRandIndexTest<int, unsigned long long> ARI_il;
TEST_P(ARI_il, Result)
{
ASSERT_NEAR(computed_adjusted_rand_index, truth_adjusted_rand_index, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(adjusted_rand_index, ARI_il, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(adjusted_rand_index_large, ARI_il, ::testing::ValuesIn(large_inputs));
} // end namespace Metrics
} // end namespace MLCommon
| d3e85b1b603306cb4dd29d466d57df11f855222f.cu | /*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "test_utils.h"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <metrics/adjusted_rand_index.cuh>
#include <metrics/contingencyMatrix.cuh>
#include <raft/cudart_utils.h>
#include <random>
namespace MLCommon {
namespace Metrics {
// Parameters for one adjusted-Rand-index test case.
struct adjustedRandIndexParam {
// number of label elements in each cluster array
int nElements;
// inclusive range of random label values
int lowerLabelRange;
int upperLabelRange;
// when true, both cluster arrays hold identical labels (ARI should be 1)
bool sameArrays;
// absolute tolerance used by ASSERT_NEAR when comparing ARI values
double tolerance;
// if this is true, then it is assumed that `sameArrays` is also true
// further it also assumes `lowerLabelRange` and `upperLabelRange` are 0
bool testZeroArray;
};
// Test fixture: generates two labelings, computes a host-side reference ARI
// from the contingency matrix, and compares it with the device result.
template <typename T, typename MathT = int>
class adjustedRandIndexTest : public ::testing::TestWithParam<adjustedRandIndexParam> {
 protected:
  // `stream` is declared before the uvector members (see member list below),
  // so it is already initialized to the default stream when these member
  // initializers read it.
  adjustedRandIndexTest() : firstClusterArray(0, stream), secondClusterArray(0, stream) {}
  void SetUp() override
  {
    RAFT_CUDA_TRY(cudaStreamCreate(&stream));
    params    = ::testing::TestWithParam<adjustedRandIndexParam>::GetParam();
    nElements = params.nElements;
    firstClusterArray.resize(nElements, stream);
    secondClusterArray.resize(nElements, stream);
    RAFT_CUDA_TRY(
      cudaMemsetAsync(firstClusterArray.data(), 0, firstClusterArray.size() * sizeof(T), stream));
    RAFT_CUDA_TRY(
      cudaMemsetAsync(secondClusterArray.data(), 0, secondClusterArray.size() * sizeof(T), stream));
    if (!params.testZeroArray) {
      SetUpDifferentArrays();
    } else {
      SetupZeroArray();
    }
    // allocating and initializing memory to the GPU
    computed_adjusted_rand_index = compute_adjusted_rand_index<T, MathT>(
      firstClusterArray.data(), secondClusterArray.data(), nElements, stream);
  }
  void TearDown() override { RAFT_CUDA_TRY(cudaStreamDestroy(stream)); }
  // Generates two random labelings (identical when params.sameArrays),
  // computes the reference ARI on the host, and uploads the labels.
  void SetUpDifferentArrays()
  {
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }
    // golden output: host-side contingency matrix of the two labelings
    int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
    size_t sizeOfMat     = numUniqueClasses * numUniqueClasses * sizeof(int);
    int* hGoldenOutput   = (int*)malloc(sizeOfMat);
    memset(hGoldenOutput, 0, sizeOfMat);
    for (int i = 0; i < nElements; i++) {
      int row    = arr1[i] - lowerLabelRange;
      int column = arr2[i] - lowerLabelRange;
      hGoldenOutput[row * numUniqueClasses + column] += 1;
    }
    int sumOfNijCTwo = 0;
    int* a           = (int*)malloc(numUniqueClasses * sizeof(int));
    int* b           = (int*)malloc(numUniqueClasses * sizeof(int));
    memset(a, 0, numUniqueClasses * sizeof(int));
    memset(b, 0, numUniqueClasses * sizeof(int));
    int sumOfAiCTwo = 0;
    int sumOfBiCTwo = 0;
    // sum of C(Nij,2) over all cells plus row (a) and column (b) marginals
    for (int i = 0; i < numUniqueClasses; ++i) {
      for (int j = 0; j < numUniqueClasses; ++j) {
        int Nij = hGoldenOutput[i * numUniqueClasses + j];
        sumOfNijCTwo += ((Nij) * (Nij - 1)) / 2;
        a[i] += hGoldenOutput[i * numUniqueClasses + j];
        b[i] += hGoldenOutput[j * numUniqueClasses + i];
      }
    }
    // sum of C(a_i,2) and C(b_i,2) over the marginals
    for (int i = 0; i < numUniqueClasses; ++i) {
      sumOfAiCTwo += ((a[i]) * (a[i] - 1)) / 2;
      sumOfBiCTwo += ((b[i]) * (b[i] - 1)) / 2;
    }
    // ARI = (index - expected) / (max - expected), guarding division by zero
    double nCTwo         = double(nElements) * double(nElements - 1) / 2.0;
    double expectedIndex = (double(sumOfBiCTwo) * double(sumOfAiCTwo)) / double(nCTwo);
    double maxIndex      = (double(sumOfAiCTwo) + double(sumOfBiCTwo)) / 2.0;
    double index         = (double)sumOfNijCTwo;
    if (maxIndex - expectedIndex)
      truth_adjusted_rand_index = (index - expectedIndex) / (maxIndex - expectedIndex);
    else
      truth_adjusted_rand_index = 0;
    // the original version leaked these host-side scratch buffers
    free(hGoldenOutput);
    free(a);
    free(b);
    raft::update_device(firstClusterArray.data(), &arr1[0], nElements, stream);
    raft::update_device(secondClusterArray.data(), &arr2[0], nElements, stream);
  }
  // Degenerate all-zero-label case: both labelings are identical, ARI is 1.
  void SetupZeroArray()
  {
    lowerLabelRange           = 0;
    upperLabelRange           = 0;
    truth_adjusted_rand_index = 1.0;
  }
  adjustedRandIndexParam params;
  T lowerLabelRange, upperLabelRange;
  // Declared before the uvectors: non-static data members are initialized in
  // declaration order, and the constructor's member initializers pass
  // `stream` to the uvectors. Previously this was declared last, so the
  // uvectors were constructed from an uninitialized stream handle.
  cudaStream_t stream = 0;
  rmm::device_uvector<T> firstClusterArray;
  rmm::device_uvector<T> secondClusterArray;
  int nElements                       = 0;
  double truth_adjusted_rand_index    = 0;
  double computed_adjusted_rand_index = 0;
};
// Parameter sets for the small adjusted-Rand-index tests.
// NOTE(review): based on how the fixture uses them, the fields appear to be
// {nElements, lowerLabelRange, upperLabelRange, <bool>, tolerance, <zero-array bool>}
// (the last flag true together with 0,0 ranges selects SetupZeroArray) --
// confirm against the adjustedRandIndexParam definition.
const std::vector<adjustedRandIndexParam> inputs = {
{199, 1, 10, false, 0.000001, false},
{200, 15, 100, false, 0.000001, false},
{100, 1, 20, false, 0.000001, false},
{10, 1, 10, false, 0.000001, false},
{198, 1, 100, false, 0.000001, false},
{300, 3, 99, false, 0.000001, false},
{199, 1, 10, true, 0.000001, false},
{200, 15, 100, true, 0.000001, false},
{100, 1, 20, true, 0.000001, false},
// FIXME: disabled temporarily due to flaky test
// {10, 1, 10, true, 0.000001, false},
{198, 1, 100, true, 0.000001, false},
{300, 3, 99, true, 0.000001, false},
{199, 0, 0, false, 0.000001, true},
{200, 0, 0, false, 0.000001, true},
{100, 0, 0, false, 0.000001, true},
{10, 0, 0, false, 0.000001, true},
{198, 0, 0, false, 0.000001, true},
{300, 0, 0, false, 0.000001, true},
{199, 0, 0, true, 0.000001, true},
{200, 0, 0, true, 0.000001, true},
{100, 0, 0, true, 0.000001, true},
{10, 0, 0, true, 0.000001, true},
{198, 0, 0, true, 0.000001, true},
{300, 0, 0, true, 0.000001, true},
};
// Stress-sized parameter sets (2M elements); run only for the ARI_il
// instantiation below, where the wider accumulator type avoids overflow.
const std::vector<adjustedRandIndexParam> large_inputs = {
{2000000, 1, 1000, false, 0.000001, false},
{2000000, 1, 1000, true, 0.000001, false},
{2000000, 0, 0, false, 0.000001, true},
{2000000, 0, 0, true, 0.000001, true},
};
// int labels, int accumulators.
typedef adjustedRandIndexTest<int, int> ARI_ii;
TEST_P(ARI_ii, Result)
{
// The device-computed index must match the host reference within tolerance.
ASSERT_NEAR(computed_adjusted_rand_index, truth_adjusted_rand_index, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(adjusted_rand_index, ARI_ii, ::testing::ValuesIn(inputs));
// int labels, 64-bit unsigned accumulators -- also instantiated with the
// 2M-element inputs, which need the wider type.
typedef adjustedRandIndexTest<int, unsigned long long> ARI_il;
TEST_P(ARI_il, Result)
{
// The device-computed index must match the host reference within tolerance.
ASSERT_NEAR(computed_adjusted_rand_index, truth_adjusted_rand_index, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(adjusted_rand_index, ARI_il, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(adjusted_rand_index_large, ARI_il, ::testing::ValuesIn(large_inputs));
} // end namespace Metrics
} // end namespace MLCommon
|
91ba2b9fd887c0d30686ced112a28c1fd4266d9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// textures;
%s
struct Params
{
%s
};
// Code-generation template: each %s below is substituted by the host program
// (parameter fields, their initialization, and the user-authored shader body,
// which must provide evalRay -- or evalRgba when EVAL_RGBA is defined).
class Shader
{
public:
// params
float time;
%s
// init params
%s
// random generator (using simple LCG for now, TODO: use a better one)
unsigned int seed;
GPU float rand() { seed = 1103515245u*seed + 12345u; return (seed + 1) * 2.328306435454494e-10f; }
// the shader itself
#line 0
%s
#ifdef EVAL_RGBA
// volume accumulation using evalRgba
// Marches from S to E in fixed 0.001 steps, compositing clamped RGBA samples
// front-to-back; bails out once transmittance drops below 1%.
GPU float3 evalRayRgba(float3 S, float3 E, float T)
{
float rayLength = length(E - S);
float3 rayDirection = normalize(E - S);
float3 result = make_float3(0);
// seeThrough = remaining transmittance along the ray (starts fully clear).
float seeThrough = 1;
for (float t = 0 /*rand() * 0.001f*/; t < rayLength; t += 0.001f)
{
float3 pos = S + t * rayDirection;
float4 rgba = clamp(evalRgba(pos, rayDirection, T), 0, 1);
if (rgba.w == 0) continue;
result += seeThrough * rgba.w * make_float3(rgba);
seeThrough *= 1 - rgba.w;
if (seeThrough < 0.01f) break;
}
return result;
}
#endif
};
// Parametric entry/exit of the 1-D ray p + t*d against the slab [slab.x, slab.y].
// A near-zero direction component means the ray never crosses the slab planes,
// so it is treated as always inside: (-INF, INF).
GPU float2 intersectSlab(float p, float d, float2 slab)
{
    if (fabs(d) < 0.0001f) return make_float2(-INF, INF);
    float tNear = (slab.x - p) / d;
    float tFar = (slab.y - p) / d;
    if (tNear > tFar)
    {
        float tmp = tNear; tNear = tFar; tFar = tmp;
    }
    return make_float2(tNear, tFar);
}
// Intersection of two closed intervals; (INF, -INF) marks the empty result.
GPU float2 intersectIntervals(float2 a, float2 b)
{
    // Order the operands so that `a` is the interval that starts first.
    if (b.x < a.x)
    {
        float2 swap = a; a = b; b = swap;
    }
    // Disjoint: the later interval starts after the earlier one ends.
    if (b.x > a.y) return make_float2(INF, -INF);
    return make_float2(b.x, min(a.y, b.y));
}
// Ray / axis-aligned [-1,1]^3 cube intersection via the slab method:
// intersect the per-axis parameter intervals.
GPU float2 intersectUnitCube(float3 p, float3 d)
{
    const float2 unitSlab = make_float2(-1, 1);
    float2 tyz = intersectIntervals(intersectSlab(p.y, d.y, unitSlab),
                                    intersectSlab(p.z, d.z, unitSlab));
    return intersectIntervals(intersectSlab(p.x, d.x, unitSlab), tyz);
}
// 8-bit RGBA pixel; packs a linear float3 color with scaling and gamma correction.
class Rgba
{
public:
unsigned char r, g, b, a;
// Scale, clamp to [0,1], apply invGamma, quantize to 0..255; alpha forced opaque.
GPU Rgba(float3 rgb, float scale, float invGamma)
{
rgb = clamp(rgb * scale, 0.0f, 1.0f);
r = (unsigned char)(pow(rgb.x, invGamma) * 255);
g = (unsigned char)(pow(rgb.y, invGamma) * 255);
b = (unsigned char)(pow(rgb.z, invGamma) * 255);
a = 255;
}
// Default: black.
// NOTE(review): a is 1 here but 255 in the other constructor -- 1/255 is
// nearly transparent, which looks inconsistent; confirm intent before changing.
GPU Rgba()
{
r = g = b = 0;
a = 1;
}
};
extern "C" {
// Per-pixel entry point: 2-D launch, one thread per output pixel.
// dist <= 0 selects an orthographic camera; otherwise perspective with focal
// distance `dist`. The ray is clipped to the [-1,1]^3 volume and the shader
// is integrated along the clipped segment. hdrOut/ldrOut may each be null.
__global__ void run(
float time,
int imgWidth,
int imgHeight,
Frame* frame,
float zoom,
float dist,
Params* params,
float3* hdrOut,
Rgba* ldrOut)
{
// get image pixel coordinates
int j = threadIdx.x + blockDim.x * blockIdx.x;
int i = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= imgHeight || j >= imgWidth) return;
// get normalized coordinates (vertical image axis maps to [-1,1])
float x = j * 2 - imgWidth;
float y = i * 2 - imgHeight;
x /= imgHeight; y /= imgHeight;
// setup ray
float3 rayOrigin, rayDirection;
if (dist <= 0)
{
rayOrigin = frame->getPointToWorld(make_float3(x / zoom, y / zoom, 0));
rayDirection = frame->getVectorToWorld(make_float3(0, 0, 1));
}
else
{
rayOrigin = frame->origin;
rayDirection = normalize(frame->getVectorToWorld(make_float3(x / zoom, y / zoom, dist)));
}
// clip the ray to the [-1,1]^3 cube
float2 interval = intersectUnitCube(rayOrigin, rayDirection);
float3 color;
Shader shader;
shader.init(params);
// interval.x == INF is the empty-intersection sentinel: ray misses the cube.
if (interval.x == INF) color = make_float3(0);
else
{
float3 start = rayOrigin + interval.x * rayDirection;
float3 end = rayOrigin + interval.y * rayDirection;
// initialize random generator (per-pixel seed from a 256x256 random texture)
shader.seed = tex2D(randomTexture, (j & 255) + 0.5f, (i & 255) + 0.5f);
// evaluate ray
#ifdef EVAL_RGBA
color = shader.evalRayRgba(start, end, time);
#else
color = shader.evalRay(start, end, time);
#endif
}
if (hdrOut != 0)
hdrOut[i*imgWidth + j] = color;
if (ldrOut != 0)
ldrOut[i*imgWidth + j] = Rgba(color, 1, 1.0f / 2.2f);
}
}
| 91ba2b9fd887c0d30686ced112a28c1fd4266d9f.cu | // textures;
%s
struct Params
{
%s
};
// Code-generation template: each %s below is substituted by the host program
// (parameter fields, their initialization, and the user-authored shader body,
// which must provide evalRay -- or evalRgba when EVAL_RGBA is defined).
class Shader
{
public:
// params
float time;
%s
// init params
%s
// random generator (using simple LCG for now, TODO: use a better one)
unsigned int seed;
GPU float rand() { seed = 1103515245u*seed + 12345u; return (seed + 1) * 2.328306435454494e-10f; }
// the shader itself
#line 0
%s
#ifdef EVAL_RGBA
// volume accumulation using evalRgba
// Marches from S to E in fixed 0.001 steps, compositing clamped RGBA samples
// front-to-back; bails out once transmittance drops below 1%.
GPU float3 evalRayRgba(float3 S, float3 E, float T)
{
float rayLength = length(E - S);
float3 rayDirection = normalize(E - S);
float3 result = make_float3(0);
// seeThrough = remaining transmittance along the ray (starts fully clear).
float seeThrough = 1;
for (float t = 0 /*rand() * 0.001f*/; t < rayLength; t += 0.001f)
{
float3 pos = S + t * rayDirection;
float4 rgba = clamp(evalRgba(pos, rayDirection, T), 0, 1);
if (rgba.w == 0) continue;
result += seeThrough * rgba.w * make_float3(rgba);
seeThrough *= 1 - rgba.w;
if (seeThrough < 0.01f) break;
}
return result;
}
#endif
};
// Parametric entry/exit of the 1-D ray p + t*d against the slab [slab.x, slab.y].
// A near-zero direction component means the ray never crosses the slab planes,
// so it is treated as always inside: (-INF, INF).
GPU float2 intersectSlab(float p, float d, float2 slab)
{
    if (fabs(d) < 0.0001f) return make_float2(-INF, INF);
    float tNear = (slab.x - p) / d;
    float tFar = (slab.y - p) / d;
    if (tNear > tFar)
    {
        float tmp = tNear; tNear = tFar; tFar = tmp;
    }
    return make_float2(tNear, tFar);
}
// Intersection of two closed intervals; (INF, -INF) marks the empty result.
GPU float2 intersectIntervals(float2 a, float2 b)
{
    // Order the operands so that `a` is the interval that starts first.
    if (b.x < a.x)
    {
        float2 swap = a; a = b; b = swap;
    }
    // Disjoint: the later interval starts after the earlier one ends.
    if (b.x > a.y) return make_float2(INF, -INF);
    return make_float2(b.x, min(a.y, b.y));
}
// Ray / axis-aligned [-1,1]^3 cube intersection via the slab method:
// intersect the per-axis parameter intervals.
GPU float2 intersectUnitCube(float3 p, float3 d)
{
    const float2 unitSlab = make_float2(-1, 1);
    float2 tyz = intersectIntervals(intersectSlab(p.y, d.y, unitSlab),
                                    intersectSlab(p.z, d.z, unitSlab));
    return intersectIntervals(intersectSlab(p.x, d.x, unitSlab), tyz);
}
// 8-bit RGBA pixel; packs a linear float3 color with scaling and gamma correction.
class Rgba
{
public:
unsigned char r, g, b, a;
// Scale, clamp to [0,1], apply invGamma, quantize to 0..255; alpha forced opaque.
GPU Rgba(float3 rgb, float scale, float invGamma)
{
rgb = clamp(rgb * scale, 0.0f, 1.0f);
r = (unsigned char)(pow(rgb.x, invGamma) * 255);
g = (unsigned char)(pow(rgb.y, invGamma) * 255);
b = (unsigned char)(pow(rgb.z, invGamma) * 255);
a = 255;
}
// Default: black.
// NOTE(review): a is 1 here but 255 in the other constructor -- 1/255 is
// nearly transparent, which looks inconsistent; confirm intent before changing.
GPU Rgba()
{
r = g = b = 0;
a = 1;
}
};
extern "C" {
// Per-pixel entry point: 2-D launch, one thread per output pixel.
// dist <= 0 selects an orthographic camera; otherwise perspective with focal
// distance `dist`. The ray is clipped to the [-1,1]^3 volume and the shader
// is integrated along the clipped segment. hdrOut/ldrOut may each be null.
__global__ void run(
float time,
int imgWidth,
int imgHeight,
Frame* frame,
float zoom,
float dist,
Params* params,
float3* hdrOut,
Rgba* ldrOut)
{
// get image pixel coordinates
int j = threadIdx.x + blockDim.x * blockIdx.x;
int i = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= imgHeight || j >= imgWidth) return;
// get normalized coordinates (vertical image axis maps to [-1,1])
float x = j * 2 - imgWidth;
float y = i * 2 - imgHeight;
x /= imgHeight; y /= imgHeight;
// setup ray
float3 rayOrigin, rayDirection;
if (dist <= 0)
{
rayOrigin = frame->getPointToWorld(make_float3(x / zoom, y / zoom, 0));
rayDirection = frame->getVectorToWorld(make_float3(0, 0, 1));
}
else
{
rayOrigin = frame->origin;
rayDirection = normalize(frame->getVectorToWorld(make_float3(x / zoom, y / zoom, dist)));
}
// clip the ray to the [-1,1]^3 cube
float2 interval = intersectUnitCube(rayOrigin, rayDirection);
float3 color;
Shader shader;
shader.init(params);
// interval.x == INF is the empty-intersection sentinel: ray misses the cube.
if (interval.x == INF) color = make_float3(0);
else
{
float3 start = rayOrigin + interval.x * rayDirection;
float3 end = rayOrigin + interval.y * rayDirection;
// initialize random generator (per-pixel seed from a 256x256 random texture)
shader.seed = tex2D(randomTexture, (j & 255) + 0.5f, (i & 255) + 0.5f);
// evaluate ray
#ifdef EVAL_RGBA
color = shader.evalRayRgba(start, end, time);
#else
color = shader.evalRay(start, end, time);
#endif
}
if (hdrOut != 0)
hdrOut[i*imgWidth + j] = color;
if (ldrOut != 0)
ldrOut[i*imgWidth + j] = Rgba(color, 1, 1.0f / 2.2f);
}
}
|
124e2e970faf2f7589c75e5eb618fb78050b05a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CS 4402 - Dana Zagar - 250790176
#include <cstdio>
#include <ctime>
using namespace std;
// A small prime number to prevent overflow and make verification feasible.
const int MAX_COEFF = 103;
// Print polynomial output.
// Print the first `range` coefficients of poly, space separated, then a blank line.
void print_polynomial(int* poly, int range)
{
    int i = 0;
    while (i < range)
    {
        printf("%2d ", poly[i++]);
    }
    printf("\n\n");
}
// Generates a random polynomial of size n.
// Fill p[0..n-1] with pseudo-random coefficients in [0, MAX_COEFF).
void random_polynomial(int* p, int n)
{
    int* end = p + n;
    while (p < end)
    {
        *p++ = rand() % MAX_COEFF;
    }
}
// Serial C function to find reduced polynomial product.
// For verification purposes.
// CPU reference: accumulate the convolution of x and y (each `size` terms)
// into ans, reducing every partial sum modulo MAX_COEFF. ans must hold
// 2*size-1 entries and is expected to arrive zero-initialized.
void multiply_polynomials_serial(int *x, int *y, int size, int *ans)
{
    // Walk output degrees; for each, sum the x[i]*y[d-i] pairs that land on it.
    for (int d = 0; d <= 2 * (size - 1); d++)
    {
        int lo = (d > size - 1) ? d - (size - 1) : 0;
        int hi = (d < size - 1) ? d : size - 1;
        for (int i = lo; i <= hi; i++)
        {
            ans[d] = (ans[d] + x[i] * y[d - i]) % MAX_COEFF;
        }
    }
}
// First CUDA kernel to calculate the product terms over two given polynomials
// of size n, given n^2/t thread-blocks and t threads per.
// Kernel 1: fill prods (length n*n) with the pairwise coefficient products
// x[row]*y[col] reduced mod MAX_COEFF -- one thread per product term.
// NOTE(review): the offset/index arithmetic relies on the launch shape used in
// main ((n*n)/t blocks of t threads, n a power of two, t in {64..512});
// confirm before reusing with other configurations.
__global__ void calculate_products(int *prods, int *x, int *y, size_t n)
{
int offset = blockIdx.x / n;
int index = (blockIdx.x % n) * n + threadIdx.x + blockDim.x*offset;
// Shift y and start over with x.
prods[index] = (x[blockIdx.x % n] * y[threadIdx.x + blockDim.x*offset]) % MAX_COEFF;
}
// Second CUDA kernel to reduce the products by combining like terms on each
// diagonal of the "2d" product matrix.
// Kernel 2: ans[k] = sum (mod MAX_COEFF) of prods along anti-diagonal k of the
// n x n product matrix. Launched with 2n-1 single-thread blocks: one block per
// output coefficient, the diagonal summed sequentially by that thread.
__global__ void reduce_polynomial(int *prods, int *ans, size_t n)
{
int i, j;
// Envision the product array as a 2d matrix tilted like a diamond.
// Each block represents a row of the diamond, i.e. a diagonal.
// If the block index is within the first half of the diamond, the
// block index dictates the row index.
if (blockIdx.x <= (2*n-2)/2)
{
i = blockIdx.x, j = 0;
}
// Otherwise, the block index dictates the column index.
else
{
i = n-1, j = (blockIdx.x % n) + 1;
}
// Sum over the diagonal given by the block index.
while (i >= 0 && j < n)
{
ans[blockIdx.x] = (ans[blockIdx.x] + prods[i*n + j]) % MAX_COEFF;
i--;
j++;
}
}
// Driver: reads n = 2^exponent (number of terms) and t (threads per block),
// multiplies two random polynomials over Z_MAX_COEFF on the GPU in two stages
// (pairwise products, then anti-diagonal reduction), and verifies the result
// against the serial CPU routine.
int main() {
    srand(time(NULL));
    int exponent, t;
    // Input the number of terms.
    printf("Input the desired number of terms in the polynomials. Enter an exponent on 2 [valid from 1-10] to define 2^input terms: ");
    scanf("%d", &exponent);
    if (exponent < 1 || exponent > 10)
    {
        printf("Invalid input. Program will terminate.\n\n");
        return 0;
    }
    int n = 1 << exponent; // Number of terms is 2^exponent.
    printf("%d terms; input polynomials are of degree %d.\n\n", n, n-1);
    // Input t value.
    printf("Input the number of threads per block, t. Enter a valid number [valid: 64, 128, 256, 512] to define t threads: ");
    scanf("%d", &t);
    if (t != 64 && t != 128 && t != 256 && t != 512)
    {
        printf("Invalid input. Program will terminate.\n\n");
        return 0;
    }
    int *X = NULL; // First polynomial of degree n-1.
    int *Y = NULL; // Second polynomial of degree n-1.
    int *P = NULL; // Interim products.
    int *Poly = NULL; // Final.
    int *PolyV = NULL; // Verification answer.
    X = new int[n];
    Y = new int[n];
    P = new int[n*n];
    Poly = new int[2*n-1];
    PolyV = new int[2*n-1];
    // Initialize values.
    random_polynomial(X, n);
    random_polynomial(Y, n);
    for (int i = 0; i < n*n; i++)
    {
        P[i] = 0;
    }
    for (int i = 0; i < 2*n-1; i++)
    {
        Poly[i] = 0;
        PolyV[i] = 0;
    }
    // Step 1: Calculating products.
    int *Xd, *Yd, *Pd;
    hipMalloc((void **)&Xd, sizeof(int)*n);
    hipMalloc((void **)&Yd, sizeof(int)*n);
    hipMalloc((void **)&Pd, sizeof(int)*n*n);
    hipMemcpy(Xd, X, sizeof(int)*n, hipMemcpyHostToDevice);
    hipMemcpy(Yd, Y, sizeof(int)*n, hipMemcpyHostToDevice);
    hipMemcpy(Pd, P, sizeof(int)*n*n, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( calculate_products), dim3((n*n)/t), dim3(t), 0, 0, Pd, Xd, Yd, n);
    // Step 2: Reducing like terms.
    int *Polyd;
    // BUGFIX: `sizeof(int)*2*n-1` parses as `(sizeof(int)*2*n) - 1`, i.e. 8n-1
    // bytes instead of the intended sizeof(int)*(2n-1) = 8n-4 bytes. The old
    // device-to-host copy therefore wrote 3 bytes past the end of the
    // (2n-1)-element host array Poly. Parenthesize the element count.
    hipMalloc((void **)&Polyd, sizeof(int)*(2*n-1));
    hipMemcpy(Polyd, Poly, sizeof(int)*(2*n-1), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( reduce_polynomial), dim3(2*n-1), dim3(1), 0, 0, Pd, Polyd, n);
    hipMemcpy(Poly, Polyd, sizeof(int)*(2*n-1), hipMemcpyDeviceToHost);
    // Print input, output.
    printf("CUDA Program Output\n\n");
    printf("First input polynomial:\n");
    print_polynomial(X, n);
    printf("Second input polynomial:\n");
    print_polynomial(Y, n);
    printf("Result:\n");
    print_polynomial(Poly, 2*n-1);
    // Step 3: Verify using serial C function.
    printf("Verification with Serial C Output\n\n");
    multiply_polynomials_serial(X, Y, n, PolyV);
    printf("Result:\n");
    print_polynomial(PolyV, 2*n-1);
    delete [] X;
    delete [] Y;
    delete [] P;
    delete [] Poly;
    delete [] PolyV;
    hipFree(Xd);
    hipFree(Yd);
    hipFree(Pd);
    hipFree(Polyd);
    return 0;
}
| 124e2e970faf2f7589c75e5eb618fb78050b05a1.cu | // CS 4402 - Dana Zagar - 250790176
#include <cstdio>
#include <ctime>
using namespace std;
// A small prime number to prevent overflow and make verification feasible.
const int MAX_COEFF = 103;
// Print polynomial output.
// Print the first `range` coefficients of poly, space separated, then a blank line.
void print_polynomial(int* poly, int range)
{
    int i = 0;
    while (i < range)
    {
        printf("%2d ", poly[i++]);
    }
    printf("\n\n");
}
// Generates a random polynomial of size n.
// Fill p[0..n-1] with pseudo-random coefficients in [0, MAX_COEFF).
void random_polynomial(int* p, int n)
{
    int* end = p + n;
    while (p < end)
    {
        *p++ = rand() % MAX_COEFF;
    }
}
// Serial C function to find reduced polynomial product.
// For verification purposes.
// CPU reference: accumulate the convolution of x and y (each `size` terms)
// into ans, reducing every partial sum modulo MAX_COEFF. ans must hold
// 2*size-1 entries and is expected to arrive zero-initialized.
void multiply_polynomials_serial(int *x, int *y, int size, int *ans)
{
    // Walk output degrees; for each, sum the x[i]*y[d-i] pairs that land on it.
    for (int d = 0; d <= 2 * (size - 1); d++)
    {
        int lo = (d > size - 1) ? d - (size - 1) : 0;
        int hi = (d < size - 1) ? d : size - 1;
        for (int i = lo; i <= hi; i++)
        {
            ans[d] = (ans[d] + x[i] * y[d - i]) % MAX_COEFF;
        }
    }
}
// First CUDA kernel to calculate the product terms over two given polynomials
// of size n, given n^2/t thread-blocks and t threads per.
// Kernel 1: fill prods (length n*n) with the pairwise coefficient products
// x[row]*y[col] reduced mod MAX_COEFF -- one thread per product term.
// NOTE(review): the offset/index arithmetic relies on the launch shape used in
// main ((n*n)/t blocks of t threads, n a power of two, t in {64..512});
// confirm before reusing with other configurations.
__global__ void calculate_products(int *prods, int *x, int *y, size_t n)
{
int offset = blockIdx.x / n;
int index = (blockIdx.x % n) * n + threadIdx.x + blockDim.x*offset;
// Shift y and start over with x.
prods[index] = (x[blockIdx.x % n] * y[threadIdx.x + blockDim.x*offset]) % MAX_COEFF;
}
// Second CUDA kernel to reduce the products by combining like terms on each
// diagonal of the "2d" product matrix.
// Kernel 2: ans[k] = sum (mod MAX_COEFF) of prods along anti-diagonal k of the
// n x n product matrix. Launched with 2n-1 single-thread blocks: one block per
// output coefficient, the diagonal summed sequentially by that thread.
__global__ void reduce_polynomial(int *prods, int *ans, size_t n)
{
int i, j;
// Envision the product array as a 2d matrix tilted like a diamond.
// Each block represents a row of the diamond, i.e. a diagonal.
// If the block index is within the first half of the diamond, the
// block index dictates the row index.
if (blockIdx.x <= (2*n-2)/2)
{
i = blockIdx.x, j = 0;
}
// Otherwise, the block index dictates the column index.
else
{
i = n-1, j = (blockIdx.x % n) + 1;
}
// Sum over the diagonal given by the block index.
while (i >= 0 && j < n)
{
ans[blockIdx.x] = (ans[blockIdx.x] + prods[i*n + j]) % MAX_COEFF;
i--;
j++;
}
}
// Driver: reads n = 2^exponent (number of terms) and t (threads per block),
// multiplies two random polynomials over Z_MAX_COEFF on the GPU in two stages
// (pairwise products, then anti-diagonal reduction), and verifies the result
// against the serial CPU routine.
int main() {
    srand(time(NULL));
    int exponent, t;
    // Input the number of terms.
    printf("Input the desired number of terms in the polynomials. Enter an exponent on 2 [valid from 1-10] to define 2^input terms: ");
    scanf("%d", &exponent);
    if (exponent < 1 || exponent > 10)
    {
        printf("Invalid input. Program will terminate.\n\n");
        return 0;
    }
    int n = 1 << exponent; // Number of terms is 2^exponent.
    printf("%d terms; input polynomials are of degree %d.\n\n", n, n-1);
    // Input t value.
    printf("Input the number of threads per block, t. Enter a valid number [valid: 64, 128, 256, 512] to define t threads: ");
    scanf("%d", &t);
    if (t != 64 && t != 128 && t != 256 && t != 512)
    {
        printf("Invalid input. Program will terminate.\n\n");
        return 0;
    }
    int *X = NULL; // First polynomial of degree n-1.
    int *Y = NULL; // Second polynomial of degree n-1.
    int *P = NULL; // Interim products.
    int *Poly = NULL; // Final.
    int *PolyV = NULL; // Verification answer.
    X = new int[n];
    Y = new int[n];
    P = new int[n*n];
    Poly = new int[2*n-1];
    PolyV = new int[2*n-1];
    // Initialize values.
    random_polynomial(X, n);
    random_polynomial(Y, n);
    for (int i = 0; i < n*n; i++)
    {
        P[i] = 0;
    }
    for (int i = 0; i < 2*n-1; i++)
    {
        Poly[i] = 0;
        PolyV[i] = 0;
    }
    // Step 1: Calculating products.
    int *Xd, *Yd, *Pd;
    cudaMalloc((void **)&Xd, sizeof(int)*n);
    cudaMalloc((void **)&Yd, sizeof(int)*n);
    cudaMalloc((void **)&Pd, sizeof(int)*n*n);
    cudaMemcpy(Xd, X, sizeof(int)*n, cudaMemcpyHostToDevice);
    cudaMemcpy(Yd, Y, sizeof(int)*n, cudaMemcpyHostToDevice);
    cudaMemcpy(Pd, P, sizeof(int)*n*n, cudaMemcpyHostToDevice);
    calculate_products<<<(n*n)/t, t>>>(Pd, Xd, Yd, n);
    // Step 2: Reducing like terms.
    int *Polyd;
    // BUGFIX: `sizeof(int)*2*n-1` parses as `(sizeof(int)*2*n) - 1`, i.e. 8n-1
    // bytes instead of the intended sizeof(int)*(2n-1) = 8n-4 bytes. The old
    // device-to-host copy therefore wrote 3 bytes past the end of the
    // (2n-1)-element host array Poly. Parenthesize the element count.
    cudaMalloc((void **)&Polyd, sizeof(int)*(2*n-1));
    cudaMemcpy(Polyd, Poly, sizeof(int)*(2*n-1), cudaMemcpyHostToDevice);
    reduce_polynomial<<<2*n-1, 1>>>(Pd, Polyd, n);
    cudaMemcpy(Poly, Polyd, sizeof(int)*(2*n-1), cudaMemcpyDeviceToHost);
    // Print input, output.
    printf("CUDA Program Output\n\n");
    printf("First input polynomial:\n");
    print_polynomial(X, n);
    printf("Second input polynomial:\n");
    print_polynomial(Y, n);
    printf("Result:\n");
    print_polynomial(Poly, 2*n-1);
    // Step 3: Verify using serial C function.
    printf("Verification with Serial C Output\n\n");
    multiply_polynomials_serial(X, Y, n, PolyV);
    printf("Result:\n");
    print_polynomial(PolyV, 2*n-1);
    delete [] X;
    delete [] Y;
    delete [] P;
    delete [] Poly;
    delete [] PolyV;
    cudaFree(Xd);
    cudaFree(Yd);
    cudaFree(Pd);
    cudaFree(Polyd);
    return 0;
}
|
3469c96d7900a0c92568d52d404952acee9c790d.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHStorageCopy.h"
#include "THHGeneral.h"
#include "THHHalf.h"
#include "THHTensorCopy.h"
#include "THHTensor.hpp"
#include "THHStorage.hpp"
#include "generic/THCStorageCopy.cu"
#include "THHGenerateAllTypes.h"
| 3469c96d7900a0c92568d52d404952acee9c790d.cu | #include "THCStorageCopy.h"
#include "THCGeneral.h"
#include "THCHalf.h"
#include "THCTensorCopy.h"
#include "THCTensor.hpp"
#include "THCStorage.hpp"
#include "generic/THCStorageCopy.cu"
#include "THCGenerateAllTypes.h"
|
bbd38958bc821c72fae2e4a2c0c2ea330de4d9b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "matmult_kernels.h"
#include <stdio.h>
#include "rocblas.h"
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
extern "C"{
// Baseline: C = A*B (A: m x k, B: k x n, C: m x n, doubles) computed by a
// single GPU thread -- a correctness reference, not a performance path.
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C){
//allocate memory on GPU
double* d_A;
double* d_B;
double* d_C;
hipMalloc((void**) &d_A, m*k*sizeof(double));
hipMalloc((void**) &d_B, n*k*sizeof(double));
hipMalloc((void**) &d_C, m*n*sizeof(double));
//move A and B to GPU
hipMemcpy(d_A, A, m*k*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, n*k*sizeof(double), hipMemcpyHostToDevice);
//call kernel: one block, one thread computes the whole product
hipLaunchKernelGGL(( matmult_kernel1), dim3(1),dim3(1), 0, 0, m, n, k, d_A, d_B, d_C);
hipDeviceSynchronize();
//move C back to host
hipMemcpy(C, d_C, m*n*sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// One thread per C element: 2-D grid of bs x bs thread blocks, grid sized by
// ceiling division so partial tiles at the edges are covered. The block edge
// bs defaults to 16 and can be overridden via the BLOCK_SIZE env var.
void matmult_gpu2(int m, int n, int k, double *A, double *B, double *C){
//allocate memory on GPU
double* d_A;
double* d_B;
double* d_C;
int bs = 16;
hipMalloc((void**) &d_A, m*k*sizeof(double));
hipMalloc((void**) &d_B, n*k*sizeof(double));
hipMalloc((void**) &d_C, m*n*sizeof(double));
//move A and B to GPU
hipMemcpy(d_A, A, m*k*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, n*k*sizeof(double), hipMemcpyHostToDevice);
//number of blocks is ceil of N/bs
if(getenv("BLOCK_SIZE")!=NULL){
bs = atoi(getenv("BLOCK_SIZE"));
}
int mblocks = m/bs + (int) (m%bs!=0);
int nblocks = n/bs + (int) (n%bs!=0);
//call kernel
hipLaunchKernelGGL(( matmult_kernel2), dim3(dim3 (nblocks,mblocks)),dim3(dim3 (bs,bs)), 0, 0, m, n, k, d_A, d_B, d_C);
checkCudaErrors(hipDeviceSynchronize());
//move C back to host
hipMemcpy(C, d_C, m*n*sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// Like matmult_gpu2, but each thread computes two C elements along the n
// dimension, so the grid is halved in x. Block edge bs defaults to 16 and can
// be overridden via the BLOCK_SIZE env var.
void matmult_gpu3(int m, int n, int k, double *A, double *B, double *C){
    //allocate memory on GPU
    double* d_A;
    double* d_B;
    double* d_C;
    int bs = 16;
    hipMalloc((void**) &d_A, m*k*sizeof(double));
    hipMalloc((void**) &d_B, n*k*sizeof(double));
    hipMalloc((void**) &d_C, m*n*sizeof(double));
    //move A and B to GPU
    hipMemcpy(d_A, A, m*k*sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(d_B, B, n*k*sizeof(double), hipMemcpyHostToDevice);
    // BUGFIX: the previous `int bs = atoi(...)` declared a new variable that
    // shadowed the outer bs, so BLOCK_SIZE silently had no effect. Assign to
    // the existing variable instead (matches matmult_gpu2).
    if(getenv("BLOCK_SIZE")!=NULL){
        bs = atoi(getenv("BLOCK_SIZE"));
    }
    //number of blocks is ceil of N/bs (x covers two elements per thread)
    int mblocks = m/bs + (int) (m%bs!=0);
    int nblocks = n/bs/2 + (int) (n%(bs*2)!=0);
    //call kernel
    hipLaunchKernelGGL(( matmult_kernel3), dim3(dim3 (nblocks,mblocks)),dim3(dim3 (bs,bs)), 0, 0, m, n, k, d_A, d_B, d_C);
    hipDeviceSynchronize();
    //move C back to host
    hipMemcpy(C, d_C, m*n*sizeof(double), hipMemcpyDeviceToHost);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
}
#define BSGPU4 16
// Each thread computes `s` C elements (default 8, overridable via the
// NUM_ELEM_PER_THREAD env var); thread blocks are fixed at BSGPU4 x BSGPU4.
// NOTE(review): values > 16 only trigger a warning and are still used --
// confirm matmult_kernel4 tolerates s > 16 or clamp here.
void matmult_gpu4(int m, int n, int k, double *A, double *B, double *C){
//number of elements to compute in each thread
int s;
if(getenv("NUM_ELEM_PER_THREAD")!=NULL){
s = atoi(getenv("NUM_ELEM_PER_THREAD"));
if(s>16)
fprintf( stderr,"numer of elements per thread cannot exceed 16");
} else{
s = 8;
}
//allocate memory on GPU
double* d_A;
double* d_B;
double* d_C;
hipMalloc((void**) &d_A, m*k*sizeof(double));
hipMalloc((void**) &d_B, n*k*sizeof(double));
hipMalloc((void**) &d_C, m*n*sizeof(double));
//move A and B to GPU
hipMemcpy(d_A, A, m*k*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, n*k*sizeof(double), hipMemcpyHostToDevice);
//number of blocks is ceil of N/bs
int mblocks = m/BSGPU4 + (int) (m%BSGPU4!=0);
int nblocks = n/BSGPU4/s + (int) (n%(BSGPU4*s)!=0);
//call kernel
hipLaunchKernelGGL(( matmult_kernel4), dim3(dim3 (nblocks,mblocks)),dim3(dim3 (BSGPU4,BSGPU4)), 0, 0, m, n, k, d_A, d_B, d_C, s);
hipDeviceSynchronize();
//move C back to host
hipMemcpy(C, d_C, m*n*sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// Shared-memory tiled multiply using the project's Matrix struct and the
// BLOCK_SIZE tile constant (both defined elsewhere).
// NOTE(review): dimGrid uses plain division (no ceiling), so m and n are
// presumably required to be multiples of BLOCK_SIZE -- confirm with callers.
void matmult_gpu5(int m, int n, int k, double *A, double *B, double *C){
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = k;
d_A.height = m;
size_t size = k * m * sizeof(double);
hipMalloc((void**) &d_A.elements, size);
hipMemcpy(d_A.elements, A, size, hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = d_B.stride = n;
d_B.height = k;
size = n * k * sizeof(double);
hipMalloc((void**) &d_B.elements, size);
hipMemcpy(d_B.elements, B, size,
hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = n;
d_C.height = m;
size = m * n * sizeof(double);
hipMalloc((void**) &d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(n / dimBlock.x, m / dimBlock.y);
hipLaunchKernelGGL(( gpu5_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
// Read C from device memory
hipMemcpy(C, d_C.elements, size, hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
// Vendor-library path: C = 1.0*A*B + 0.0*C via Dgemm.
// NOTE(review): the BLAS expects column-major operands with lda=m, ldb=k,
// ldc=m; callers presumably pass data in that layout (or rely on the
// transpose identity) -- confirm against the host-side storage convention.
void matmult_gpulib(int m, int n, int k, double *A, double *B, double *C) {
int lda=m,ldb=k,ldc=m;
double alf = 1.0;
double bet = 0.0;
double *alpha = &alf;
double *beta = &bet;
//double *d_alpha;
//double *d_beta;
double* d_A;
double* d_B;
double* d_C;
hipMalloc((void **)&d_A, m * k * sizeof(double));
hipMalloc((void **)&d_B, k * n * sizeof(double));
hipMalloc((void **)&d_C, n * m * sizeof(double));
//hipMalloc((void **)&d_alpha, sizeof(double));
//hipMalloc((void **)&d_beta, sizeof(double));
hipMemcpy(d_A, A, m * k * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, k * n * sizeof(double), hipMemcpyHostToDevice);
//hipMemcpy(d_alpha, alpha, sizeof(double), hipMemcpyHostToDevice);
//hipMemcpy(d_beta, beta, sizeof(double), hipMemcpyHostToDevice);
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
// Do the actual multiplication using the library function
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc);
// Destroy the handle
hipblasDestroy(handle);
// Read C from device memory
hipMemcpy(C, d_C, n * m * sizeof(double), hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
} | bbd38958bc821c72fae2e4a2c0c2ea330de4d9b7.cu | #include "matmult_kernels.h"
#include <stdio.h>
#include "cublas_v2.h"
#include <cuda_runtime.h>
#include <helper_cuda.h>
extern "C"{
// Baseline: C = A*B (A: m x k, B: k x n, C: m x n, doubles) computed by a
// single GPU thread -- a correctness reference, not a performance path.
void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C){
//allocate memory on GPU
double* d_A;
double* d_B;
double* d_C;
cudaMalloc((void**) &d_A, m*k*sizeof(double));
cudaMalloc((void**) &d_B, n*k*sizeof(double));
cudaMalloc((void**) &d_C, m*n*sizeof(double));
//move A and B to GPU
cudaMemcpy(d_A, A, m*k*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, n*k*sizeof(double), cudaMemcpyHostToDevice);
//call kernel: one block, one thread computes the whole product
matmult_kernel1<<<1,1>>>(m, n, k, d_A, d_B, d_C);
cudaDeviceSynchronize();
//move C back to host
cudaMemcpy(C, d_C, m*n*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
// One thread per C element: 2-D grid of bs x bs thread blocks, grid sized by
// ceiling division so partial tiles at the edges are covered. The block edge
// bs defaults to 16 and can be overridden via the BLOCK_SIZE env var.
void matmult_gpu2(int m, int n, int k, double *A, double *B, double *C){
//allocate memory on GPU
double* d_A;
double* d_B;
double* d_C;
int bs = 16;
cudaMalloc((void**) &d_A, m*k*sizeof(double));
cudaMalloc((void**) &d_B, n*k*sizeof(double));
cudaMalloc((void**) &d_C, m*n*sizeof(double));
//move A and B to GPU
cudaMemcpy(d_A, A, m*k*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, n*k*sizeof(double), cudaMemcpyHostToDevice);
//number of blocks is ceil of N/bs
if(getenv("BLOCK_SIZE")!=NULL){
bs = atoi(getenv("BLOCK_SIZE"));
}
int mblocks = m/bs + (int) (m%bs!=0);
int nblocks = n/bs + (int) (n%bs!=0);
//call kernel
matmult_kernel2<<<dim3 (nblocks,mblocks),dim3 (bs,bs)>>>(m, n, k, d_A, d_B, d_C);
checkCudaErrors(cudaDeviceSynchronize());
//move C back to host
cudaMemcpy(C, d_C, m*n*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
// Like matmult_gpu2, but each thread computes two C elements along the n
// dimension, so the grid is halved in x. Block edge bs defaults to 16 and can
// be overridden via the BLOCK_SIZE env var.
void matmult_gpu3(int m, int n, int k, double *A, double *B, double *C){
    //allocate memory on GPU
    double* d_A;
    double* d_B;
    double* d_C;
    int bs = 16;
    cudaMalloc((void**) &d_A, m*k*sizeof(double));
    cudaMalloc((void**) &d_B, n*k*sizeof(double));
    cudaMalloc((void**) &d_C, m*n*sizeof(double));
    //move A and B to GPU
    cudaMemcpy(d_A, A, m*k*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, n*k*sizeof(double), cudaMemcpyHostToDevice);
    // BUGFIX: the previous `int bs = atoi(...)` declared a new variable that
    // shadowed the outer bs, so BLOCK_SIZE silently had no effect. Assign to
    // the existing variable instead (matches matmult_gpu2).
    if(getenv("BLOCK_SIZE")!=NULL){
        bs = atoi(getenv("BLOCK_SIZE"));
    }
    //number of blocks is ceil of N/bs (x covers two elements per thread)
    int mblocks = m/bs + (int) (m%bs!=0);
    int nblocks = n/bs/2 + (int) (n%(bs*2)!=0);
    //call kernel
    matmult_kernel3<<<dim3 (nblocks,mblocks),dim3 (bs,bs)>>>(m, n, k, d_A, d_B, d_C);
    cudaDeviceSynchronize();
    //move C back to host
    cudaMemcpy(C, d_C, m*n*sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
#define BSGPU4 16
// Each thread computes `s` C elements (default 8, overridable via the
// NUM_ELEM_PER_THREAD env var); thread blocks are fixed at BSGPU4 x BSGPU4.
// NOTE(review): values > 16 only trigger a warning and are still used --
// confirm matmult_kernel4 tolerates s > 16 or clamp here.
void matmult_gpu4(int m, int n, int k, double *A, double *B, double *C){
//number of elements to compute in each thread
int s;
if(getenv("NUM_ELEM_PER_THREAD")!=NULL){
s = atoi(getenv("NUM_ELEM_PER_THREAD"));
if(s>16)
fprintf( stderr,"numer of elements per thread cannot exceed 16");
} else{
s = 8;
}
//allocate memory on GPU
double* d_A;
double* d_B;
double* d_C;
cudaMalloc((void**) &d_A, m*k*sizeof(double));
cudaMalloc((void**) &d_B, n*k*sizeof(double));
cudaMalloc((void**) &d_C, m*n*sizeof(double));
//move A and B to GPU
cudaMemcpy(d_A, A, m*k*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, n*k*sizeof(double), cudaMemcpyHostToDevice);
//number of blocks is ceil of N/bs
int mblocks = m/BSGPU4 + (int) (m%BSGPU4!=0);
int nblocks = n/BSGPU4/s + (int) (n%(BSGPU4*s)!=0);
//call kernel
matmult_kernel4<<<dim3 (nblocks,mblocks),dim3 (BSGPU4,BSGPU4)>>>(m, n, k, d_A, d_B, d_C, s);
cudaDeviceSynchronize();
//move C back to host
cudaMemcpy(C, d_C, m*n*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
// Shared-memory tiled multiply using the project's Matrix struct and the
// BLOCK_SIZE tile constant (both defined elsewhere).
// NOTE(review): dimGrid uses plain division (no ceiling), so m and n are
// presumably required to be multiples of BLOCK_SIZE -- confirm with callers.
void matmult_gpu5(int m, int n, int k, double *A, double *B, double *C){
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = k;
d_A.height = m;
size_t size = k * m * sizeof(double);
cudaMalloc((void**) &d_A.elements, size);
cudaMemcpy(d_A.elements, A, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = d_B.stride = n;
d_B.height = k;
size = n * k * sizeof(double);
cudaMalloc((void**) &d_B.elements, size);
cudaMemcpy(d_B.elements, B, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = n;
d_C.height = m;
size = m * n * sizeof(double);
cudaMalloc((void**) &d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(n / dimBlock.x, m / dimBlock.y);
gpu5_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device memory
cudaMemcpy(C, d_C.elements, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Vendor-library path: C = 1.0*A*B + 0.0*C via cublasDgemm.
// NOTE(review): cuBLAS expects column-major operands with lda=m, ldb=k,
// ldc=m; callers presumably pass data in that layout (or rely on the
// transpose identity) -- confirm against the host-side storage convention.
void matmult_gpulib(int m, int n, int k, double *A, double *B, double *C) {
int lda=m,ldb=k,ldc=m;
double alf = 1.0;
double bet = 0.0;
double *alpha = &alf;
double *beta = &bet;
//double *d_alpha;
//double *d_beta;
double* d_A;
double* d_B;
double* d_C;
cudaMalloc((void **)&d_A, m * k * sizeof(double));
cudaMalloc((void **)&d_B, k * n * sizeof(double));
cudaMalloc((void **)&d_C, n * m * sizeof(double));
//cudaMalloc((void **)&d_alpha, sizeof(double));
//cudaMalloc((void **)&d_beta, sizeof(double));
cudaMemcpy(d_A, A, m * k * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, k * n * sizeof(double), cudaMemcpyHostToDevice);
//cudaMemcpy(d_alpha, alpha, sizeof(double), cudaMemcpyHostToDevice);
//cudaMemcpy(d_beta, beta, sizeof(double), cudaMemcpyHostToDevice);
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
// Do the actual multiplication using the library function
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc);
// Destroy the handle
cublasDestroy(handle);
// Read C from device memory
cudaMemcpy(C, d_C, n * m * sizeof(double), cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
} |
f5326d76e0da76e7965203ebe2b310752ab6f2fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/ztrtri_diag_batched.cu, normal z -> s, Wed Jan 2 14:18:51 2019
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named strtri_diag.cu to avoid name conflict with src/strtri.o
in the library. The actual kernels are in strtri_lower.cu and strtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "strtri.cuh"
/***************************************************************************//**
Purpose
-------
STRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in strsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a REAL array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_diag_batched
*******************************************************************************/
extern "C" void
magmablas_strtri_diag_batched(
    magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
    float const * const *dA_array, magma_int_t ldda,
    float **dinvA_array,
    magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
    // Argument checking: the negated index of the first bad argument is
    // reported through magma_xerbla and the routine returns without
    // launching anything.
    magma_int_t info = 0;
    if (uplo != MagmaLower && uplo != MagmaUpper)
        info = -1;
    else if (diag != MagmaNonUnit && diag != MagmaUnit)
        info = -2;
    else if (n < 0)
        info = -3;
    else if (ldda < n)
        info = -5;
    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return; //info
    }

    // Number of IB x IB tiles sitting on the diagonal of each matrix.
    int nblocks = magma_ceildiv( n, IB );

    // Optionally wipe the whole dinvA workspace of every batch entry first.
    if ( resetozero ) {
        magmablas_slaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_S_ZERO, MAGMA_S_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
    }
    // if someone want to use cudamemset he need to set the whole vectors
    // of initial size otherwise it is a bug and thus need to have dinvA_length
    // in input parameter and has been tested and was slower.
    //was not the largest size computed by the high API getrf_batched then it is bug and need to use magmablas_slaset_batched

    if ( uplo == MagmaLower ) {
        // Phase 1: invert each diagonal IB x IB inner block independently.
        dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid; z carries the batch index
        hipLaunchKernelGGL(( strtri_diag_lower_kernel_batched)
            , dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() ,
            diag, n, dA_array, ldda, dinvA_array );

        // Phase 2: build up NB x NB blocks (assuming IB=16 here):
        // use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
        // then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
        // then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
        // then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
        for( int jb=IB; jb < NB; jb *= 2 ) {
            int kb = jb*2;                       // block size produced by this pass
            int npages = magma_ceildiv( n, kb ); // number of kb x kb blocks along the diagonal
            dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
            dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
            //printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
            // Each combine step is split into part1/part2 kernels
            // (plus a part3 for block sizes above 64); the launch
            // configuration above must match the kernel's expectations.
            switch (jb) {
                case 16:
                    hipLaunchKernelGGL(( triple_sgemm16_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    hipLaunchKernelGGL(( triple_sgemm16_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    break;
                case 32:
                    hipLaunchKernelGGL(( triple_sgemm32_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    hipLaunchKernelGGL(( triple_sgemm32_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    break;
                case 64:
                    hipLaunchKernelGGL(( triple_sgemm64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    hipLaunchKernelGGL(( triple_sgemm64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    break;
                default:
                    hipLaunchKernelGGL(( triple_sgemm_above64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    hipLaunchKernelGGL(( triple_sgemm_above64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    hipLaunchKernelGGL(( triple_sgemm_above64_part3_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    break;
            }
            if ( kb >= n ) break;  // blocks of size kb already cover the whole matrix
        }
    }
    else {
        // Upper-triangular variant: identical structure, upper kernels.
        dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
        hipLaunchKernelGGL(( strtri_diag_upper_kernel_batched)
            , dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() ,
            diag, n, dA_array, ldda, dinvA_array );

        // update the inverse up to the size of IB
        for( int jb=IB; jb < NB; jb *= 2 ) {
            int kb = jb*2;
            int npages = magma_ceildiv( n, kb );
            dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
            dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
            switch (jb) {
                case 16:
                    hipLaunchKernelGGL(( triple_sgemm16_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    hipLaunchKernelGGL(( triple_sgemm16_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    break;
                case 32:
                    hipLaunchKernelGGL(( triple_sgemm32_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    hipLaunchKernelGGL(( triple_sgemm32_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    break;
                case 64:
                    hipLaunchKernelGGL(( triple_sgemm64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    hipLaunchKernelGGL(( triple_sgemm64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    break;
                default:
                    hipLaunchKernelGGL(( triple_sgemm_above64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    hipLaunchKernelGGL(( triple_sgemm_above64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    hipLaunchKernelGGL(( triple_sgemm_above64_part3_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
                    break;
            }
            if ( kb >= n ) break;  // blocks of size kb already cover the whole matrix
        }
    }
}
| f5326d76e0da76e7965203ebe2b310752ab6f2fe.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/ztrtri_diag_batched.cu, normal z -> s, Wed Jan 2 14:18:51 2019
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named strtri_diag.cu to avoid name conflict with src/strtri.o
in the library. The actual kernels are in strtri_lower.cu and strtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "strtri.cuh"
/***************************************************************************//**
Purpose
-------
STRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in strsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a REAL array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_diag_batched
*******************************************************************************/
extern "C" void
magmablas_strtri_diag_batched(
    magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
    float const * const *dA_array, magma_int_t ldda,
    float **dinvA_array,
    magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
    // Validate the arguments up front; the negated position of the first
    // offending argument is handed to magma_xerbla and we return early.
    magma_int_t arg_check = 0;
    if (uplo != MagmaLower && uplo != MagmaUpper)
        arg_check = -1;
    else if (diag != MagmaNonUnit && diag != MagmaUnit)
        arg_check = -2;
    else if (n < 0)
        arg_check = -3;
    else if (ldda < n)
        arg_check = -5;
    if (arg_check != 0) {
        magma_xerbla( __func__, -(arg_check) );
        return;
    }

    // How many IB x IB tiles sit on the diagonal of each matrix.
    int num_diag_tiles = magma_ceildiv( n, IB );

    // The caller may request that each dinvA workspace be wiped first.
    if ( resetozero ) {
        magmablas_slaset_batched(MagmaFull, magma_roundup( n, NB ), NB,
                                 MAGMA_S_ZERO, MAGMA_S_ZERO, dinvA_array,
                                 magma_roundup( n, NB ), batchCount, queue);
    }
    // NOTE: a plain memset would need the originally allocated dinvA length
    // as an extra parameter and was measured to be slower, hence the
    // batched laset above is used instead.

    // Phase 1: invert every IB x IB diagonal tile independently.
    // Phase 2: repeatedly fuse pairs of inverted blocks, doubling the block
    // size each sweep (IB -> 2*IB -> ... -> NB) via the triple_sgemm kernels.
    dim3 diag_grid( num_diag_tiles, 1, batchCount );  // z dimension carries the batch index
    if ( uplo == MagmaLower ) {
        strtri_diag_lower_kernel_batched
            <<< diag_grid, IB, 0, queue->cuda_stream() >>>
            ( diag, n, dA_array, ldda, dinvA_array );

        for( int cur = IB; cur < NB; cur *= 2 ) {
            int nxt   = cur*2;                    // block size produced by this sweep
            int pages = magma_ceildiv( n, nxt );  // nxt x nxt blocks along the diagonal
            // Launch shape grows with the block size being combined
            // (y dimension folds the page count in, as in the original
            // CUDA-arch-1.x 2D-grid emulation).
            dim3 block_dim( (cur <= 32 ? cur/4 : 16), 4 );
            dim3 grid_dim( cur/(block_dim.x*block_dim.y), pages*(cur/16), batchCount );
            switch (cur) {
                case 16:
                    triple_sgemm16_part1_lower_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    triple_sgemm16_part2_lower_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    break;
                case 32:
                    triple_sgemm32_part1_lower_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    triple_sgemm32_part2_lower_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    break;
                case 64:
                    triple_sgemm64_part1_lower_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    triple_sgemm64_part2_lower_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    break;
                default:
                    triple_sgemm_above64_part1_lower_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    triple_sgemm_above64_part2_lower_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    triple_sgemm_above64_part3_lower_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    break;
            }
            if ( nxt >= n ) break;   // the whole matrix is already covered
        }
    }
    else {
        strtri_diag_upper_kernel_batched
            <<< diag_grid, IB, 0, queue->cuda_stream() >>>
            ( diag, n, dA_array, ldda, dinvA_array );

        for( int cur = IB; cur < NB; cur *= 2 ) {
            int nxt   = cur*2;
            int pages = magma_ceildiv( n, nxt );
            dim3 block_dim( (cur <= 32 ? cur/4 : 16), 4 );
            dim3 grid_dim( cur/(block_dim.x*block_dim.y), pages*(cur/16), batchCount );
            switch (cur) {
                case 16:
                    triple_sgemm16_part1_upper_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    triple_sgemm16_part2_upper_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    break;
                case 32:
                    triple_sgemm32_part1_upper_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    triple_sgemm32_part2_upper_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    break;
                case 64:
                    triple_sgemm64_part1_upper_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    triple_sgemm64_part2_upper_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    break;
                default:
                    triple_sgemm_above64_part1_upper_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    triple_sgemm_above64_part2_upper_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    triple_sgemm_above64_part3_upper_kernel_batched<<< grid_dim, block_dim, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, cur, pages );
                    break;
            }
            if ( nxt >= n ) break;   // the whole matrix is already covered
        }
    }
}
|
6b47e8a79b1f55805724a6d729cae4e5a55f5a91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Element-wise vector addition: c[i] = a[i] + b[i] for every i in [0, limit).
// Launch with a 1-D grid of 1-D blocks; `limit` is the number of elements.
__global__ void mat_add(float *a, float *b, float *c, int limit)
{
// Flat global thread index: one thread handles one element.
const int i = blockDim.x * blockIdx.x + threadIdx.x;
// Bounds guard: the grid may be padded past `limit` at the tail.
if (i < limit)
{
c[i] = a[i] + b[i];
}
} | 6b47e8a79b1f55805724a6d729cae4e5a55f5a91.cu | #include "includes.h"
// Element-wise vector addition: c[i] = a[i] + b[i] for every i in [0, limit).
// Expects a 1-D launch; `limit` is the element count.
__global__ void mat_add(float *a, float *b, float *c, int limit)
{
// One thread per element; threads past the end of the data exit early.
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= limit)
return;
c[idx] = a[idx] + b[idx];
}
c7aa26ea4d0d879f2b5dbc77556fb0f36561cfea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: ndtrung
#include "TwoStepBDNVTGPU.cuh"
#include "TwoStepBDNVTRigidGPU.cuh"
#include "saruprngCUDA.h"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
/*! \file TwoStepBDNVTGPU.cu
\brief Defines GPU kernel code for BDNVT integration on the GPU. Used by TwoStepBDNVTGPU.
*/
//! Shared memory array for gpu_bdnvt_step_two_kernel()
extern __shared__ float s_gammas[];
//! Takes the first half-step forward in the BDNVT integration on a group of particles with
/*! \param d_pos array of particle positions and types
\param d_vel array of particle velocities
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param gamma_diam If true, use particle diameters as gamma. If false, read from d_gamma
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details.
Random number generation is done per thread with Saru's 3-seed constructor. The seeds are, the time step,
the particle tag, and the user-defined seed.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
extern "C" __global__
void gpu_bdnvt_bdforce_kernel(const Scalar4 *d_pos,
                              const Scalar4 *d_vel,
                              const Scalar *d_diameter,
                              const unsigned int *d_tag,
                              unsigned int *d_group_members,
                              unsigned int group_size,
                              float4 *d_net_force,
                              float *d_gamma,
                              unsigned int n_types,
                              bool gamma_diam,
                              unsigned int timestep,
                              unsigned int seed,
                              float T,
                              float deltaT,
                              float D)
    {
    // gamma_diam is a kernel argument, hence uniform across the block:
    // either every thread enters this branch or none does, so the
    // __syncthreads() below is never a divergent barrier.
    if (!gamma_diam)
        {
        // read in the gammas (1 dimensional array): cooperatively stage the
        // per-type gamma table into dynamic shared memory (s_gammas, sized
        // by the host launcher to sizeof(float)*n_types)
        for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
            {
            if (cur_offset + threadIdx.x < n_types)
                s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
            }
        __syncthreads();
        }
    // determine which particle this thread works on (MEM TRANSFER: 4 bytes)
    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (group_idx < group_size)
        {
        // indirection: group member -> particle index
        unsigned int idx = d_group_members[group_idx];
        // calculate the additional BD force
        // read the current particle velocity (MEM TRANSFER: 16 bytes)
        float4 vel = d_vel[idx];
        // read in the tag of our particle; it seeds the per-particle RNG below
        // (MEM TRANSFER: 4 bytes)
        unsigned int ptag = d_tag[idx];
        // calculate the magnitude of the random force
        float gamma;
        if (gamma_diam)
            {
            // gamma is taken directly from the particle diameter
            // (MEM TRANSFER: 4 bytes)
            gamma = d_diameter[idx];
            }
        else
            {
            // read in the type of our particle, stored in the fourth (w)
            // component of the position float4, then look up its gamma
            unsigned int typ = __float_as_int(d_pos[idx].w);
            gamma = s_gammas[typ];
            }
        // amplitude of the random kick; presumably scaled so that uniform
        // noise in (-1,1) reproduces temperature T (variance 1/3, hence the
        // factor 6) -- TODO confirm against the BD thermostat derivation
        float coeff = sqrtf(6.0f * gamma * T / deltaT);
        float3 bd_force = make_float3(0.0f, 0.0f, 0.0f);
        // Initialize the Random Number Generator and generate the 3 random numbers
        SaruGPU s(ptag, timestep, seed); // 3 dimensional seeding
        float randomx=s.f(-1.0, 1.0);
        float randomy=s.f(-1.0, 1.0);
        float randomz=s.f(-1.0, 1.0);
        // BD force = random kick + viscous drag (-gamma * v), per component
        bd_force.x = randomx*coeff - gamma*vel.x;
        bd_force.y = randomy*coeff - gamma*vel.y;
        // in 2D simulations (D <= 2) no force is applied along z
        if (D > 2.0f)
            bd_force.z = randomz*coeff - gamma*vel.z;
        // read in the net force
        float4 fi = d_net_force[idx];
        // accumulate the BD force into the net force and write out
        // (MEM TRANSFER: 32 bytes)
        fi.x += bd_force.x;
        fi.y += bd_force.y;
        fi.z += bd_force.z;
        d_net_force[idx] = fi;
        }
    }
/*! \param d_pos array of particle positions and types
\param d_vel array of particle velocities
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param bdnvt_args Collected arguments for gpu_bdnvt_step_two_kernel()
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
*/
hipError_t gpu_bdnvt_force( const Scalar4 *d_pos,
                            const Scalar4 *d_vel,
                            const Scalar *d_diameter,
                            const unsigned int *d_tag,
                            unsigned int *d_group_members,
                            unsigned int group_size,
                            float4 *d_net_force,
                            const bdnvt_step_two_args& bdnvt_args,
                            float deltaT,
                            float D)
    {
    // Nothing to do for an empty group; also avoids launching a grid with
    // zero blocks (an invalid configuration) after the ceil-div below.
    if (group_size == 0)
        return hipSuccess;

    // setup the grid to run the kernel
    int block_size = 256;
    // ceiling division: just enough blocks to cover group_size
    // (the previous group_size/block_size + 1 launched one superfluous
    // block whenever group_size was an exact multiple of block_size)
    dim3 grid( (group_size + block_size - 1) / block_size, 1, 1);
    dim3 threads(block_size, 1, 1);

    // run the kernel; dynamic shared memory holds the per-type gamma table
    hipLaunchKernelGGL(( gpu_bdnvt_bdforce_kernel), dim3(grid), dim3(threads), sizeof(float)*bdnvt_args.n_types , 0,
                       d_pos,
                       d_vel,
                       d_diameter,
                       d_tag,
                       d_group_members,
                       group_size,
                       d_net_force,
                       bdnvt_args.d_gamma,
                       bdnvt_args.n_types,
                       bdnvt_args.gamma_diam,
                       bdnvt_args.timestep,
                       bdnvt_args.seed,
                       bdnvt_args.T,
                       deltaT,
                       D);

    // Report launch-configuration errors instead of unconditional success
    // (execution errors still surface at the caller's next synchronizing
    // call). Peek rather than get so the sticky error state is preserved.
    return hipPeekAtLastError();
    }
| c7aa26ea4d0d879f2b5dbc77556fb0f36561cfea.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: ndtrung
#include "TwoStepBDNVTGPU.cuh"
#include "TwoStepBDNVTRigidGPU.cuh"
#include "saruprngCUDA.h"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
/*! \file TwoStepBDNVTGPU.cu
\brief Defines GPU kernel code for BDNVT integration on the GPU. Used by TwoStepBDNVTGPU.
*/
//! Shared memory array for gpu_bdnvt_step_two_kernel()
extern __shared__ float s_gammas[];
//! Takes the first half-step forward in the BDNVT integration on a group of particles with
/*! \param d_pos array of particle positions and types
\param d_vel array of particle velocities
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param d_gamma List of per-type gammas
\param n_types Number of particle types in the simulation
\param gamma_diam If true, use particle diameters as gamma. If false, read from d_gamma
\param timestep Current timestep of the simulation
\param seed User chosen random number seed
\param T Temperature set point
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
This kernel is implemented in a very similar manner to gpu_nve_step_one_kernel(), see it for design details.
Random number generation is done per thread with Saru's 3-seed constructor. The seeds are, the time step,
the particle tag, and the user-defined seed.
This kernel must be launched with enough dynamic shared memory per block to read in d_gamma
*/
extern "C" __global__
void gpu_bdnvt_bdforce_kernel(const Scalar4 *d_pos,
                              const Scalar4 *d_vel,
                              const Scalar *d_diameter,
                              const unsigned int *d_tag,
                              unsigned int *d_group_members,
                              unsigned int group_size,
                              float4 *d_net_force,
                              float *d_gamma,
                              unsigned int n_types,
                              bool gamma_diam,
                              unsigned int timestep,
                              unsigned int seed,
                              float T,
                              float deltaT,
                              float D)
    {
    // gamma_diam is a kernel argument, hence uniform across the block:
    // either every thread enters this branch or none does, so the
    // __syncthreads() below is never a divergent barrier.
    if (!gamma_diam)
        {
        // read in the gammas (1 dimensional array): cooperatively stage the
        // per-type gamma table into dynamic shared memory (s_gammas, sized
        // by the host launcher to sizeof(float)*n_types)
        for (int cur_offset = 0; cur_offset < n_types; cur_offset += blockDim.x)
            {
            if (cur_offset + threadIdx.x < n_types)
                s_gammas[cur_offset + threadIdx.x] = d_gamma[cur_offset + threadIdx.x];
            }
        __syncthreads();
        }
    // determine which particle this thread works on (MEM TRANSFER: 4 bytes)
    int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (group_idx < group_size)
        {
        // indirection: group member -> particle index
        unsigned int idx = d_group_members[group_idx];
        // calculate the additional BD force
        // read the current particle velocity (MEM TRANSFER: 16 bytes)
        float4 vel = d_vel[idx];
        // read in the tag of our particle; it seeds the per-particle RNG below
        // (MEM TRANSFER: 4 bytes)
        unsigned int ptag = d_tag[idx];
        // calculate the magnitude of the random force
        float gamma;
        if (gamma_diam)
            {
            // gamma is taken directly from the particle diameter
            // (MEM TRANSFER: 4 bytes)
            gamma = d_diameter[idx];
            }
        else
            {
            // read in the type of our particle, stored in the fourth (w)
            // component of the position float4, then look up its gamma
            unsigned int typ = __float_as_int(d_pos[idx].w);
            gamma = s_gammas[typ];
            }
        // amplitude of the random kick; presumably scaled so that uniform
        // noise in (-1,1) reproduces temperature T (variance 1/3, hence the
        // factor 6) -- TODO confirm against the BD thermostat derivation
        float coeff = sqrtf(6.0f * gamma * T / deltaT);
        float3 bd_force = make_float3(0.0f, 0.0f, 0.0f);
        // Initialize the Random Number Generator and generate the 3 random numbers
        SaruGPU s(ptag, timestep, seed); // 3 dimensional seeding
        float randomx=s.f(-1.0, 1.0);
        float randomy=s.f(-1.0, 1.0);
        float randomz=s.f(-1.0, 1.0);
        // BD force = random kick + viscous drag (-gamma * v), per component
        bd_force.x = randomx*coeff - gamma*vel.x;
        bd_force.y = randomy*coeff - gamma*vel.y;
        // in 2D simulations (D <= 2) no force is applied along z
        if (D > 2.0f)
            bd_force.z = randomz*coeff - gamma*vel.z;
        // read in the net force
        float4 fi = d_net_force[idx];
        // accumulate the BD force into the net force and write out
        // (MEM TRANSFER: 32 bytes)
        fi.x += bd_force.x;
        fi.y += bd_force.y;
        fi.z += bd_force.z;
        d_net_force[idx] = fi;
        }
    }
/*! \param d_pos array of particle positions and types
\param d_vel array of particle velocities
\param d_diameter array of particle diameters
\param d_tag array of particle tags
\param d_group_members Device array listing the indicies of the mebers of the group to integrate
\param group_size Number of members in the group
\param d_net_force Net force on each particle
\param bdnvt_args Collected arguments for gpu_bdnvt_step_two_kernel()
\param deltaT Amount of real time to step forward in one time step
\param D Dimensionality of the system
*/
cudaError_t gpu_bdnvt_force( const Scalar4 *d_pos,
                             const Scalar4 *d_vel,
                             const Scalar *d_diameter,
                             const unsigned int *d_tag,
                             unsigned int *d_group_members,
                             unsigned int group_size,
                             float4 *d_net_force,
                             const bdnvt_step_two_args& bdnvt_args,
                             float deltaT,
                             float D)
    {
    // Nothing to do for an empty group; also avoids launching a grid with
    // zero blocks (an invalid configuration) after the ceil-div below.
    if (group_size == 0)
        return cudaSuccess;

    // setup the grid to run the kernel
    int block_size = 256;
    // ceiling division: just enough blocks to cover group_size
    // (the previous group_size/block_size + 1 launched one superfluous
    // block whenever group_size was an exact multiple of block_size)
    dim3 grid( (group_size + block_size - 1) / block_size, 1, 1);
    dim3 threads(block_size, 1, 1);

    // run the kernel; dynamic shared memory holds the per-type gamma table
    gpu_bdnvt_bdforce_kernel<<< grid, threads, sizeof(float)*bdnvt_args.n_types >>>
                           (d_pos,
                            d_vel,
                            d_diameter,
                            d_tag,
                            d_group_members,
                            group_size,
                            d_net_force,
                            bdnvt_args.d_gamma,
                            bdnvt_args.n_types,
                            bdnvt_args.gamma_diam,
                            bdnvt_args.timestep,
                            bdnvt_args.seed,
                            bdnvt_args.T,
                            deltaT,
                            D);

    // Report launch-configuration errors instead of unconditional success
    // (execution errors still surface at the caller's next synchronizing
    // call). Peek rather than get so the sticky error state is preserved.
    return cudaPeekAtLastError();
    }
|
981dbd31d8a3e0d707078301f73aa618c196867a.hip | // !!! This is a file automatically generated by hipify!!!
/* This code accompanies
* Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics
* https://doi.org/10.1016/j.jcp.2019.07.029
* Numerical analysis of electroconvection in cross-flow with unipolar charge injection, Physical Review Fluids
* https://doi.org/10.1103/PhysRevFluids.4.103701
*
* Yifei Guan, Igor Novosselov
*
* Author: Yifei Guan
*
*/
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "LBM.h"
#include <hip/device_functions.h>
#define RAD 1
__global__ void gpu_poisson(double*, double*,double*);
__global__ void gpu_efield(double*, double*, double*);
__global__ void odd_extension(double*, hipfftDoubleComplex*);
__global__ void gpu_derivative(double*, double*, hipfftDoubleComplex*);
__global__ void odd_extract(double*, hipfftDoubleComplex*);
__global__ void gpu_bc(double*);
// Row-major index into a shared-memory tile whose row width is the block
// width (nThreads) plus RAD halo columns on each side.
__device__ __forceinline__ size_t gpu_s_scalar_index(unsigned int x, unsigned int y)
{
	const size_t tile_pitch = nThreads + 2 * RAD;
	return tile_pitch * y + x;
}
// Iteratively solves the Poisson equation for the electric potential
// phi_gpu with source term charge_gpu (both device arrays, NX x NY),
// by launching gpu_poisson relaxation sweeps until the maximum per-cell
// update (returned in R) drops below TOLERANCE.
// Blocking: the hipMemcpy below synchronizes each sweep before the host
// inspects the residual field.
__host__
void poisson_phi(double *charge_gpu, double *phi_gpu)
{
	// blocks in grid (assumes NX is a multiple of nThreads)
	dim3 grid(NX / nThreads, NY, 1);
	// threads in block
	dim3 threads(nThreads, 1, 1);
	unsigned int it = 0;
	double MAX_ITERATIONS = 1.0E6;
	double TOLERANCE = 1.0e-9;
	// host buffer for the residual field
	// NOTE(review): malloc result is not checked -- assumes allocation succeeds
	double *Res = (double*)malloc(mem_size_scalar);
	double error = 0.0;
	// device buffer: per-cell |phi_new - phi_old| written by gpu_poisson
	double *R;
	checkCudaErrors(hipMalloc((void**)&R, mem_size_scalar));
	for (it = 0; it < MAX_ITERATIONS; ++it) {
		error = 0.0;
		// one relaxation sweep over the whole field
		gpu_poisson << < grid, threads >> > (charge_gpu, phi_gpu, R);
		// copy residuals back (synchronizes with the kernel above)
		checkCudaErrors(hipMemcpy(Res, R, mem_size_scalar, hipMemcpyDeviceToHost));
		// host-side max-norm reduction of the residual field
		// NOTE(review): this D2H copy + scan every sweep dominates run time;
		// a device-side max reduction would be much faster
		for (unsigned int y = 0; y < NY; ++y) {
			for (unsigned int x = 0; x < NX; ++x) {
				//if (it % 1000 == 1) printf("%g\n", error);
				if (error < Res[scalar_index(x, y)]) error = Res[scalar_index(x, y)];
			}
		}
		// converged: stop before hitting MAX_ITERATIONS
		if (error < TOLERANCE) break;
	}
	checkCudaErrors(hipFree(R));
	free(Res);
	//printf("%g\n", error);
	// it only equals MAX_ITERATIONS when the loop ran to completion
	// without ever breaking, i.e. the solver did not converge
	if (it == MAX_ITERATIONS) {
		printf("Poisson solver did not converge!\n");
		printf("Residual = %g\n", error);
		system("pause");
		//exit(-1);
	}
	getLastCudaError("Poisson solver kernel error");
}
// One sweep of the iterative update for laplacian(phi) = -charge/eps, using a
// shared-memory tile with RAD halo cells. Launch layout: grid(NX/nThreads, NY),
// block(nThreads, 1): y == blockIdx.y is uniform over a block, so the Dirichlet
// branches below make the whole block return together and __syncthreads() is
// never reached divergently.
__global__ void gpu_poisson(double *c, double *fi, double *R){
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int s_y = threadIdx.y + RAD;
    unsigned int s_x = threadIdx.x + RAD;
    // Periodic wrap in x; the y wrap is harmless because both wall rows are
    // handled by the Dirichlet branches and never use their "neighbors".
    unsigned int xp1 = (x + blockDim.x) % NX;
    unsigned int yp1 = (y + blockDim.y) % NY;
    unsigned int xm1 = (NX + x - 1) % NX;
    unsigned int ym1 = (NY + y - 1) % NY;
    __shared__ double s_in[(2*RAD + nThreads)*3];
    // load to shared memory (regular cells)
    s_in[gpu_s_scalar_index(s_x,s_y)] = fi[gpu_scalar_index(x, y)];
    // load halo cells
    if (threadIdx.x < RAD) {
        s_in[gpu_s_scalar_index(s_x - RAD, s_y)] = fi[gpu_scalar_index(xm1, y)];
        s_in[gpu_s_scalar_index(s_x + blockDim.x, s_y)] = fi[gpu_scalar_index(xp1, y)];
    }
    if (threadIdx.y < RAD) {
        s_in[gpu_s_scalar_index(s_x, s_y - RAD)] = fi[gpu_scalar_index(x, ym1)];
        s_in[gpu_s_scalar_index(s_x, s_y + blockDim.y)] = fi[gpu_scalar_index(x, yp1)];
    }
    // Dirichlet boundary conditions: pinned values have zero residual.
    // Writing R here fixes the previous bug of leaving these entries of the
    // residual buffer unwritten (the host scans the full NX*NY array).
    if (y == 0) {
        fi[gpu_scalar_index(x, y)] = voltage;
        R[gpu_scalar_index(x, y)] = 0.0;
        return;
    }
    if (y == NY - 1) {
        fi[gpu_scalar_index(x, y)] = 0.0;
        R[gpu_scalar_index(x, y)] = 0.0;
        return;
    }
    __syncthreads();
    double charge = c[gpu_scalar_index(x, y)];
    double phi  = s_in[gpu_s_scalar_index(s_x, s_y)];
    double phiL = s_in[gpu_s_scalar_index(s_x-1, s_y)];
    double phiR = s_in[gpu_s_scalar_index(s_x+1, s_y)];
    double phiU = s_in[gpu_s_scalar_index(s_x, s_y+1)];
    double phiD = s_in[gpu_s_scalar_index(s_x, s_y-1)];
    double source = (charge / eps) * dx * dx; // Right hand side of the equation
    double phi_old = phi;
    // 5-point stencil average; the 0.25 factor and dx-only scaling assume a
    // square grid (dx == dy) — TODO confirm against LBM.h.
    phi = 0.25 * (phiL + phiR + phiU + phiD + source);
    // Record the error (absolute change of this cell during this sweep)
    R[gpu_scalar_index(x, y)] = fabs(phi - phi_old);
    fi[gpu_scalar_index(x, y)] = phi;
}
__host__
void efield(double *phi_gpu, double *Ex_gpu, double *Ey_gpu) {
    // Compute E = -grad(phi) on the device, then patch the Ey wall rows.
    dim3 block_dim(nThreads, 1, 1);            // threads per block
    dim3 grid_dim(NX / nThreads, NY, 1);       // blocks covering the NX x NY field
    gpu_efield << < grid_dim, block_dim >> > (phi_gpu, Ex_gpu, Ey_gpu);
    gpu_bc << < grid_dim, block_dim >> > (Ey_gpu);
    getLastCudaError("Efield kernel error");
}
// Central-difference electric field E = -grad(phi):
//   ex = -(phiR - phiL)/(2*dx),  ey = -(phiU - phiD)/(2*dy).
// Neighbor indices wrap periodically in both directions; the ey wall rows are
// subsequently overwritten by gpu_bc (see efield()). Note: the local 'phi'
// value is loaded but unused by the difference formulas.
__global__ void gpu_efield(double *fi, double *ex, double *ey){
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int xp1 = (x + 1) % NX;        // periodic neighbor to the right
    unsigned int yp1 = (y + 1) % NY;        // periodic neighbor above
    unsigned int xm1 = (NX + x - 1) % NX;
    unsigned int ym1 = (NY + y - 1) % NY;
    double phi = fi[gpu_scalar_index(x, y)];
    double phiL = fi[gpu_scalar_index(xm1, y)];
    double phiR = fi[gpu_scalar_index(xp1, y)];
    double phiU = fi[gpu_scalar_index(x, yp1)];
    double phiD = fi[gpu_scalar_index(x, ym1)];
    ex[gpu_scalar_index(x, y)] = 0.5*(phiL - phiR) / dx;
    ey[gpu_scalar_index(x, y)] = 0.5*(phiD - phiU) / dy;
}
// Patch the two wall rows of ey with the value from the adjacent interior row
// (zero-gradient extrapolation). Threads on interior rows do nothing.
__global__ void gpu_bc(double *ey) {
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    if (y == 0) {
        ey[gpu_scalar_index(x, 0)] = ey[gpu_scalar_index(x, 1)];
    } else if (y == NY - 1) {
        ey[gpu_scalar_index(x, NY - 1)] = ey[gpu_scalar_index(x, NY - 2)];
    }
}
// =========================================================================
// Fast poisson solver domain extension
// =========================================================================
// Host launcher: build the odd-extended complex source field for the FFT
// Poisson solver. The grid spans the extended domain of NE rows (NE is
// defined in LBM.h).
__host__ void extension(double *c, hipfftDoubleComplex *c_ext) {
    // blocks in grid
    dim3  grid(NX / nThreads, NE, 1);
    // threads in block
    dim3  threads(nThreads, 1, 1);
    odd_extension << < grid, threads >> > (c, c_ext);
    getLastCudaError("Odd Extension error");
}
// Build the odd extension of the Poisson right-hand side over the NE-row
// extended domain (NE defined in LBM.h; presumably NE = 2*(NY-1) for an
// odd-symmetric extension — TODO confirm). Imaginary parts are zeroed so the
// array can feed a complex FFT. The Dirichlet wall values (voltage at y = 0,
// zero at y = NY-1) are folded into the rows next to the walls as
// -voltage/dy^2 and +voltage/dy^2 corrections.
__global__ void odd_extension(double *charge, hipfftDoubleComplex *charge_ext) {
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    // Bottom wall row: zero by odd symmetry.
    if (y == 0) {
        charge_ext[gpu_scalar_index(x, y)].x = 0.0;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
    // First interior row: RHS plus the bottom-wall Dirichlet correction.
    if (y == 1) {
        charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps - voltage / dy / dy;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
    // Remaining interior rows of the physical domain.
    if (y > 1 && y < NY - 1) {
        charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
    // Top wall row: zero by odd symmetry.
    if (y == NY - 1) {
        charge_ext[gpu_scalar_index(x, y)].x = 0.0;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
    // Mirrored rows: sign-flipped copy of row NE - y (odd extension).
    if (y > NY - 1 && y<NE-1) {
        charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, NE - y)] / eps;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
    // Mirror of row 1, with the wall correction flipped as well.
    if (y == NE - 1) {
        charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, 1)] / eps + voltage / dy / dy;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
}
// Host launcher: divide the transformed source by the discrete Laplacian
// symbol (see gpu_derivative) over the NE-row extended domain.
__host__ void derivative(double *kx, double *ky, hipfftDoubleComplex *source) {
    // blocks in grid
    dim3  grid(NX / nThreads, NE, 1);
    // threads in block
    dim3  threads(nThreads, 1, 1);
    gpu_derivative << < grid, threads >> > (kx, ky, source);
    getLastCudaError("Gpu derivative error");
}
// In-place spectral solve step: divide each Fourier mode of the source by the
// negated symbol mu of the discrete operator — spectral (I^2) in x, and the
// second-order finite-difference symbol (4/dy^2)*sin^2(J*dy/2) in y. The
// (0,0) mode has mu == 0 and is pinned to 1.0 to avoid dividing by zero.
__global__ void gpu_derivative(double *kx, double *ky, hipfftDoubleComplex *source) {
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    double I = kx[x];   // wavenumber in x
    double J = ky[y];   // wavenumber in y
    double mu = (4.0 / dy / dy)*(sin(J*dy*0.5)*sin(J*dy*0.5)) + I*I;
    if (y == 0 && x == 0) mu = 1.0;
    source[gpu_scalar_index(x, y)].x = -source[gpu_scalar_index(x, y)].x / mu;
    source[gpu_scalar_index(x, y)].y = -source[gpu_scalar_index(x, y)].y / mu;
}
// Host launcher: pull the physical NY-row potential out of the extended
// complex solution and re-apply the Dirichlet wall values.
__host__ void extract(double *fi, hipfftDoubleComplex *fi_ext) {
    // blocks in grid
    dim3  grid(NX / nThreads, NY, 1);
    // threads in block
    dim3  threads(nThreads, 1, 1);
    odd_extract << < grid, threads >> > (fi, fi_ext);
    // NOTE(review): error label copied from extension(); "Odd Extract error"
    // would be more accurate.
    getLastCudaError("Odd Extension error");
}
// Copy the solved potential back from the extended complex field: interior
// rows take the real part scaled by 1/SIZE (SIZE is defined in LBM.h;
// presumably the FFT normalization factor — TODO confirm); wall rows get the
// exact Dirichlet values.
__global__ void odd_extract(double *phi, hipfftDoubleComplex *phi_ext) {
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    if (y == 0) {
        phi[gpu_scalar_index(x, y)] = voltage;
        return;
    }
    if (y == NY-1) {
        phi[gpu_scalar_index(x, y)] = 0.0;
        return;
    }
    phi[gpu_scalar_index(x, y)] = phi_ext[gpu_scalar_index(x, y)].x/SIZE;
}
| 981dbd31d8a3e0d707078301f73aa618c196867a.cu | /* This code accompanies
* Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics
* https://doi.org/10.1016/j.jcp.2019.07.029
* Numerical analysis of electroconvection in cross-flow with unipolar charge injection, Physical Review Fluids
* https://doi.org/10.1103/PhysRevFluids.4.103701
*
* Yifei Guan, Igor Novosselov
*
* Author: Yifei Guan
*
*/
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <cufft.h>
#include "LBM.h"
#include <device_functions.h>
#define RAD 1
__global__ void gpu_poisson(double*, double*,double*);
__global__ void gpu_efield(double*, double*, double*);
__global__ void odd_extension(double*, cufftDoubleComplex*);
__global__ void gpu_derivative(double*, double*, cufftDoubleComplex*);
__global__ void odd_extract(double*, cufftDoubleComplex*);
__global__ void gpu_bc(double*);
// Flattened index into gpu_poisson's shared-memory tile: each tile row holds
// nThreads interior cells plus RAD halo cells on each side.
__device__ __forceinline__ size_t gpu_s_scalar_index(unsigned int x, unsigned int y)
{
    return (2*RAD + nThreads)*y + x;
}
__host__
void poisson_phi(double *charge_gpu, double *phi_gpu)
{
    // Iterative (Jacobi-style) Poisson solver for the electric potential:
    // repeatedly launches gpu_poisson and stops when the largest pointwise
    // update (residual) drops below TOLERANCE, or after MAX_ITERATIONS sweeps.
    // charge_gpu / phi_gpu are device arrays of NX*NY doubles.
    // blocks in grid
    dim3  grid(NX / nThreads, NY, 1);
    // threads in block
    dim3  threads(nThreads, 1, 1);

    unsigned int it = 0;
    // Iteration cap is a count, not a measurement — keep it integral instead
    // of the previous double (which forced a uint/double comparison below).
    const unsigned int MAX_ITERATIONS = 1000000;
    const double TOLERANCE = 1.0e-9;
    double *Res = (double*)malloc(mem_size_scalar);
    double error = 0.0;
    double *R;
    checkCudaErrors(cudaMalloc((void**)&R, mem_size_scalar));
    // gpu_poisson does not write a residual for the Dirichlet rows (y == 0 and
    // y == NY-1); zero-fill R so the host-side max scan below never reads
    // indeterminate memory (previously the buffer was left uninitialized).
    checkCudaErrors(cudaMemset(R, 0, mem_size_scalar));
    for (it = 0; it < MAX_ITERATIONS; ++it) {
        error = 0.0;
        gpu_poisson << < grid, threads >> > (charge_gpu, phi_gpu, R);
        checkCudaErrors(cudaMemcpy(Res, R, mem_size_scalar, cudaMemcpyDeviceToHost));
        // Reduce the per-cell residuals to their maximum on the host.
        for (unsigned int y = 0; y < NY; ++y) {
            for (unsigned int x = 0; x < NX; ++x) {
                if (error < Res[scalar_index(x, y)]) error = Res[scalar_index(x, y)];
            }
        }
        if (error < TOLERANCE) break;
    }
    checkCudaErrors(cudaFree(R));
    free(Res);
    if (it == MAX_ITERATIONS) {
        printf("Poisson solver did not converge!\n");
        printf("Residual = %g\n", error);
        system("pause");
        //exit(-1);
    }
    getLastCudaError("Poisson solver kernel error");
}
// One sweep of the iterative update for laplacian(phi) = -charge/eps, using a
// shared-memory tile with RAD halo cells. Launch layout: grid(NX/nThreads, NY),
// block(nThreads, 1): y == blockIdx.y is uniform over a block, so the Dirichlet
// branches below make the whole block return together and __syncthreads() is
// never reached divergently.
__global__ void gpu_poisson(double *c, double *fi, double *R){
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int s_y = threadIdx.y + RAD;
    unsigned int s_x = threadIdx.x + RAD;
    // Periodic wrap in x; the y wrap is harmless because both wall rows are
    // handled by the Dirichlet branches and never use their "neighbors".
    unsigned int xp1 = (x + blockDim.x) % NX;
    unsigned int yp1 = (y + blockDim.y) % NY;
    unsigned int xm1 = (NX + x - 1) % NX;
    unsigned int ym1 = (NY + y - 1) % NY;
    __shared__ double s_in[(2*RAD + nThreads)*3];
    // load to shared memory (regular cells)
    s_in[gpu_s_scalar_index(s_x,s_y)] = fi[gpu_scalar_index(x, y)];
    // load halo cells
    if (threadIdx.x < RAD) {
        s_in[gpu_s_scalar_index(s_x - RAD, s_y)] = fi[gpu_scalar_index(xm1, y)];
        s_in[gpu_s_scalar_index(s_x + blockDim.x, s_y)] = fi[gpu_scalar_index(xp1, y)];
    }
    if (threadIdx.y < RAD) {
        s_in[gpu_s_scalar_index(s_x, s_y - RAD)] = fi[gpu_scalar_index(x, ym1)];
        s_in[gpu_s_scalar_index(s_x, s_y + blockDim.y)] = fi[gpu_scalar_index(x, yp1)];
    }
    // Dirichlet boundary conditions: pinned values have zero residual.
    // Writing R here fixes the previous bug of leaving these entries of the
    // residual buffer unwritten (the host scans the full NX*NY array).
    if (y == 0) {
        fi[gpu_scalar_index(x, y)] = voltage;
        R[gpu_scalar_index(x, y)] = 0.0;
        return;
    }
    if (y == NY - 1) {
        fi[gpu_scalar_index(x, y)] = 0.0;
        R[gpu_scalar_index(x, y)] = 0.0;
        return;
    }
    __syncthreads();
    double charge = c[gpu_scalar_index(x, y)];
    double phi  = s_in[gpu_s_scalar_index(s_x, s_y)];
    double phiL = s_in[gpu_s_scalar_index(s_x-1, s_y)];
    double phiR = s_in[gpu_s_scalar_index(s_x+1, s_y)];
    double phiU = s_in[gpu_s_scalar_index(s_x, s_y+1)];
    double phiD = s_in[gpu_s_scalar_index(s_x, s_y-1)];
    double source = (charge / eps) * dx * dx; // Right hand side of the equation
    double phi_old = phi;
    // 5-point stencil average; the 0.25 factor and dx-only scaling assume a
    // square grid (dx == dy) — TODO confirm against LBM.h.
    phi = 0.25 * (phiL + phiR + phiU + phiD + source);
    // Record the error (absolute change of this cell during this sweep)
    R[gpu_scalar_index(x, y)] = fabs(phi - phi_old);
    fi[gpu_scalar_index(x, y)] = phi;
}
// Host launcher: compute E = -grad(phi) (gpu_efield), then patch the Ey wall
// rows (gpu_bc). Grid covers the NX x NY field with nThreads-wide 1D blocks.
__host__
void efield(double *phi_gpu, double *Ex_gpu, double *Ey_gpu) {
    // blocks in grid
    dim3  grid(NX / nThreads, NY, 1);
    // threads in block
    dim3  threads(nThreads, 1, 1);
    gpu_efield << < grid, threads >> > (phi_gpu, Ex_gpu, Ey_gpu);
    gpu_bc << <grid, threads >> > (Ey_gpu);
    getLastCudaError("Efield kernel error");
}
// Central-difference electric field E = -grad(phi); neighbor indices wrap
// periodically, and the ey wall rows are later overwritten by gpu_bc.
// Note: the local 'phi' value is loaded but unused by the formulas below.
__global__ void gpu_efield(double *fi, double *ex, double *ey){
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int xp1 = (x + 1) % NX;
    unsigned int yp1 = (y + 1) % NY;
    unsigned int xm1 = (NX + x - 1) % NX;
    unsigned int ym1 = (NY + y - 1) % NY;
    double phi = fi[gpu_scalar_index(x, y)];
    double phiL = fi[gpu_scalar_index(xm1, y)];
    double phiR = fi[gpu_scalar_index(xp1, y)];
    double phiU = fi[gpu_scalar_index(x, yp1)];
    double phiD = fi[gpu_scalar_index(x, ym1)];
    ex[gpu_scalar_index(x, y)] = 0.5*(phiL - phiR) / dx;
    ey[gpu_scalar_index(x, y)] = 0.5*(phiD - phiU) / dy;
}
// Patch the two wall rows of ey with the value from the adjacent interior row
// (zero-gradient extrapolation). Threads on interior rows do nothing.
__global__ void gpu_bc(double *ey) {
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    if (y == 0) {
        //ex[gpu_scalar_index(x, 0)] = ex[gpu_scalar_index(x, 1)];
        ey[gpu_scalar_index(x, 0)] = ey[gpu_scalar_index(x, 1)];
        return;
    }
    if (y == NY - 1) {
        //ex[gpu_scalar_index(x, NY - 1)] = ex[gpu_scalar_index(x, NY - 2)];
        ey[gpu_scalar_index(x, NY - 1)] = ey[gpu_scalar_index(x, NY - 2)];
        return;
    }
}
// =========================================================================
// Fast poisson solver domain extension
// =========================================================================
// Host launcher: build the odd-extended complex source field for the FFT
// Poisson solver. The grid spans the extended domain of NE rows.
__host__ void extension(double *c, cufftDoubleComplex *c_ext) {
    // blocks in grid
    dim3  grid(NX / nThreads, NE, 1);
    // threads in block
    dim3  threads(nThreads, 1, 1);
    odd_extension << < grid, threads >> > (c, c_ext);
    getLastCudaError("Odd Extension error");
}
// Build the odd extension of the Poisson right-hand side over the NE-row
// extended domain (imaginary parts zeroed for the complex FFT). The Dirichlet
// wall values (voltage at y = 0, zero at y = NY-1) are folded into the rows
// next to the walls as -voltage/dy^2 and +voltage/dy^2 corrections; rows
// above NY-1 are the sign-flipped mirror of the physical rows.
__global__ void odd_extension(double *charge, cufftDoubleComplex *charge_ext) {
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    if (y == 0) {
        charge_ext[gpu_scalar_index(x, y)].x = 0.0;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
    if (y == 1) {
        charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps - voltage / dy / dy;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
    if (y > 1 && y < NY - 1) {
        charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
    if (y == NY - 1) {
        charge_ext[gpu_scalar_index(x, y)].x = 0.0;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
    if (y > NY - 1 && y<NE-1) {
        charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, NE - y)] / eps;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
    if (y == NE - 1) {
        charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, 1)] / eps + voltage / dy / dy;
        charge_ext[gpu_scalar_index(x, y)].y = 0.0;
        return;
    }
}
// Host launcher: divide the transformed source by the discrete Laplacian
// symbol (see gpu_derivative) over the NE-row extended domain.
__host__ void derivative(double *kx, double *ky, cufftDoubleComplex *source) {
    // blocks in grid
    dim3  grid(NX / nThreads, NE, 1);
    // threads in block
    dim3  threads(nThreads, 1, 1);
    gpu_derivative << < grid, threads >> > (kx, ky, source);
    getLastCudaError("Gpu derivative error");
}
// In-place spectral solve step: divide each Fourier mode of the source by the
// negated symbol mu of the discrete operator — spectral (I^2) in x, and the
// second-order finite-difference symbol (4/dy^2)*sin^2(J*dy/2) in y. The
// (0,0) mode has mu == 0 and is pinned to 1.0 to avoid dividing by zero.
__global__ void gpu_derivative(double *kx, double *ky, cufftDoubleComplex *source) {
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    double I = kx[x];   // wavenumber in x
    double J = ky[y];   // wavenumber in y
    double mu = (4.0 / dy / dy)*(sin(J*dy*0.5)*sin(J*dy*0.5)) + I*I;
    if (y == 0 && x == 0) mu = 1.0;
    source[gpu_scalar_index(x, y)].x = -source[gpu_scalar_index(x, y)].x / mu;
    source[gpu_scalar_index(x, y)].y = -source[gpu_scalar_index(x, y)].y / mu;
}
// Host launcher: pull the physical NY-row potential out of the extended
// complex solution and re-apply the Dirichlet wall values.
__host__ void extract(double *fi, cufftDoubleComplex *fi_ext) {
    // blocks in grid
    dim3  grid(NX / nThreads, NY, 1);
    // threads in block
    dim3  threads(nThreads, 1, 1);
    odd_extract << < grid, threads >> > (fi, fi_ext);
    // NOTE(review): error label copied from extension(); "Odd Extract error"
    // would be more accurate.
    getLastCudaError("Odd Extension error");
}
// Copy the solved potential back from the extended complex field: interior
// rows take the real part scaled by 1/SIZE (SIZE is defined in LBM.h;
// presumably the FFT normalization factor — TODO confirm); wall rows get the
// exact Dirichlet values.
__global__ void odd_extract(double *phi, cufftDoubleComplex *phi_ext) {
    unsigned int y = blockIdx.y;
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    if (y == 0) {
        phi[gpu_scalar_index(x, y)] = voltage;
        return;
    }
    if (y == NY-1) {
        phi[gpu_scalar_index(x, y)] = 0.0;
        return;
    }
    phi[gpu_scalar_index(x, y)] = phi_ext[gpu_scalar_index(x, y)].x/SIZE;
}
|
fc11354b07aeeb09566968a508479a9ac2c10f94.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#ifndef LBM_KERNEL_CU
#define LBM_KERNEL_CU
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
// includes, project
#include "main.h"
#include "lbm.h"
#ifndef __MCUDA__
#include <hip/hip_runtime.h>
#else
#include <mcuda.h>
#endif
#define DFL1 (1.0f/ 3.0f)
#define DFL2 (1.0f/18.0f)
#define DFL3 (1.0f/36.0f)
/******************************************************************************/
#if 0
void __global__ performStreamCollide_kernel_wrapper_grad(float* srcGrid, float* d_srcGrid, float *dstGrid, float *d_dstGrid) {
#ifdef ALLOW_AD
__enzyme_autodiff((void *) performStreamCollide_kernel,
enzyme_dup, srcGrid, d_srcGrid,
enzyme_dup, dstGrid, d_dstGrid);
#else
performStreamCollide_kernel(srcGrid, dstGrid);
#endif
}
#endif
// One D3Q19 LBM stream-and-collide step for a single lattice cell.
// Thread mapping (set by the launch wrappers): threadIdx.x = x, blockIdx.x = y,
// blockIdx.y = z, converted to sweep coordinates by the SWEEP_* macros.
// The SRC_*/DST_* macros gather/scatter the 19 distribution values; whether
// the read or the write side is the "remote" access depends on the SCATTER
// layout switch (see layout_config.h).
__attribute__((always_inline)) __device__ void performStreamCollide_kernel( void* __restrict__ srcGridUntyped, float* __restrict__ dstGrid )
{
    float * srcGrid = (float *) srcGridUntyped;
    //Using some predefined macros here. Consider this the declaration
    // and initialization of the variables SWEEP_X, SWEEP_Y and SWEEP_Z
    SWEEP_VAR
    SWEEP_X = threadIdx.x;
    SWEEP_Y = blockIdx.x;
    SWEEP_Z = blockIdx.y;
    float temp_swp, tempC, tempN, tempS, tempE, tempW, tempT, tempB;
    float tempNE, tempNW, tempSE, tempSW, tempNT, tempNB, tempST ;
    float tempSB, tempET, tempEB, tempWT, tempWB ;
    //Load all of the input fields
    //This is a gather operation of the SCATTER preprocessor variable
    // is undefined in layout_config.h, or a "local" read otherwise
    tempC = SRC_C(srcGrid);
    tempN = SRC_N(srcGrid);
    tempS = SRC_S(srcGrid);
    tempE = SRC_E(srcGrid);
    tempW = SRC_W(srcGrid);
    tempT = SRC_T(srcGrid);
    tempB = SRC_B(srcGrid);
    tempNE= SRC_NE(srcGrid);
    tempNW= SRC_NW(srcGrid);
    tempSE = SRC_SE(srcGrid);
    tempSW = SRC_SW(srcGrid);
    tempNT = SRC_NT(srcGrid);
    tempNB = SRC_NB(srcGrid);
    tempST = SRC_ST(srcGrid);
    tempSB = SRC_SB(srcGrid);
    tempET = SRC_ET(srcGrid);
    tempEB = SRC_EB(srcGrid);
    tempWT = SRC_WT(srcGrid);
    tempWB = SRC_WB(srcGrid);
    //Test whether the cell is fluid or obstacle
    if( TEST_FLAG_SWEEP( srcGrid, OBSTACLE )) {
        //Swizzle the inputs: reflect any fluid coming into this cell
        // back to where it came from (bounce-back)
        temp_swp = tempN ; tempN = tempS ; tempS = temp_swp ;
        temp_swp = tempE ; tempE = tempW ; tempW = temp_swp;
        temp_swp = tempT ; tempT = tempB ; tempB = temp_swp;
        temp_swp = tempNE; tempNE = tempSW ; tempSW = temp_swp;
        temp_swp = tempNW; tempNW = tempSE ; tempSE = temp_swp;
        temp_swp = tempNT ; tempNT = tempSB ; tempSB = temp_swp;
        temp_swp = tempNB ; tempNB = tempST ; tempST = temp_swp;
        temp_swp = tempET ; tempET= tempWB ; tempWB = temp_swp;
        temp_swp = tempEB ; tempEB = tempWT ; tempWT = temp_swp;
    }
    else {
        //The math meat of LBM: ignore for optimization
        // rho: density; (ux,uy,uz): macroscopic velocity; u2: speed-squared term
        float ux, uy, uz, rho, u2;
        float temp1, temp2, temp_base;
        rho = tempC + tempN
            + tempS + tempE
            + tempW + tempT
            + tempB + tempNE
            + tempNW + tempSE
            + tempSW + tempNT
            + tempNB + tempST
            + tempSB + tempET
            + tempEB + tempWT
            + tempWB;
        ux = + tempE - tempW
            + tempNE - tempNW
            + tempSE - tempSW
            + tempET + tempEB
            - tempWT - tempWB;
        uy = + tempN - tempS
            + tempNE + tempNW
            - tempSE - tempSW
            + tempNT + tempNB
            - tempST - tempSB;
        uz = + tempT - tempB
            + tempNT - tempNB
            + tempST - tempSB
            + tempET - tempEB
            + tempWT - tempWB;
        ux /= rho;
        uy /= rho;
        uz /= rho;
        //ACCEL cells are driven with a fixed velocity
        if( TEST_FLAG_SWEEP( srcGrid, ACCEL )) {
            ux = 0.005f;
            uy = 0.002f;
            uz = 0.000f;
        }
        u2 = 1.5f * (ux*ux + uy*uy + uz*uz) - 1.0f;
        temp_base = OMEGA*rho;
        temp1 = DFL1*temp_base;
        //Put the output values for this cell in the shared memory
        // (note: temp_base/temp1 are recomputed here, redundantly — kept as-is)
        temp_base = OMEGA*rho;
        temp1 = DFL1*temp_base;
        temp2 = 1.0f-OMEGA;
        tempC = temp2*tempC + temp1*( - u2);
        temp1 = DFL2*temp_base;
        tempN = temp2*tempN + temp1*( uy*(4.5f*uy + 3.0f) - u2);
        tempS = temp2*tempS + temp1*( uy*(4.5f*uy - 3.0f) - u2);
        tempT = temp2*tempT + temp1*( uz*(4.5f*uz + 3.0f) - u2);
        tempB = temp2*tempB + temp1*( uz*(4.5f*uz - 3.0f) - u2);
        tempE = temp2*tempE + temp1*( ux*(4.5f*ux + 3.0f) - u2);
        tempW = temp2*tempW + temp1*( ux*(4.5f*ux - 3.0f) - u2);
        temp1 = DFL3*temp_base;
        tempNT= temp2*tempNT + temp1 *( (+uy+uz)*(4.5f*(+uy+uz) + 3.0f) - u2);
        tempNB= temp2*tempNB + temp1 *( (+uy-uz)*(4.5f*(+uy-uz) + 3.0f) - u2);
        tempST= temp2*tempST + temp1 *( (-uy+uz)*(4.5f*(-uy+uz) + 3.0f) - u2);
        tempSB= temp2*tempSB + temp1 *( (-uy-uz)*(4.5f*(-uy-uz) + 3.0f) - u2);
        tempNE = temp2*tempNE + temp1 *( (+ux+uy)*(4.5f*(+ux+uy) + 3.0f) - u2);
        tempSE = temp2*tempSE + temp1 *((+ux-uy)*(4.5f*(+ux-uy) + 3.0f) - u2);
        tempET = temp2*tempET + temp1 *( (+ux+uz)*(4.5f*(+ux+uz) + 3.0f) - u2);
        tempEB = temp2*tempEB + temp1 *( (+ux-uz)*(4.5f*(+ux-uz) + 3.0f) - u2);
        tempNW = temp2*tempNW + temp1 *( (-ux+uy)*(4.5f*(-ux+uy) + 3.0f) - u2);
        tempSW = temp2*tempSW + temp1 *( (-ux-uy)*(4.5f*(-ux-uy) + 3.0f) - u2);
        tempWT = temp2*tempWT + temp1 *( (-ux+uz)*(4.5f*(-ux+uz) + 3.0f) - u2);
        tempWB = temp2*tempWB + temp1 *( (-ux-uz)*(4.5f*(-ux-uz) + 3.0f) - u2);
    }
    //Write the results computed above
    //This is a scatter operation of the SCATTER preprocessor variable
    // is defined in layout_config.h, or a "local" write otherwise
    DST_C ( dstGrid ) = tempC;
    DST_N ( dstGrid ) = tempN;
    DST_S ( dstGrid ) = tempS;
    DST_E ( dstGrid ) = tempE;
    DST_W ( dstGrid ) = tempW;
    DST_T ( dstGrid ) = tempT;
    DST_B ( dstGrid ) = tempB;
    DST_NE( dstGrid ) = tempNE;
    DST_NW( dstGrid ) = tempNW;
    DST_SE( dstGrid ) = tempSE;
    DST_SW( dstGrid ) = tempSW;
    DST_NT( dstGrid ) = tempNT;
    DST_NB( dstGrid ) = tempNB;
    DST_ST( dstGrid ) = tempST;
    DST_SB( dstGrid ) = tempSB;
    DST_ET( dstGrid ) = tempET;
    DST_EB( dstGrid ) = tempEB;
    DST_WT( dstGrid ) = tempWT;
    DST_WB( dstGrid ) = tempWB;
}
// Plain (non-AD) launchable entry point; presumably launched with
// block = SIZE_X threads and grid = (SIZE_Y, SIZE_Z), matching the index
// computation used by the Enzyme wrappers — TODO confirm at the call site.
__global__ void performStreamCollide_kernel_wrapper( float* srcGrid, float* dstGrid )
{
    performStreamCollide_kernel(srcGrid, dstGrid );
}
#ifdef ALLOW_AD
// Enzyme automatic-differentiation wrappers: a forward ("augmented") pass that
// records a per-thread tape, and a reverse pass that consumes it. One tape
// entry of SIZE bytes per lattice cell.
struct Byte20 {
    char x[SIZE];
};

extern __device__ int enzyme_dup;
extern __device__ int enzyme_allocated;

#ifdef ABI
// ABI variant: the tape size is passed explicitly to Enzyme.
__device__ Byte20 __enzyme_augmentfwd(void*, int, size_t, int, float*, float*, int, float*, float*);
__global__ void performStreamCollide_augmented( float* src, float* dsrc, float* dst, float* ddst, Byte20* tape)
{
    // Flat cell index matching the kernel's (threadIdx.x, blockIdx.x, blockIdx.y) mapping.
    size_t idx = threadIdx.x + SIZE_X * (blockIdx.x + SIZE_Y * blockIdx.y);
    tape[idx] = __enzyme_augmentfwd((void*)performStreamCollide_kernel, enzyme_allocated, sizeof(Byte20), enzyme_dup, src, dsrc, enzyme_dup, dst, ddst);
}
__device__ void __enzyme_reverse(void*, int, size_t, int, float*, float*, int, float*, float*, Byte20);
__global__ void performStreamCollide_gradient( float* src, float* dsrc, float* dst, float* ddst, Byte20* tape)
{
    size_t idx = threadIdx.x + SIZE_X * (blockIdx.x + SIZE_Y * blockIdx.y);
    __enzyme_reverse((void*)performStreamCollide_kernel, enzyme_allocated, sizeof(Byte20), enzyme_dup, src, dsrc, enzyme_dup, dst, ddst, tape[idx]);
}
#else
// Default variant: tape size inferred by Enzyme.
__device__ Byte20 __enzyme_augmentfwd(void*, int, float*, float*, int, float*, float*);
__global__ void performStreamCollide_augmented( float* src, float* dsrc, float* dst, float* ddst, Byte20* tape)
{
    size_t idx = threadIdx.x + SIZE_X * (blockIdx.x + SIZE_Y * blockIdx.y);
    tape[idx] = __enzyme_augmentfwd((void*)performStreamCollide_kernel, enzyme_dup, src, dsrc, enzyme_dup, dst, ddst);
}
__device__ void __enzyme_reverse(void*, int, float*, float*, int, float*, float*, Byte20);
__global__ void performStreamCollide_gradient( float* src, float* dsrc, float* dst, float* ddst, Byte20* tape)
{
    size_t idx = threadIdx.x + SIZE_X * (blockIdx.x + SIZE_Y * blockIdx.y);
    __enzyme_reverse((void*)performStreamCollide_kernel, enzyme_dup, src, dsrc, enzyme_dup, dst, ddst, tape[idx]);
}
#endif
#endif
#endif // LBM_KERNEL_CU
| fc11354b07aeeb09566968a508479a9ac2c10f94.cu | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#ifndef LBM_KERNEL_CU
#define LBM_KERNEL_CU
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
// includes, project
#include "main.h"
#include "lbm.h"
#ifndef __MCUDA__
#include <cuda.h>
#else
#include <mcuda.h>
#endif
#define DFL1 (1.0f/ 3.0f)
#define DFL2 (1.0f/18.0f)
#define DFL3 (1.0f/36.0f)
/******************************************************************************/
#if 0
void __global__ performStreamCollide_kernel_wrapper_grad(float* srcGrid, float* d_srcGrid, float *dstGrid, float *d_dstGrid) {
#ifdef ALLOW_AD
__enzyme_autodiff((void *) performStreamCollide_kernel,
enzyme_dup, srcGrid, d_srcGrid,
enzyme_dup, dstGrid, d_dstGrid);
#else
performStreamCollide_kernel(srcGrid, dstGrid);
#endif
}
#endif
// One D3Q19 LBM stream-and-collide step for a single lattice cell.
// Thread mapping (set by the launch wrappers): threadIdx.x = x, blockIdx.x = y,
// blockIdx.y = z, converted to sweep coordinates by the SWEEP_* macros.
// The SRC_*/DST_* macros gather/scatter the 19 distribution values; whether
// the read or the write side is the "remote" access depends on the SCATTER
// layout switch (see layout_config.h).
__attribute__((always_inline)) __device__ void performStreamCollide_kernel( void* __restrict__ srcGridUntyped, float* __restrict__ dstGrid )
{
    float * srcGrid = (float *) srcGridUntyped;
    //Using some predefined macros here. Consider this the declaration
    // and initialization of the variables SWEEP_X, SWEEP_Y and SWEEP_Z
    SWEEP_VAR
    SWEEP_X = threadIdx.x;
    SWEEP_Y = blockIdx.x;
    SWEEP_Z = blockIdx.y;
    float temp_swp, tempC, tempN, tempS, tempE, tempW, tempT, tempB;
    float tempNE, tempNW, tempSE, tempSW, tempNT, tempNB, tempST ;
    float tempSB, tempET, tempEB, tempWT, tempWB ;
    //Load all of the input fields
    //This is a gather operation of the SCATTER preprocessor variable
    // is undefined in layout_config.h, or a "local" read otherwise
    tempC = SRC_C(srcGrid);
    tempN = SRC_N(srcGrid);
    tempS = SRC_S(srcGrid);
    tempE = SRC_E(srcGrid);
    tempW = SRC_W(srcGrid);
    tempT = SRC_T(srcGrid);
    tempB = SRC_B(srcGrid);
    tempNE= SRC_NE(srcGrid);
    tempNW= SRC_NW(srcGrid);
    tempSE = SRC_SE(srcGrid);
    tempSW = SRC_SW(srcGrid);
    tempNT = SRC_NT(srcGrid);
    tempNB = SRC_NB(srcGrid);
    tempST = SRC_ST(srcGrid);
    tempSB = SRC_SB(srcGrid);
    tempET = SRC_ET(srcGrid);
    tempEB = SRC_EB(srcGrid);
    tempWT = SRC_WT(srcGrid);
    tempWB = SRC_WB(srcGrid);
    //Test whether the cell is fluid or obstacle
    if( TEST_FLAG_SWEEP( srcGrid, OBSTACLE )) {
        //Swizzle the inputs: reflect any fluid coming into this cell
        // back to where it came from (bounce-back)
        temp_swp = tempN ; tempN = tempS ; tempS = temp_swp ;
        temp_swp = tempE ; tempE = tempW ; tempW = temp_swp;
        temp_swp = tempT ; tempT = tempB ; tempB = temp_swp;
        temp_swp = tempNE; tempNE = tempSW ; tempSW = temp_swp;
        temp_swp = tempNW; tempNW = tempSE ; tempSE = temp_swp;
        temp_swp = tempNT ; tempNT = tempSB ; tempSB = temp_swp;
        temp_swp = tempNB ; tempNB = tempST ; tempST = temp_swp;
        temp_swp = tempET ; tempET= tempWB ; tempWB = temp_swp;
        temp_swp = tempEB ; tempEB = tempWT ; tempWT = temp_swp;
    }
    else {
        //The math meat of LBM: ignore for optimization
        // rho: density; (ux,uy,uz): macroscopic velocity; u2: speed-squared term
        float ux, uy, uz, rho, u2;
        float temp1, temp2, temp_base;
        rho = tempC + tempN
            + tempS + tempE
            + tempW + tempT
            + tempB + tempNE
            + tempNW + tempSE
            + tempSW + tempNT
            + tempNB + tempST
            + tempSB + tempET
            + tempEB + tempWT
            + tempWB;
        ux = + tempE - tempW
            + tempNE - tempNW
            + tempSE - tempSW
            + tempET + tempEB
            - tempWT - tempWB;
        uy = + tempN - tempS
            + tempNE + tempNW
            - tempSE - tempSW
            + tempNT + tempNB
            - tempST - tempSB;
        uz = + tempT - tempB
            + tempNT - tempNB
            + tempST - tempSB
            + tempET - tempEB
            + tempWT - tempWB;
        ux /= rho;
        uy /= rho;
        uz /= rho;
        //ACCEL cells are driven with a fixed velocity
        if( TEST_FLAG_SWEEP( srcGrid, ACCEL )) {
            ux = 0.005f;
            uy = 0.002f;
            uz = 0.000f;
        }
        u2 = 1.5f * (ux*ux + uy*uy + uz*uz) - 1.0f;
        temp_base = OMEGA*rho;
        temp1 = DFL1*temp_base;
        //Put the output values for this cell in the shared memory
        // (note: temp_base/temp1 are recomputed here, redundantly — kept as-is)
        temp_base = OMEGA*rho;
        temp1 = DFL1*temp_base;
        temp2 = 1.0f-OMEGA;
        tempC = temp2*tempC + temp1*( - u2);
        temp1 = DFL2*temp_base;
        tempN = temp2*tempN + temp1*( uy*(4.5f*uy + 3.0f) - u2);
        tempS = temp2*tempS + temp1*( uy*(4.5f*uy - 3.0f) - u2);
        tempT = temp2*tempT + temp1*( uz*(4.5f*uz + 3.0f) - u2);
        tempB = temp2*tempB + temp1*( uz*(4.5f*uz - 3.0f) - u2);
        tempE = temp2*tempE + temp1*( ux*(4.5f*ux + 3.0f) - u2);
        tempW = temp2*tempW + temp1*( ux*(4.5f*ux - 3.0f) - u2);
        temp1 = DFL3*temp_base;
        tempNT= temp2*tempNT + temp1 *( (+uy+uz)*(4.5f*(+uy+uz) + 3.0f) - u2);
        tempNB= temp2*tempNB + temp1 *( (+uy-uz)*(4.5f*(+uy-uz) + 3.0f) - u2);
        tempST= temp2*tempST + temp1 *( (-uy+uz)*(4.5f*(-uy+uz) + 3.0f) - u2);
        tempSB= temp2*tempSB + temp1 *( (-uy-uz)*(4.5f*(-uy-uz) + 3.0f) - u2);
        tempNE = temp2*tempNE + temp1 *( (+ux+uy)*(4.5f*(+ux+uy) + 3.0f) - u2);
        tempSE = temp2*tempSE + temp1 *((+ux-uy)*(4.5f*(+ux-uy) + 3.0f) - u2);
        tempET = temp2*tempET + temp1 *( (+ux+uz)*(4.5f*(+ux+uz) + 3.0f) - u2);
        tempEB = temp2*tempEB + temp1 *( (+ux-uz)*(4.5f*(+ux-uz) + 3.0f) - u2);
        tempNW = temp2*tempNW + temp1 *( (-ux+uy)*(4.5f*(-ux+uy) + 3.0f) - u2);
        tempSW = temp2*tempSW + temp1 *( (-ux-uy)*(4.5f*(-ux-uy) + 3.0f) - u2);
        tempWT = temp2*tempWT + temp1 *( (-ux+uz)*(4.5f*(-ux+uz) + 3.0f) - u2);
        tempWB = temp2*tempWB + temp1 *( (-ux-uz)*(4.5f*(-ux-uz) + 3.0f) - u2);
    }
    //Write the results computed above
    //This is a scatter operation of the SCATTER preprocessor variable
    // is defined in layout_config.h, or a "local" write otherwise
    DST_C ( dstGrid ) = tempC;
    DST_N ( dstGrid ) = tempN;
    DST_S ( dstGrid ) = tempS;
    DST_E ( dstGrid ) = tempE;
    DST_W ( dstGrid ) = tempW;
    DST_T ( dstGrid ) = tempT;
    DST_B ( dstGrid ) = tempB;
    DST_NE( dstGrid ) = tempNE;
    DST_NW( dstGrid ) = tempNW;
    DST_SE( dstGrid ) = tempSE;
    DST_SW( dstGrid ) = tempSW;
    DST_NT( dstGrid ) = tempNT;
    DST_NB( dstGrid ) = tempNB;
    DST_ST( dstGrid ) = tempST;
    DST_SB( dstGrid ) = tempSB;
    DST_ET( dstGrid ) = tempET;
    DST_EB( dstGrid ) = tempEB;
    DST_WT( dstGrid ) = tempWT;
    DST_WB( dstGrid ) = tempWB;
}
// Plain (non-AD) launchable entry point; presumably launched with
// block = SIZE_X threads and grid = (SIZE_Y, SIZE_Z), matching the index
// computation used by the Enzyme wrappers — TODO confirm at the call site.
__global__ void performStreamCollide_kernel_wrapper( float* srcGrid, float* dstGrid )
{
    performStreamCollide_kernel(srcGrid, dstGrid );
}
#ifdef ALLOW_AD
// Enzyme automatic-differentiation wrappers: a forward ("augmented") pass that
// records a per-thread tape, and a reverse pass that consumes it. One tape
// entry of SIZE bytes per lattice cell.
struct Byte20 {
    char x[SIZE];
};

extern __device__ int enzyme_dup;
extern __device__ int enzyme_allocated;

#ifdef ABI
// ABI variant: the tape size is passed explicitly to Enzyme.
__device__ Byte20 __enzyme_augmentfwd(void*, int, size_t, int, float*, float*, int, float*, float*);
__global__ void performStreamCollide_augmented( float* src, float* dsrc, float* dst, float* ddst, Byte20* tape)
{
    // Flat cell index matching the kernel's (threadIdx.x, blockIdx.x, blockIdx.y) mapping.
    size_t idx = threadIdx.x + SIZE_X * (blockIdx.x + SIZE_Y * blockIdx.y);
    tape[idx] = __enzyme_augmentfwd((void*)performStreamCollide_kernel, enzyme_allocated, sizeof(Byte20), enzyme_dup, src, dsrc, enzyme_dup, dst, ddst);
}
__device__ void __enzyme_reverse(void*, int, size_t, int, float*, float*, int, float*, float*, Byte20);
__global__ void performStreamCollide_gradient( float* src, float* dsrc, float* dst, float* ddst, Byte20* tape)
{
    size_t idx = threadIdx.x + SIZE_X * (blockIdx.x + SIZE_Y * blockIdx.y);
    __enzyme_reverse((void*)performStreamCollide_kernel, enzyme_allocated, sizeof(Byte20), enzyme_dup, src, dsrc, enzyme_dup, dst, ddst, tape[idx]);
}
#else
// Default variant: tape size inferred by Enzyme.
__device__ Byte20 __enzyme_augmentfwd(void*, int, float*, float*, int, float*, float*);
__global__ void performStreamCollide_augmented( float* src, float* dsrc, float* dst, float* ddst, Byte20* tape)
{
    size_t idx = threadIdx.x + SIZE_X * (blockIdx.x + SIZE_Y * blockIdx.y);
    tape[idx] = __enzyme_augmentfwd((void*)performStreamCollide_kernel, enzyme_dup, src, dsrc, enzyme_dup, dst, ddst);
}
__device__ void __enzyme_reverse(void*, int, float*, float*, int, float*, float*, Byte20);
__global__ void performStreamCollide_gradient( float* src, float* dsrc, float* dst, float* ddst, Byte20* tape)
{
    size_t idx = threadIdx.x + SIZE_X * (blockIdx.x + SIZE_Y * blockIdx.y);
    __enzyme_reverse((void*)performStreamCollide_kernel, enzyme_dup, src, dsrc, enzyme_dup, dst, ddst, tape[idx]);
}
#endif
#endif
#endif // LBM_KERNEL_CU
|
dee4e02a9186cbbab20aa67566fcbae5a0948c71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "slice_rows_kernel.h"
#include <stdio.h>
#include "cuMat_config.h"
// Copy the row band [offset, offset+len) of a column-major m x n matrix
// (element (row, col) at src[col*n + row]) into dst, which holds m columns of
// len rows. One thread per (col, row) pair; threads outside the band or the
// matrix are no-ops.
__global__ void slice_rows_kernel (const float * __restrict__ src,
        float * __restrict__ dst, int m, int n, int offset, int len){

    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    //printf("%d x %d\n", row, col);
    if (offset <= row && row < offset+len && row < n && col < m){
        //printf("offset:%d len:%d %dx%d [%d] %f\n", offset, len, row, col, col * n + row, src[col * n + row]);
        dst[col * len + row-offset] = src[col * n + row];
    }
}
// Host launcher for slice_rows_kernel: BLOCK_SIZE x BLOCK_SIZE blocks covering
// an m x n index space (ceil-divided), then a blocking synchronize.
// NOTE(review): neither the launch nor the sync result is error-checked.
void slice_rows_kernel_exec(const float *src, float *dst, int m, int n, int offset, int len){
    /* specified block and grid size */
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid((m+block.x-1)/block.x, (n+block.y-1)/block.y);
    //printf("m:%d n:%d offset:%d len:%d\n", m, n, offset, len);
    /* launch kernel */
    hipLaunchKernelGGL(( slice_rows_kernel), dim3(grid), dim3(block), 0, 0, src, dst, m, n, offset, len);
    hipDeviceSynchronize();
}
// Inverse of slice_rows_kernel: scatter an m-column x len-row band back into
// rows [offset, offset+len) of the column-major m x n destination matrix.
__global__ void join_rows_kernel (const float * __restrict__ src,
        float * __restrict__ dst, int m, int n, int offset, int len){

    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    //printf("%d x %d\n", row, col);
    if (offset <= row && row < offset+len && row < n && col < m){
        //printf("offset:%d len:%d %dx%d [%d] %f\n", offset, len, row, col, col * n + row, src[col * n + row]);
        dst[col * n + row] = src[col * len + row-offset];
    }
}
// Host wrapper: launches join_rows_kernel over a 2-D grid (BLOCK_SIZE x
// BLOCK_SIZE threads, ceil-div grid) and blocks until the scatter completes.
void join_rows_kernel_exec(const float *src, float *dst, int m, int n, int offset, int len){
/* specified block and grid size */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((m+block.x-1)/block.x, (n+block.y-1)/block.y);
//printf("m:%d n:%d offset:%d len:%d\n", m, n, offset, len);
/* launch kernel */
hipLaunchKernelGGL(( join_rows_kernel), dim3(grid), dim3(block), 0, 0, src, dst, m, n, offset, len);
hipDeviceSynchronize();
}
| dee4e02a9186cbbab20aa67566fcbae5a0948c71.cu | #include "slice_rows_kernel.h"
#include <stdio.h>
#include "cuMat_config.h"
// Copies rows [offset, offset+len) of a column-major m x n matrix `src`
// (m columns, n rows; element (row,col) stored at src[col*n + row]) into the
// m x len matrix `dst`. One thread per (col,row); out-of-slice threads no-op.
__global__ void slice_rows_kernel (const float * __restrict__ src,
float * __restrict__ dst, int m, int n, int offset, int len){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
//printf("%d x %d\n", row, col);
// guard: row must be inside the slice window and both indices in-matrix
if (offset <= row && row < offset+len && row < n && col < m){
//printf("offset:%d len:%d %dx%d [%d] %f\n", offset, len, row, col, col * n + row, src[col * n + row]);
dst[col * len + row-offset] = src[col * n + row];
}
}
// Host wrapper: launches slice_rows_kernel over a 2-D grid (BLOCK_SIZE x
// BLOCK_SIZE threads, ceil-div grid covering m columns x n rows) and blocks
// until the copy completes.
void slice_rows_kernel_exec(const float *src, float *dst, int m, int n, int offset, int len){
/* specified block and grid size */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((m+block.x-1)/block.x, (n+block.y-1)/block.y);
//printf("m:%d n:%d offset:%d len:%d\n", m, n, offset, len);
/* launch kernel */
slice_rows_kernel<<<grid, block>>>(src, dst, m, n, offset, len);
/* cudaThreadSynchronize() has been deprecated since CUDA 4.0;
   cudaDeviceSynchronize() is the supported, semantically identical call. */
cudaDeviceSynchronize();
}
// Inverse of slice_rows_kernel: scatters the m x len matrix `src` back into
// rows [offset, offset+len) of the column-major m x n matrix `dst`.
__global__ void join_rows_kernel (const float * __restrict__ src,
float * __restrict__ dst, int m, int n, int offset, int len){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
//printf("%d x %d\n", row, col);
// guard: row must be inside the slice window and both indices in-matrix
if (offset <= row && row < offset+len && row < n && col < m){
//printf("offset:%d len:%d %dx%d [%d] %f\n", offset, len, row, col, col * n + row, src[col * n + row]);
dst[col * n + row] = src[col * len + row-offset];
}
}
// Host wrapper: launches join_rows_kernel over a 2-D grid (BLOCK_SIZE x
// BLOCK_SIZE threads, ceil-div grid) and blocks until the scatter completes.
void join_rows_kernel_exec(const float *src, float *dst, int m, int n, int offset, int len){
/* specified block and grid size */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((m+block.x-1)/block.x, (n+block.y-1)/block.y);
//printf("m:%d n:%d offset:%d len:%d\n", m, n, offset, len);
/* launch kernel */
join_rows_kernel<<<grid, block>>>(src, dst, m, n, offset, len);
/* cudaThreadSynchronize() has been deprecated since CUDA 4.0;
   cudaDeviceSynchronize() is the supported, semantically identical call. */
cudaDeviceSynchronize();
}
|
769cc8dda262dbff87165a3ccfc1f86fe7fba252.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* function for projecting lidar points
*
*/
#include "../common.h"
// Projects 3-D lidar points into a panoramic camera image, one thread per point.
// tform: rigid transform; indexing (tform[0],[4],[8],[12] for x, etc.)
//        suggests MATLAB column-major 4x4 layout with translation in 12..14.
// cam:   intrinsics; cam[0]/cam[4] scale, cam[6]/cam[7] principal point
//        (presumably a column-major 2x3/3x3 matrix -- confirm against caller).
// Outputs: xOut/yOut pixel coordinates, valid[i] = false for off-image points.
__global__ void CameraTransformKernel(const float* const tform,
const float* const cam,
const size_t imWidth,
const size_t imHeight,
const float* const xIn,
const float* const yIn,
const float* const zIn,
const size_t numPoints,
float* const xOut,
float* const yOut,
bool* const valid){
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
// grid-tail guard
if(i >= numPoints){
return;
}
//transform points
float x = xIn[i]*tform[0] + yIn[i]*tform[4] + zIn[i]*tform[8] + tform[12];
float y = xIn[i]*tform[1] + yIn[i]*tform[5] + zIn[i]*tform[9] + tform[13];
float z = xIn[i]*tform[2] + yIn[i]*tform[6] + zIn[i]*tform[10] + tform[14];
bool v = true;
//panoramic camera model: azimuth along x, normalized elevation along y
y = (y/sqrt(z*z + x*x));
x = atan2(x,z);
//apply projective camera matrix
x = cam[0]*x + cam[6];
y = cam[4]*y + cam[7];
// reject projections outside the image rectangle
if((x < 0) || (y < 0) || (x >= imWidth) || (y >= imHeight)){
v = false;
}
//output points
xOut[i] = x;
yOut[i] = y;
valid[i] = v;
}
// MATLAB MEX entry point wrapping CameraTransformKernel.
// Inputs:  prhs[0] transform (single gpuArray), prhs[1] camera intrinsics
//          (single gpuArray), prhs[2] numPoints x 3 points (single gpuArray,
//          columns x,y,z), prhs[3] uint32 [imHeight, imWidth].
// Outputs: plhs[0] numPoints x 1 logical validity mask (gpuArray),
//          plhs[1] numPoints x 2 projected coordinates (single gpuArray).
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
//initialize the MathWorks GPU API.
mxInitGPU();
//read data
mxGPUArray const * tformMat = mxGPUCreateFromMxArray(prhs[0]);
mxGPUArray const * camMat = mxGPUCreateFromMxArray(prhs[1]);
mxGPUArray const * pointsMat = mxGPUCreateFromMxArray(prhs[2]);
size_t imWidth = ((uint32_T *) mxGetData(prhs[3]))[1];
size_t imHeight = ((uint32_T *) mxGetData(prhs[3]))[0];
size_t numPoints = mxGPUGetDimensions(pointsMat)[0];
//get input pointers
float* tformPtr = (float*)(mxGPUGetDataReadOnly(tformMat));
float* camPtr = (float*)(mxGPUGetDataReadOnly(camMat));
float* xInPtr = (float*)(mxGPUGetDataReadOnly(pointsMat));
// columns of the points matrix are stored back-to-back (column-major)
float* yInPtr = &(xInPtr[numPoints]);
float* zInPtr = &(yInPtr[numPoints]);
//create output
mwSize outSize[] = {numPoints,2};
mxGPUArray* outMat = mxGPUCreateGPUArray(2, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
plhs[1] = mxGPUCreateMxArrayOnGPU(outMat);
outSize[1] = 1;
mxGPUArray* validMat = mxGPUCreateGPUArray(2, outSize, mxLOGICAL_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
plhs[0] = mxGPUCreateMxArrayOnGPU(validMat);
float* xOutPtr = (float*)(mxGPUGetData(outMat));
float* yOutPtr = &(xOutPtr[numPoints]);
bool* validPtr = (bool*)(mxGPUGetData(validMat));
//run and get outputs
hipLaunchKernelGGL(( CameraTransformKernel), dim3(gridSize(numPoints)), dim3(BLOCK_SIZE), 0, 0, tformPtr, camPtr, imWidth, imHeight, xInPtr, yInPtr, zInPtr, numPoints, xOutPtr, yOutPtr,validPtr);
CudaCheckError();
//destroy reference structures
mxGPUDestroyGPUArray(tformMat);
mxGPUDestroyGPUArray(camMat);
mxGPUDestroyGPUArray(pointsMat);
mxGPUDestroyGPUArray(outMat);
mxGPUDestroyGPUArray(validMat);
}
| 769cc8dda262dbff87165a3ccfc1f86fe7fba252.cu | /* function for projecting lidar points
*
*/
#include "../common.h"
// Projects 3-D lidar points into a panoramic camera image, one thread per point.
// tform: rigid transform; indexing (tform[0],[4],[8],[12] for x, etc.)
//        suggests MATLAB column-major 4x4 layout with translation in 12..14.
// cam:   intrinsics; cam[0]/cam[4] scale, cam[6]/cam[7] principal point
//        (presumably a column-major 2x3/3x3 matrix -- confirm against caller).
// Outputs: xOut/yOut pixel coordinates, valid[i] = false for off-image points.
__global__ void CameraTransformKernel(const float* const tform,
const float* const cam,
const size_t imWidth,
const size_t imHeight,
const float* const xIn,
const float* const yIn,
const float* const zIn,
const size_t numPoints,
float* const xOut,
float* const yOut,
bool* const valid){
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
// grid-tail guard
if(i >= numPoints){
return;
}
//transform points
float x = xIn[i]*tform[0] + yIn[i]*tform[4] + zIn[i]*tform[8] + tform[12];
float y = xIn[i]*tform[1] + yIn[i]*tform[5] + zIn[i]*tform[9] + tform[13];
float z = xIn[i]*tform[2] + yIn[i]*tform[6] + zIn[i]*tform[10] + tform[14];
bool v = true;
//panoramic camera model: azimuth along x, normalized elevation along y
y = (y/sqrt(z*z + x*x));
x = atan2(x,z);
//apply projective camera matrix
x = cam[0]*x + cam[6];
y = cam[4]*y + cam[7];
// reject projections outside the image rectangle
if((x < 0) || (y < 0) || (x >= imWidth) || (y >= imHeight)){
v = false;
}
//output points
xOut[i] = x;
yOut[i] = y;
valid[i] = v;
}
// MATLAB MEX entry point wrapping CameraTransformKernel.
// Inputs:  prhs[0] transform (single gpuArray), prhs[1] camera intrinsics
//          (single gpuArray), prhs[2] numPoints x 3 points (single gpuArray,
//          columns x,y,z), prhs[3] uint32 [imHeight, imWidth].
// Outputs: plhs[0] numPoints x 1 logical validity mask (gpuArray),
//          plhs[1] numPoints x 2 projected coordinates (single gpuArray).
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
//initialize the MathWorks GPU API.
mxInitGPU();
//read data
mxGPUArray const * tformMat = mxGPUCreateFromMxArray(prhs[0]);
mxGPUArray const * camMat = mxGPUCreateFromMxArray(prhs[1]);
mxGPUArray const * pointsMat = mxGPUCreateFromMxArray(prhs[2]);
size_t imWidth = ((uint32_T *) mxGetData(prhs[3]))[1];
size_t imHeight = ((uint32_T *) mxGetData(prhs[3]))[0];
size_t numPoints = mxGPUGetDimensions(pointsMat)[0];
//get input pointers
float* tformPtr = (float*)(mxGPUGetDataReadOnly(tformMat));
float* camPtr = (float*)(mxGPUGetDataReadOnly(camMat));
float* xInPtr = (float*)(mxGPUGetDataReadOnly(pointsMat));
// columns of the points matrix are stored back-to-back (column-major)
float* yInPtr = &(xInPtr[numPoints]);
float* zInPtr = &(yInPtr[numPoints]);
//create output
mwSize outSize[] = {numPoints,2};
mxGPUArray* outMat = mxGPUCreateGPUArray(2, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
plhs[1] = mxGPUCreateMxArrayOnGPU(outMat);
outSize[1] = 1;
mxGPUArray* validMat = mxGPUCreateGPUArray(2, outSize, mxLOGICAL_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
plhs[0] = mxGPUCreateMxArrayOnGPU(validMat);
float* xOutPtr = (float*)(mxGPUGetData(outMat));
float* yOutPtr = &(xOutPtr[numPoints]);
bool* validPtr = (bool*)(mxGPUGetData(validMat));
//run and get outputs
CameraTransformKernel<<<gridSize(numPoints), BLOCK_SIZE>>>(tformPtr, camPtr, imWidth, imHeight, xInPtr, yInPtr, zInPtr, numPoints, xOutPtr, yOutPtr,validPtr);
CudaCheckError();
//destroy reference structures
mxGPUDestroyGPUArray(tformMat);
mxGPUDestroyGPUArray(camMat);
mxGPUDestroyGPUArray(pointsMat);
mxGPUDestroyGPUArray(outMat);
mxGPUDestroyGPUArray(validMat);
}
|
edb564d2a4512ecf90f10f7bb2629c260ddc55d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Primal update step of a primal-dual TV solver:
//   u_new = (u + tau*(div p + f)) / (1 + tau),   u_ = 2*u_new - u_old
// p1/p2 are the x/y components of the dual variable; div is a backward-
// difference divergence with boundary handling at x==0/X-1 and y==0/Y-1.
// Fixes: added a grid-tail bounds guard (edge blocks previously read/wrote
// out of range when X or Y is not a multiple of the block size), and replaced
// the bitwise '&' in the interior-x test with logical '&&' for consistency
// with the y branch (same result for 0/1 operands, clearer intent).
__global__ void primal(float* u, float* u_, const float* f, const float* p1,
const float* p2, const double tau, const int X, const int Y)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// bounds guard for partially-filled edge blocks
if (x >= X || y >= Y)
return;
// center point
int c = y*X + x;
float div_x = 0.0f;
float div_y = 0.0f;
if (x == 0)
div_x = p1[c];
if (x > 0 && x < X-1)
div_x = p1[c]-p1[c-1];
if (x == X-1)
div_x = -p1[c-1];
if (y == 0)
div_y = p2[c];
if (y > 0 && y < Y-1)
div_y = p2[c]-p2[c-X];
if (y == Y-1)
div_y = -p2[c-X];
float u_old = u[c];
u[c] = (u_old + tau*(+div_x+div_y+f[c]))/(1+tau);
// over-relaxation for the next dual step
u_[c] = 2*u[c]-u_old;
}
| edb564d2a4512ecf90f10f7bb2629c260ddc55d0.cu | __global__ void primal(float* u, float* u_, const float* f, const float* p1,
const float* p2, const double tau, const int X, const int Y)
{
// Primal update step of a primal-dual TV solver:
//   u_new = (u + tau*(div p + f)) / (1 + tau),   u_ = 2*u_new - u_old
// Fixes: added a grid-tail bounds guard (edge blocks previously read/wrote
// out of range when X or Y is not a multiple of the block size), and replaced
// the bitwise '&' in the interior-x test with logical '&&' for consistency
// with the y branch (same result for 0/1 operands, clearer intent).
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// bounds guard for partially-filled edge blocks
if (x >= X || y >= Y)
return;
// center point
int c = y*X + x;
float div_x = 0.0f;
float div_y = 0.0f;
if (x == 0)
div_x = p1[c];
if (x > 0 && x < X-1)
div_x = p1[c]-p1[c-1];
if (x == X-1)
div_x = -p1[c-1];
if (y == 0)
div_y = p2[c];
if (y > 0 && y < Y-1)
div_y = p2[c]-p2[c-X];
if (y == Y-1)
div_y = -p2[c-X];
float u_old = u[c];
u[c] = (u_old + tau*(+div_x+div_y+f[c]))/(1+tau);
// over-relaxation for the next dual step
u_[c] = 2*u[c]-u_old;
}
|
1a84108dd98e820e0ace4c052ee55b1597ff4af6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/crop_layer.hpp"
namespace caffe {
// Maps a flat index into the cropped (dest) tensor to the corresponding flat
// index in the uncropped (src) tensor: decomposes the dest index into
// per-dimension coordinates via dest_strides, applies the crop offsets, and
// recomposes with src_strides.
__device__ int compute_uncropped_index(
int index,
const int ndims,
const int* src_strides,
const int* dest_strides,
const int* offsets) {
int dest_index = index;
int src_index = 0;
for (int i = 0; i < ndims; ++i) {
// coordinate along dimension i in the dest tensor
int coord = dest_index / dest_strides[i];
dest_index -= coord * dest_strides[i];
src_index += src_strides[i] * (coord + offsets[i]);
}
return src_index;
}
// Forward crop: one thread per cropped-output element; each thread gathers the
// matching element from the uncropped src tensor.
template <typename Dtype>
__global__ void crop_kernel_forward(const int nthreads,
const int ndims,
const int* src_strides,
const int* dest_strides,
const int* offsets,
const Dtype* src, Dtype* dest) {
CUDA_KERNEL_LOOP(index, nthreads) {
int src_index = compute_uncropped_index(
index, ndims, src_strides, dest_strides, offsets);
dest[index] = src[src_index];
}
}
// Backward crop: one thread per cropped (dest) element; each thread scatters
// its gradient back to the matching position in the uncropped src gradient.
template <typename Dtype>
__global__ void crop_kernel_backward(const int nthreads,
const int ndims,
const int* src_strides,
const int* dest_strides,
const int* offsets,
Dtype* src, const Dtype* dest) {
CUDA_KERNEL_LOOP(index, nthreads) {
int src_index = compute_uncropped_index(
index, ndims, src_strides, dest_strides, offsets);
src[src_index] = dest[index];
}
}
// GPU forward pass: copies the cropped region of bottom[0] into top[0].
// src_strides_/dest_strides_/offsets are precomputed elsewhere in the layer
// (not visible here) -- presumed per-dimension strides and crop offsets.
template <typename Dtype>
void CropLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top,
const bool preforward_flag) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int n = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( crop_kernel_forward), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n,
bottom[0]->num_axes(),
src_strides_.gpu_data(),
dest_strides_.gpu_data(),
offsets.gpu_data(),
bottom_data, top_data);
}
// GPU backward pass: zero-fills the bottom diff, then scatters each top diff
// element to its uncropped position (elements outside the crop keep zero grad).
template <typename Dtype>
void CropLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom,
const bool prebackward_flag) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int n = top[0]->count();
if (propagate_down[0]) {
caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( crop_kernel_backward), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n,
bottom[0]->num_axes(),
src_strides_.gpu_data(),
dest_strides_.gpu_data(),
offsets.gpu_data(),
bottom_diff, top_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);
} // namespace caffe
| 1a84108dd98e820e0ace4c052ee55b1597ff4af6.cu | #include <vector>
#include "caffe/layers/crop_layer.hpp"
namespace caffe {
// Translate a flat element index of the cropped (destination) tensor into the
// flat index of the same element inside the uncropped (source) tensor.
// Walks the dimensions from outermost to innermost, peeling one coordinate off
// per destination stride and shifting it by the crop offset of that dimension.
__device__ int compute_uncropped_index(
    int index,
    const int ndims,
    const int* src_strides,
    const int* dest_strides,
    const int* offsets) {
  int remaining = index;
  int mapped = 0;
  for (int dim = 0; dim != ndims; ++dim) {
    const int coord = remaining / dest_strides[dim];
    remaining -= coord * dest_strides[dim];
    mapped += (coord + offsets[dim]) * src_strides[dim];
  }
  return mapped;
}
// Forward crop: one thread per cropped-output element; each thread gathers the
// matching element from the uncropped src tensor.
template <typename Dtype>
__global__ void crop_kernel_forward(const int nthreads,
const int ndims,
const int* src_strides,
const int* dest_strides,
const int* offsets,
const Dtype* src, Dtype* dest) {
CUDA_KERNEL_LOOP(index, nthreads) {
int src_index = compute_uncropped_index(
index, ndims, src_strides, dest_strides, offsets);
dest[index] = src[src_index];
}
}
// Backward crop: one thread per cropped (dest) element; each thread scatters
// its gradient back to the matching position in the uncropped src gradient.
template <typename Dtype>
__global__ void crop_kernel_backward(const int nthreads,
const int ndims,
const int* src_strides,
const int* dest_strides,
const int* offsets,
Dtype* src, const Dtype* dest) {
CUDA_KERNEL_LOOP(index, nthreads) {
int src_index = compute_uncropped_index(
index, ndims, src_strides, dest_strides, offsets);
src[src_index] = dest[index];
}
}
// GPU forward pass: copies the cropped region of bottom[0] into top[0].
// src_strides_/dest_strides_/offsets are precomputed elsewhere in the layer
// (not visible here) -- presumed per-dimension strides and crop offsets.
template <typename Dtype>
void CropLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top,
const bool preforward_flag) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int n = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
crop_kernel_forward<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n,
bottom[0]->num_axes(),
src_strides_.gpu_data(),
dest_strides_.gpu_data(),
offsets.gpu_data(),
bottom_data, top_data);
}
// GPU backward pass: zero-fills the bottom diff, then scatters each top diff
// element to its uncropped position (elements outside the crop keep zero grad).
template <typename Dtype>
void CropLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom,
const bool prebackward_flag) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
int n = top[0]->count();
if (propagate_down[0]) {
caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
crop_kernel_backward<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(n,
bottom[0]->num_axes(),
src_strides_.gpu_data(),
dest_strides_.gpu_data(),
offsets.gpu_data(),
bottom_diff, top_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);
} // namespace caffe
|
97179be84e5138d09f349d4271a81f3511cd8cd9.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <random>
#include <algorithm>
#include <tuple>
#include "hash_table.hpp"
#include "profile_printer.hpp"
#include "bloomfilter.hpp"
#include "task_manager.hpp"
#include "timeline.hpp"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "bloomfilter/util.hpp"
#include <amsfilter_model/model.hpp>
#include <amsfilter_model/fpr.hpp>
#include <amsfilter/amsfilter_lite.hpp>
#include <amsfilter/internal/blocked_bloomfilter_template.hpp>
#include <boost/tokenizer.hpp>
#include <dtl/env.hpp>
#include <dtl/thread.hpp>
constexpr static double kTwScale = 1000.0;
// Parses a Bloom-filter configuration string of the form "w,s,z,k"
// (word count per block, sector count, zone count, hash-function count)
// into an amsfilter::Config.
// Fix: the original dereferenced the tokenizer's end iterator (undefined
// behavior) when fewer than four fields were supplied; now guarded with an
// assert, matching the file's existing error-handling style.
amsfilter::Config parse_filter_config(const std::string config_str) {
using tokenizer = boost::tokenizer<boost::char_separator<char>>;
boost::char_separator<char> sep{","};
tokenizer tok{config_str, sep};
auto tok_it = tok.begin();
// fetch the next numeric field, refusing to walk past the end
auto next_field = [&]() {
assert(tok_it != tok.end() && "filter config must be 'w,s,z,k'");
return u32(std::stoul(*tok_it++));
};
// The filter parameters.
amsfilter::Config config;
config.word_cnt_per_block = next_field();
config.sector_cnt = next_field();
config.zone_cnt = next_field();
config.k = next_field();
return config;
}
// Dumps table `t` (column 0 = uint32 keys) to a pipe-delimited text file.
// Probe tables (probe==true) get one key per line; build tables additionally
// get `num_payload` zero-valued payload fields per line ("key|0|0|...").
void gen_csv(const std::string& fname, const Table& t, bool probe, size_t num_payload) {
uint32_t *table_keys = (uint32_t *)t.columns[0];
std::ofstream f;
f.open(fname);
auto num = t.size();
for (size_t row=0; row<num; row++) {
f << table_keys[row];
if (!probe) {
// signed/unsigned comparison; fine while num_payload fits in int
for (int k=0; k<num_payload; k++) {
f << "|";
f << "0";
}
}
f << "\n";
}
f.close();
};
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
// Returns the size of `fname` in bytes, or 0 when the file cannot be
// stat'ed (missing, inaccessible, ...).
size_t file_size(const std::string& fname) {
  struct stat st;
  return (stat(fname.c_str(), &st) == 0) ? (size_t)st.st_size : (size_t)0;
}
// Writes the first `num` int32 values of column `col` of `table` to `file`
// as raw binary. Aborts (assert) if the file cannot be opened.
void write_column(const std::string& file, Table& table, size_t col, size_t num) {
std::ofstream out(file, std::ios::out | std::ios::binary);
assert(out.is_open());
int32_t *d = (int32_t *)table.columns[col];
out.write((char*)d, sizeof(int32_t) * num);
out.close();
};
// Persists a single 64-bit checksum to `file` as raw native-endian bytes.
// Aborts (assert) if the file cannot be opened for writing.
void write_ksum(const std::string& file, int64_t ksum) {
  std::ofstream out(file, std::ios::out | std::ios::binary);
  assert(out.is_open());
  const char* raw = reinterpret_cast<const char*>(&ksum);
  out.write(raw, sizeof(ksum));
  out.close();
};
// Loads a single raw int64 checksum (as written by write_ksum) from `file`
// directly into `ksum`. Aborts (assert) if the file cannot be opened.
void read_sum(const std::string& file, int64_t& ksum) {
  std::ifstream in(file, std::ios::in | std::ios::binary);
  assert(in.is_open());
  char* raw = reinterpret_cast<char*>(&ksum);
  in.read(raw, sizeof(int64_t));
  in.close();
};
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
// Memory-maps the on-disk column file `file` (num int32 values) `scale` times
// back-to-back into one contiguous read-only region, so the table appears
// scale-times larger without extra physical memory, and installs the mapping
// as column `col` of `table`. A checksum pass verifies the replication.
// Fix: the page-alignment assert used 'bytes % 4*1024', which parses as
// '(bytes % 4) * 1024' ('%' binds as tightly as '*', left-to-right) -- it only
// checked 4-byte divisibility. Now checks a real 4 KiB multiple as mmap's
// MAP_FIXED placement requires.
void read_column(Table& table, const std::string& file, size_t col, size_t num, size_t scale) {
if (scale <= 0) {
scale = 1;
}
int fd;
struct stat sb;
const size_t bytes = sizeof(int32_t) * num;
fd = open(file.c_str(), O_RDONLY);
assert(fd > 0);
memset(&sb, 0, sizeof(sb));
fstat(fd, &sb);
assert((uint64_t)sb.st_size == bytes);
table.delloc_columns();
// reserve one contiguous PROT_NONE region, then map the file into each slot
char* area = (char*)mmap(NULL,
scale * bytes, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
assert(area != MAP_FAILED);
// each replica must start page-aligned (4 KiB pages assumed)
assert((bytes % (4*1024)) == 0);
for (size_t s = 0; s < scale; s++) {
char* dest = area + bytes * s;
char* address = (char*)mmap(dest,
bytes, PROT_READ, MAP_FIXED | MAP_SHARED, fd, 0);
// printf("%d: -> %p got %p\n", (int)s, dest, address);
if (address == MAP_FAILED) {
printf("error = %s\n", strerror(errno));
assert(false);
}
assert(address != MAP_FAILED);
assert(address == dest);
}
close(fd);
// check data: the replicated sum must be divisible by the replica count
uint64_t sum = 0;
Vectorized::glob_sum(&sum, (int32_t*)area, nullptr, scale*num);
if (sum % scale != 0) {
fprintf(stderr, "read_column: Sum not divisible by scale\n");
assert(false);
}
table.columns[col] = area;
#if 0
std::ifstream in(file, std::ios::in | std::ios::binary);
assert(in.is_open());
int32_t *d = (int32_t *)table.columns[col];
in.read((char*)d, sizeof(int32_t) * num);
in.close();
#endif
};
#include <tuple>
#include <sstream>
// Self-test for Vectorized::select_match_bit: verifies that set bits in a
// bitmask are expanded into the matching selection-vector indices, for a full
// byte, a single bit, and an every-4th-bit pattern over a 30-bit range.
void test() {
int sel[kVecSize];
uint8_t bit;
int num;
uint32_t bit32;
// all 8 bits set -> identity selection 0..7
bit = 0xFF;
num = Vectorized::select_match_bit(true, sel, &bit, 8);
assert(num == 8);
for (int i=0; i<num; i++) {
assert(sel[i] == i);
}
// single low bit -> one match at index 0
bit = 1;
num = Vectorized::select_match_bit(true, sel, &bit, 8);
assert(num == 1);
assert(sel[0] == 0);
// every 4th bit set in a 32-bit mask, scanned over 30 bits
bit32 = 0;
int exp_num = 0;
for (int i=0; i<32; i++) {
if (i % 4 == 0) {
bit32 |= 1 << i;
exp_num++;
}
}
memset(sel, 0, sizeof(sel));
num = Vectorized::select_match_bit(true, sel, (uint8_t*)&bit32, 30);
assert(8 == num);
assert(num == exp_num);
assert(sel[0] == 0);
assert(sel[1] == 4);
assert(sel[2] == 8);
assert(sel[3] == 12);
assert(sel[4] == 16);
assert(sel[5] == 20);
assert(sel[6] == 24);
assert(sel[7] == 28);
}
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Queries the (pre-calibrated) AMS-Filter performance model for the
// performance-optimal Bloom-filter parameters, once for the CPU environment
// and once for GPU device 0 with host-pinned memory.
// tw is the work time saved per filtered-out element, in the model's unit
// (callers divide params.tw by kTwScale before passing it in).
// Returns (cpu_params, gpu_params).
std::tuple<amsfilter::Params, amsfilter::Params> determine_filter_configuration(params_t params, double tw) {
// Obtain a model instance. - Note: The calibration tool needs to be executed
// before.
amsfilter::Model model;
//CPU env
const auto thread_count = params.num_threads;//std::thread::hardware_concurrency() / 2;
const auto cpu_env = amsfilter::model::Env::cpu(thread_count);
// GPU env
const auto device_no = 0u; // cuda device
const auto gpu_env = amsfilter::model::Env::gpu(device_no, amsfilter::model::Memory::HOST_PINNED);
// Obtain the parameters for a (close to) performance-optimal filter.
// The model needs the following two values to find the optimal parameters:
// build size (n): The number of keys that will be inserted in the filter.
// work time (tw): The execution time in nanoseconds that is saved when an
// element is filtered out.
const auto n = params.build_size;
const auto cpu_params = model.determine_filter_params(cpu_env, n, tw);
const auto gpu_params = model.determine_filter_params(gpu_env, n, tw);
std::cout
<< "Host-side filter: m=" << cpu_params.get_filter_size()
<< ", config=" << cpu_params.get_filter_config() << std::endl;
std::cout
<< "Device-side filter: m=" << gpu_params.get_filter_size()
<< ", config=" << gpu_params.get_filter_config() << std::endl;
return std::make_tuple(cpu_params, gpu_params);
}
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Benchmark driver: builds a hash table and Bloom filters over a generated
// build relation, then repeatedly probes with a (possibly mmap-replicated)
// probe relation on CPU and/or GPU, collecting timing/profile data into
// results.csv. Input columns and the expected key checksum are cached on disk
// keyed by (selectivity, build size, probe size).
int main(int argc, char** argv) {
test();
auto params = parse_command_line(argc, argv);
TaskManager manager;
std::ofstream results_file;
results_file.open("results.csv", std::ios::out);
FileTimeline<TimelineEvent>* timeline = nullptr;
if (params.timeline_path.size() > 0) {
timeline = new FileTimeline<TimelineEvent>(params.timeline_path);
}
std::cout << " Probe Size: " << params.probe_size << " -- Build Size: " << params.build_size << std::endl;
size_t build_size = params.build_size;
// size_t probe_size = params.probe_size;
// only 1/probe_scale of the probe data is stored; read_column replicates it
const size_t real_probe_size = params.probe_scale ? (params.probe_size / params.probe_scale) : params.probe_size;
const size_t virt_probe_size = params.probe_size;
size_t selectivity = params.selectivity;
size_t num_columns = params.num_columns;
Table table_build(num_columns, build_size);
Table table_probe(1,real_probe_size);
// cache-file name for column/checksum `id` under the current workload shape
auto gen_fname = [&] (size_t id) {
std::ostringstream s;
s << "data_" << id << "_" << "_s_" << selectivity
<< "_b_" << build_size << "_p_" << real_probe_size << ".bin";
return s.str();
};
if (params.measure_tw) {
// tw measurement runs the pipeline without any filtering
params.cpu_bloomfilter = 0;
params.gpu = 0;
}
const std::string bfile(gen_fname(0));
const std::string pfile(gen_fname(1));
const std::string ksum(gen_fname(3));
bool cached = true;
// (re)generate input files when any cache file is missing/empty
if (!file_size(bfile) || !file_size(pfile) || !file_size(ksum)) {
std::cout << "Files not cached. Recreating ... with "<< real_probe_size << std::endl;
// not cached, create files
cached = false;
populate_table(table_build);
populate_table(table_probe);
set_selectivity(table_build, table_probe, selectivity, params.probe_scale);
auto expected_ksum = calculate_matches_sum(table_build, table_probe, selectivity);
std::cout << "Writing ksum to disk ..." << std::endl;
write_ksum(ksum, expected_ksum);
std::cout << "Writing 'build' to disk ..." << std::endl;
write_column(bfile, table_build, 0, build_size);
std::cout << "Writing 'probe' to disk ..." << std::endl;
write_column(pfile, table_probe, 0, real_probe_size);
std::cout << "Done" << std::endl;
}
if (params.only_generate) {
exit(0);
}
// load data
assert(file_size(bfile) > 0);
assert(file_size(pfile) > 0);
assert(file_size(ksum) > 0);
assert(file_size(ksum) == sizeof(int64_t));
assert(file_size(bfile) == sizeof(int32_t) * build_size);
assert(file_size(pfile) == sizeof(int32_t) * real_probe_size);
read_column(table_build, bfile, 0, build_size, 0);
read_column(table_probe, pfile, 0, real_probe_size, params.probe_scale);
int64_t expected_ksum = 0;
read_sum(ksum, expected_ksum);
// replication multiplies the expected match-key checksum
if (params.probe_scale >= 1) {
expected_ksum *= params.probe_scale;
}
table_probe.capacity = virt_probe_size;
assert(params.gpu_morsel_size >= params.cpu_morsel_size);
// optional CSV export mode: dump relations and exit
if (!params.csv_path.empty()) {
std::cout << "Writing build relation ..." <<std::endl;
gen_csv(params.csv_path + "build.csv", table_build, false, params.num_payloads);
std::cout << "Writing probe relation ..." <<std::endl;
gen_csv(params.csv_path + "probe.csv", table_probe, true, params.num_payloads);
std::cout << "Done" << std::endl;
exit(0);
}
auto ht = new HashTablinho(
sizeof(int32_t) + // key
params.num_payloads * sizeof(int32_t), // payload cols
params.build_size);
//build table
uint32_t hashs[kVecSize];
table_build.chunk([&] (auto columns, auto num_columns, auto offset, auto num) {
int32_t *table_keys = (int32_t *)table_build.columns[0];
Vectorized::chunk(offset, num, [&] (auto offset, auto num) {
auto keys = table_keys+offset;
int* sel = nullptr;
Vectorized::map_hash(hashs, keys, sel, num);
ht->Insert(keys, hashs, sel, num);
}, kVecSize);
// FIXME: build bloom filter
}, [&] () {
// finished
ht->FinalizeBuild();
});
std::vector<HashTablinho*> hts = {ht};
Pipeline pipeline(hts, table_probe, params);
float max_cpu_sel;
float max_gpu_sel;
// Build Blocked Bloom Filter on CPU (Block size = 128 Bytes)
{
amsfilter::Config cpu_config, gpu_config;
size_t cpu_m, gpu_m;
// filter shape: either user-supplied or chosen by the performance model
if(params.manual_filter) {
cpu_config = parse_filter_config(params.filter_config);
gpu_config = cpu_config;
cpu_m = params.filter_size;
gpu_m = cpu_m;
} else {
const auto filter_tuple = determine_filter_configuration(params, (double)params.tw / kTwScale);
const auto cpu_params = std::get<0>(filter_tuple);
const auto gpu_params = std::get<1>(filter_tuple);
cpu_config = cpu_params.get_filter_config();
cpu_m = cpu_params.get_filter_size();
gpu_config = gpu_params.get_filter_config();
gpu_m = gpu_params.get_filter_size();
max_cpu_sel = cpu_params.get_max_selectivity();
max_gpu_sel = gpu_params.get_max_selectivity();
}
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
// print-config mode: report the chosen parameters and exit
if (params.print_bf_conf > 0) {
auto pconf = [&] (bool gpu, auto bf, auto m) {
size_t n = params.build_size;
auto smaller = [] (auto a, auto b) {
return a < b ? "smaller" : "NOT";
};
std::cout << "w " << bf.word_cnt_per_block
<< " s " << bf.sector_cnt
<< " z " << bf.zone_cnt
<< " k " << bf.k
<< " m " << m
<< (gpu ? " GPU " : " CPU ")
<< " slowdown " << params.slowdown
<< " filtersize " << params.filter_size
<< " tw " << params.tw
<< " probe_size " << params.probe_size
<< " build_size " << params.build_size
<< " fpr " << amsfilter::fpr(bf, m, n)
<< " sel " << (float)params.selectivity / 100
<< " max_cpu_sel " << max_cpu_sel
<< " smaller " << smaller((float)params.selectivity / 100, max_cpu_sel)
<< " max_gpu_sel " << max_gpu_sel
<< " smaller " << smaller((float)params.selectivity / 100, max_gpu_sel)
<< std::endl;
};
pconf(false, cpu_config, cpu_m);
pconf(true, gpu_config, gpu_m);
exit(0);
}
// CPU Config
std::cout << "Filter parameters: w=" << cpu_config.word_cnt_per_block
<< ", s=" << cpu_config.sector_cnt
<< ", z=" << cpu_config.zone_cnt
<< ", k=" << cpu_config.k
<< ", m=" << cpu_m
<< std::endl;
// GPU Config
std::cout << "Filter parameters: w=" << gpu_config.word_cnt_per_block
<< ", s=" << gpu_config.sector_cnt
<< ", z=" << gpu_config.zone_cnt
<< ", k=" << gpu_config.k
<< ", m=" << gpu_m
<< std::endl;
//size_t m = params.filter_size;
// Construct the filter.
FilterWrapper filter_cpu(cpu_m, cpu_config);
FilterWrapper filter_gpu(gpu_m, gpu_config);
uint32_t *table_keys = (uint32_t *)table_build.columns[0];
uint32_t *probe_keys = static_cast<uint32_t*>(table_probe.columns[0]);
std::set<uint32_t> positions;
// insert build keys into both filters and sanity-check for false negatives
if (!params.measure_tw) {
for (std::size_t i = 0; i < table_build.size(); ++i) {
const auto key = (uint32_t)*(table_keys + i);
//std::cout << "Insert key " << key << " position " << i << '\n';
filter_cpu.insert(key);
filter_gpu.insert(key);
}
// Validate Filter on CPU
for (std::size_t i = 0; i < table_build.size(); ++i) {
const auto key = (uint32_t)*(table_keys + i);
auto match_cpu = filter_cpu.contains(key);
auto match_gpu = filter_gpu.contains(key);
if(!match_cpu || !match_gpu)
std::cout << "no match key " << key << " position "<< i << '\n';
}
}
std::cout << std::endl;
// cuda instance of bloom filter logic on GPU with keys on CPU
int64_t key_cnt = 0;
uint32_t *keys = nullptr;
if(params.in_gpu_keys){
key_cnt = table_probe.size();
keys = static_cast<uint32_t*>(table_probe.columns[0]);
filter_gpu.cache_keys(keys, key_cnt);
}
ProfilePrinter profile_info(params);
profile_info.write_header(results_file);
// warmup + measured repetitions; stats accumulate from the first measured run
for(auto i = 0; i < params.num_repetitions + params.num_warmup; ++i) {
//execute probe
const auto start = std::chrono::system_clock::now();
const auto start_cycles = rdtsc();
manager.execute_query(pipeline, filter_cpu, filter_gpu, profile_info,
i == params.num_warmup ? timeline : nullptr,
max_cpu_sel, max_gpu_sel);
auto end_cycles = rdtsc();
auto end = std::chrono::system_clock::now();
if (i >= params.num_warmup) {
// Profile output
profile_info.pipeline_cycles += (double)(end_cycles - start_cycles);
profile_info.pipeline_sum_thread_cycles += (double)(pipeline.prof_pipeline_cycles.cycles);
profile_info.pipeline_time += std::chrono::duration<double>(end - start).count();
profile_info.cpu_time += (double)pipeline.prof_aggr_cpu.cycles;
profile_info.cpu_join_time += (double)pipeline.prof_join_cpu.cycles;
profile_info.cpu_expop_time += (double)pipeline.prof_expop_cpu.cycles;
profile_info.gpu_time += (double)pipeline.prof_aggr_gpu.cycles;
profile_info.cpu_gpu_time += (double)pipeline.prof_aggr_gpu_cpu_join.cycles;
profile_info.tuples_gpu_probe += (double)pipeline.tuples_gpu_probe;
profile_info.tuples_gpu_consume += (double)pipeline.tuples_gpu_consume;
#ifdef PROFILE
profile_info.pre_filter_tuples += pipeline.num_prefilter;
profile_info.fitered_tuples += pipeline.num_postfilter;
profile_info.pre_join_tuples += pipeline.num_prejoin;
profile_info.pos_join_tuples += pipeline.num_postjoin;
#endif
profile_info.semijoin_time += pipeline.prof_semijoin_time;
}
// correctness check against the cached checksum
if(expected_ksum != pipeline.ksum) {
std::cout << " invalid ksum:" << pipeline.ksum << " expected:" << expected_ksum << std::endl;
}
pipeline.reset();
}
double final_elapsed_time = profile_info.pipeline_time / (double)params.num_repetitions;
std::cout << " Probe time (sec):" << final_elapsed_time << std::endl;
if (params.measure_tw) {
// derive tw (work saved per filtered tuple) from the semijoin share
double total_semijoin_cycles = profile_info.semijoin_time / (double)params.num_repetitions;
double total_cycles = profile_info.pipeline_sum_thread_cycles / (double)params.num_repetitions;
double semijoin_frac = total_semijoin_cycles / total_cycles;
printf("TotCycles %f SJCycles %f SJPerc %f\n", total_cycles, total_semijoin_cycles, semijoin_frac);
double giga = 1000.0 * 1000.0 * 1000.0;
double tw_cyc = total_semijoin_cycles / (double)params.num_threads / (double)table_probe.size(); // / (double)params.num_threads;
double tw_ns = semijoin_frac * final_elapsed_time * giga / (double)table_probe.size(); // / (double)params.num_threads;
printf("TW %f ps %f cyc\n", tw_ns * kTwScale, tw_cyc);
} else {
profile_info.write_profile(results_file);
}
}
results_file.close();
return 0;
}
//===----------------------------------------------------------------------===// | 97179be84e5138d09f349d4271a81f3511cd8cd9.cu | #include <iostream>
#include <random>
#include <algorithm>
#include <tuple>
#include "hash_table.hpp"
#include "profile_printer.hpp"
#include "bloomfilter.hpp"
#include "task_manager.hpp"
#include "timeline.hpp"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "bloomfilter/util.hpp"
#include <amsfilter_model/model.hpp>
#include <amsfilter_model/fpr.hpp>
#include <amsfilter/amsfilter_lite.hpp>
#include <amsfilter/internal/blocked_bloomfilter_template.hpp>
#include <boost/tokenizer.hpp>
#include <dtl/env.hpp>
#include <dtl/thread.hpp>
constexpr static double kTwScale = 1000.0;
// Parses a Bloom-filter configuration string of the form "w,s,z,k"
// (word count per block, sector count, zone count, hash-function count)
// into an amsfilter::Config.
// Fix: the original dereferenced the tokenizer's end iterator (undefined
// behavior) when fewer than four fields were supplied; now guarded with an
// assert, matching the file's existing error-handling style.
amsfilter::Config parse_filter_config(const std::string config_str) {
using tokenizer = boost::tokenizer<boost::char_separator<char>>;
boost::char_separator<char> sep{","};
tokenizer tok{config_str, sep};
auto tok_it = tok.begin();
// fetch the next numeric field, refusing to walk past the end
auto next_field = [&]() {
assert(tok_it != tok.end() && "filter config must be 'w,s,z,k'");
return u32(std::stoul(*tok_it++));
};
// The filter parameters.
amsfilter::Config config;
config.word_cnt_per_block = next_field();
config.sector_cnt = next_field();
config.zone_cnt = next_field();
config.k = next_field();
return config;
}
// Dumps table `t` (column 0 = uint32 keys) to a pipe-delimited text file.
// Probe tables (probe==true) get one key per line; build tables additionally
// get `num_payload` zero-valued payload fields per line ("key|0|0|...").
void gen_csv(const std::string& fname, const Table& t, bool probe, size_t num_payload) {
uint32_t *table_keys = (uint32_t *)t.columns[0];
std::ofstream f;
f.open(fname);
auto num = t.size();
for (size_t row=0; row<num; row++) {
f << table_keys[row];
if (!probe) {
// signed/unsigned comparison; fine while num_payload fits in int
for (int k=0; k<num_payload; k++) {
f << "|";
f << "0";
}
}
f << "\n";
}
f.close();
};
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
size_t file_size(const std::string& fname) {
struct stat st;
if (!stat(fname.c_str(), &st)) {
return st.st_size;
}
return 0;
}
void write_column(const std::string& file, Table& table, size_t col, size_t num) {
std::ofstream out(file, std::ios::out | std::ios::binary);
assert(out.is_open());
int32_t *d = (int32_t *)table.columns[col];
out.write((char*)d, sizeof(int32_t) * num);
out.close();
};
void write_ksum(const std::string& file, int64_t ksum) {
std::ofstream out(file, std::ios::out | std::ios::binary);
assert(out.is_open());
out.write(reinterpret_cast<const char *>(&ksum), sizeof(ksum));
out.close();
};
void read_sum(const std::string& file, int64_t& ksum) {
std::ifstream in(file, std::ios::in | std::ios::binary);
assert(in.is_open());
in.read(reinterpret_cast<char *>(&ksum), sizeof(int64_t));
in.close();
};
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
void read_column(Table& table, const std::string& file, size_t col, size_t num, size_t scale) {
if (scale <= 0) {
scale = 1;
}
int fd;
struct stat sb;
const size_t bytes = sizeof(int32_t) * num;
fd = open(file.c_str(), O_RDONLY);
assert(fd > 0);
memset(&sb, 0, sizeof(sb));
fstat(fd, &sb);
assert((uint64_t)sb.st_size == bytes);
table.delloc_columns();
char* area = (char*)mmap(NULL,
scale * bytes, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
assert(area != MAP_FAILED);
assert((bytes % 4*1024) == 0);
for (size_t s = 0; s < scale; s++) {
char* dest = area + bytes * s;
char* address = (char*)mmap(dest,
bytes, PROT_READ, MAP_FIXED | MAP_SHARED, fd, 0);
// printf("%d: -> %p got %p\n", (int)s, dest, address);
if (address == MAP_FAILED) {
printf("error = %s\n", strerror(errno));
assert(false);
}
assert(address != MAP_FAILED);
assert(address == dest);
}
close(fd);
// check data
uint64_t sum = 0;
Vectorized::glob_sum(&sum, (int32_t*)area, nullptr, scale*num);
if (sum % scale != 0) {
fprintf(stderr, "read_column: Sum not divisible by scale\n");
assert(false);
}
table.columns[col] = area;
#if 0
std::ifstream in(file, std::ios::in | std::ios::binary);
assert(in.is_open());
int32_t *d = (int32_t *)table.columns[col];
in.read((char*)d, sizeof(int32_t) * num);
in.close();
#endif
};
#include <tuple>
#include <sstream>
void test() {
int sel[kVecSize];
uint8_t bit;
int num;
uint32_t bit32;
bit = 0xFF;
num = Vectorized::select_match_bit(true, sel, &bit, 8);
assert(num == 8);
for (int i=0; i<num; i++) {
assert(sel[i] == i);
}
bit = 1;
num = Vectorized::select_match_bit(true, sel, &bit, 8);
assert(num == 1);
assert(sel[0] == 0);
bit32 = 0;
int exp_num = 0;
for (int i=0; i<32; i++) {
if (i % 4 == 0) {
bit32 |= 1 << i;
exp_num++;
}
}
memset(sel, 0, sizeof(sel));
num = Vectorized::select_match_bit(true, sel, (uint8_t*)&bit32, 30);
assert(8 == num);
assert(num == exp_num);
assert(sel[0] == 0);
assert(sel[1] == 4);
assert(sel[2] == 8);
assert(sel[3] == 12);
assert(sel[4] == 16);
assert(sel[5] == 20);
assert(sel[6] == 24);
assert(sel[7] == 28);
}
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
std::tuple<amsfilter::Params, amsfilter::Params> determine_filter_configuration(params_t params, double tw) {
// Obtain a model instance. - Note: The calibration tool needs to be executed
// before.
amsfilter::Model model;
//CPU env
const auto thread_count = params.num_threads;//std::thread::hardware_concurrency() / 2;
const auto cpu_env = amsfilter::model::Env::cpu(thread_count);
// GPU env
const auto device_no = 0u; // cuda device
const auto gpu_env = amsfilter::model::Env::gpu(device_no, amsfilter::model::Memory::HOST_PINNED);
// Obtain the parameters for a (close to) performance-optimal filter.
// The model needs the following two values to find the optimal parameters:
// build size (n): The number of keys that will be inserted in the filter.
// work time (tw): The execution time in nanoseconds that is saved when an
// element is filtered out.
const auto n = params.build_size;
const auto cpu_params = model.determine_filter_params(cpu_env, n, tw);
const auto gpu_params = model.determine_filter_params(gpu_env, n, tw);
std::cout
<< "Host-side filter: m=" << cpu_params.get_filter_size()
<< ", config=" << cpu_params.get_filter_config() << std::endl;
std::cout
<< "Device-side filter: m=" << gpu_params.get_filter_size()
<< ", config=" << gpu_params.get_filter_config() << std::endl;
return std::make_tuple(cpu_params, gpu_params);
}
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
int main(int argc, char** argv) {
test();
auto params = parse_command_line(argc, argv);
TaskManager manager;
std::ofstream results_file;
results_file.open("results.csv", std::ios::out);
FileTimeline<TimelineEvent>* timeline = nullptr;
if (params.timeline_path.size() > 0) {
timeline = new FileTimeline<TimelineEvent>(params.timeline_path);
}
std::cout << " Probe Size: " << params.probe_size << " -- Build Size: " << params.build_size << std::endl;
size_t build_size = params.build_size;
// size_t probe_size = params.probe_size;
const size_t real_probe_size = params.probe_scale ? (params.probe_size / params.probe_scale) : params.probe_size;
const size_t virt_probe_size = params.probe_size;
size_t selectivity = params.selectivity;
size_t num_columns = params.num_columns;
Table table_build(num_columns, build_size);
Table table_probe(1,real_probe_size);
auto gen_fname = [&] (size_t id) {
std::ostringstream s;
s << "data_" << id << "_" << "_s_" << selectivity
<< "_b_" << build_size << "_p_" << real_probe_size << ".bin";
return s.str();
};
if (params.measure_tw) {
params.cpu_bloomfilter = 0;
params.gpu = 0;
}
const std::string bfile(gen_fname(0));
const std::string pfile(gen_fname(1));
const std::string ksum(gen_fname(3));
bool cached = true;
if (!file_size(bfile) || !file_size(pfile) || !file_size(ksum)) {
std::cout << "Files not cached. Recreating ... with "<< real_probe_size << std::endl;
// not cached, create files
cached = false;
populate_table(table_build);
populate_table(table_probe);
set_selectivity(table_build, table_probe, selectivity, params.probe_scale);
auto expected_ksum = calculate_matches_sum(table_build, table_probe, selectivity);
std::cout << "Writing ksum to disk ..." << std::endl;
write_ksum(ksum, expected_ksum);
std::cout << "Writing 'build' to disk ..." << std::endl;
write_column(bfile, table_build, 0, build_size);
std::cout << "Writing 'probe' to disk ..." << std::endl;
write_column(pfile, table_probe, 0, real_probe_size);
std::cout << "Done" << std::endl;
}
if (params.only_generate) {
exit(0);
}
// load data
assert(file_size(bfile) > 0);
assert(file_size(pfile) > 0);
assert(file_size(ksum) > 0);
assert(file_size(ksum) == sizeof(int64_t));
assert(file_size(bfile) == sizeof(int32_t) * build_size);
assert(file_size(pfile) == sizeof(int32_t) * real_probe_size);
read_column(table_build, bfile, 0, build_size, 0);
read_column(table_probe, pfile, 0, real_probe_size, params.probe_scale);
int64_t expected_ksum = 0;
read_sum(ksum, expected_ksum);
if (params.probe_scale >= 1) {
expected_ksum *= params.probe_scale;
}
table_probe.capacity = virt_probe_size;
assert(params.gpu_morsel_size >= params.cpu_morsel_size);
if (!params.csv_path.empty()) {
std::cout << "Writing build relation ..." <<std::endl;
gen_csv(params.csv_path + "build.csv", table_build, false, params.num_payloads);
std::cout << "Writing probe relation ..." <<std::endl;
gen_csv(params.csv_path + "probe.csv", table_probe, true, params.num_payloads);
std::cout << "Done" << std::endl;
exit(0);
}
auto ht = new HashTablinho(
sizeof(int32_t) + // key
params.num_payloads * sizeof(int32_t), // payload cols
params.build_size);
//build table
uint32_t hashs[kVecSize];
table_build.chunk([&] (auto columns, auto num_columns, auto offset, auto num) {
int32_t *table_keys = (int32_t *)table_build.columns[0];
Vectorized::chunk(offset, num, [&] (auto offset, auto num) {
auto keys = table_keys+offset;
int* sel = nullptr;
Vectorized::map_hash(hashs, keys, sel, num);
ht->Insert(keys, hashs, sel, num);
}, kVecSize);
// FIXME: build bloom filter
}, [&] () {
// finished
ht->FinalizeBuild();
});
std::vector<HashTablinho*> hts = {ht};
Pipeline pipeline(hts, table_probe, params);
float max_cpu_sel;
float max_gpu_sel;
// Build Blocked Bloom Filter on CPU (Block size = 128 Bytes)
{
amsfilter::Config cpu_config, gpu_config;
size_t cpu_m, gpu_m;
if(params.manual_filter) {
cpu_config = parse_filter_config(params.filter_config);
gpu_config = cpu_config;
cpu_m = params.filter_size;
gpu_m = cpu_m;
} else {
const auto filter_tuple = determine_filter_configuration(params, (double)params.tw / kTwScale);
const auto cpu_params = std::get<0>(filter_tuple);
const auto gpu_params = std::get<1>(filter_tuple);
cpu_config = cpu_params.get_filter_config();
cpu_m = cpu_params.get_filter_size();
gpu_config = gpu_params.get_filter_config();
gpu_m = gpu_params.get_filter_size();
max_cpu_sel = cpu_params.get_max_selectivity();
max_gpu_sel = gpu_params.get_max_selectivity();
}
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
if (params.print_bf_conf > 0) {
auto pconf = [&] (bool gpu, auto bf, auto m) {
size_t n = params.build_size;
auto smaller = [] (auto a, auto b) {
return a < b ? "smaller" : "NOT";
};
std::cout << "w " << bf.word_cnt_per_block
<< " s " << bf.sector_cnt
<< " z " << bf.zone_cnt
<< " k " << bf.k
<< " m " << m
<< (gpu ? " GPU " : " CPU ")
<< " slowdown " << params.slowdown
<< " filtersize " << params.filter_size
<< " tw " << params.tw
<< " probe_size " << params.probe_size
<< " build_size " << params.build_size
<< " fpr " << amsfilter::fpr(bf, m, n)
<< " sel " << (float)params.selectivity / 100
<< " max_cpu_sel " << max_cpu_sel
<< " smaller " << smaller((float)params.selectivity / 100, max_cpu_sel)
<< " max_gpu_sel " << max_gpu_sel
<< " smaller " << smaller((float)params.selectivity / 100, max_gpu_sel)
<< std::endl;
};
pconf(false, cpu_config, cpu_m);
pconf(true, gpu_config, gpu_m);
exit(0);
}
// CPU Config
std::cout << "Filter parameters: w=" << cpu_config.word_cnt_per_block
<< ", s=" << cpu_config.sector_cnt
<< ", z=" << cpu_config.zone_cnt
<< ", k=" << cpu_config.k
<< ", m=" << cpu_m
<< std::endl;
// GPU Config
std::cout << "Filter parameters: w=" << gpu_config.word_cnt_per_block
<< ", s=" << gpu_config.sector_cnt
<< ", z=" << gpu_config.zone_cnt
<< ", k=" << gpu_config.k
<< ", m=" << gpu_m
<< std::endl;
//size_t m = params.filter_size;
// Construct the filter.
FilterWrapper filter_cpu(cpu_m, cpu_config);
FilterWrapper filter_gpu(gpu_m, gpu_config);
uint32_t *table_keys = (uint32_t *)table_build.columns[0];
uint32_t *probe_keys = static_cast<uint32_t*>(table_probe.columns[0]);
std::set<uint32_t> positions;
if (!params.measure_tw) {
for (std::size_t i = 0; i < table_build.size(); ++i) {
const auto key = (uint32_t)*(table_keys + i);
//std::cout << "Insert key " << key << " position " << i << '\n';
filter_cpu.insert(key);
filter_gpu.insert(key);
}
// Validate Filter on CPU
for (std::size_t i = 0; i < table_build.size(); ++i) {
const auto key = (uint32_t)*(table_keys + i);
auto match_cpu = filter_cpu.contains(key);
auto match_gpu = filter_gpu.contains(key);
if(!match_cpu || !match_gpu)
std::cout << "no match key " << key << " position "<< i << '\n';
}
}
std::cout << std::endl;
// cuda instance of bloom filter logic on GPU with keys on CPU
int64_t key_cnt = 0;
uint32_t *keys = nullptr;
if(params.in_gpu_keys){
key_cnt = table_probe.size();
keys = static_cast<uint32_t*>(table_probe.columns[0]);
filter_gpu.cache_keys(keys, key_cnt);
}
ProfilePrinter profile_info(params);
profile_info.write_header(results_file);
for(auto i = 0; i < params.num_repetitions + params.num_warmup; ++i) {
//execute probe
const auto start = std::chrono::system_clock::now();
const auto start_cycles = rdtsc();
manager.execute_query(pipeline, filter_cpu, filter_gpu, profile_info,
i == params.num_warmup ? timeline : nullptr,
max_cpu_sel, max_gpu_sel);
auto end_cycles = rdtsc();
auto end = std::chrono::system_clock::now();
if (i >= params.num_warmup) {
// Profile output
profile_info.pipeline_cycles += (double)(end_cycles - start_cycles);
profile_info.pipeline_sum_thread_cycles += (double)(pipeline.prof_pipeline_cycles.cycles);
profile_info.pipeline_time += std::chrono::duration<double>(end - start).count();
profile_info.cpu_time += (double)pipeline.prof_aggr_cpu.cycles;
profile_info.cpu_join_time += (double)pipeline.prof_join_cpu.cycles;
profile_info.cpu_expop_time += (double)pipeline.prof_expop_cpu.cycles;
profile_info.gpu_time += (double)pipeline.prof_aggr_gpu.cycles;
profile_info.cpu_gpu_time += (double)pipeline.prof_aggr_gpu_cpu_join.cycles;
profile_info.tuples_gpu_probe += (double)pipeline.tuples_gpu_probe;
profile_info.tuples_gpu_consume += (double)pipeline.tuples_gpu_consume;
#ifdef PROFILE
profile_info.pre_filter_tuples += pipeline.num_prefilter;
profile_info.fitered_tuples += pipeline.num_postfilter;
profile_info.pre_join_tuples += pipeline.num_prejoin;
profile_info.pos_join_tuples += pipeline.num_postjoin;
#endif
profile_info.semijoin_time += pipeline.prof_semijoin_time;
}
if(expected_ksum != pipeline.ksum) {
std::cout << " invalid ksum:" << pipeline.ksum << " expected:" << expected_ksum << std::endl;
}
pipeline.reset();
}
double final_elapsed_time = profile_info.pipeline_time / (double)params.num_repetitions;
std::cout << " Probe time (sec):" << final_elapsed_time << std::endl;
if (params.measure_tw) {
double total_semijoin_cycles = profile_info.semijoin_time / (double)params.num_repetitions;
double total_cycles = profile_info.pipeline_sum_thread_cycles / (double)params.num_repetitions;
double semijoin_frac = total_semijoin_cycles / total_cycles;
printf("TotCycles %f SJCycles %f SJPerc %f\n", total_cycles, total_semijoin_cycles, semijoin_frac);
double giga = 1000.0 * 1000.0 * 1000.0;
double tw_cyc = total_semijoin_cycles / (double)params.num_threads / (double)table_probe.size(); // / (double)params.num_threads;
double tw_ns = semijoin_frac * final_elapsed_time * giga / (double)table_probe.size(); // / (double)params.num_threads;
printf("TW %f ps %f cyc\n", tw_ns * kTwScale, tw_cyc);
} else {
profile_info.write_profile(results_file);
}
}
results_file.close();
return 0;
}
//===----------------------------------------------------------------------===// |
86a24fd1738ede433dc403407f04d4a66b068cf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "PerspectiveCamera.h"
#include "CudaDefines.h"
#include <stdio.h>
#include <iostream>
#include <string>
#include "Vector3.h"
#include "Rotator.h"
#include "Image.h"
#include "Renderer.h"
#include <GLFW\glfw3.h>
int main()
{
uint Width = 100;
uint Height = 100;
PerspectiveCamera* Cam = new PerspectiveCamera(Vector3(), Rotator(), Width, Height, 45);
Renderer R = Renderer(Width, Height);
GLFWwindow* window;
/* Initialize the library */
if (!glfwInit())
return -1;
/* Create a windowed mode window and its OpenGL context */
window = glfwCreateWindow(Width, Height, "Hello World", NULL, NULL);
if (!window)
{
glfwTerminate();
return -1;
}
/* Make the window's context current */
glfwMakeContextCurrent(window);
glClearColor(1, 0, 0, 1);
Color* pixels = (Color*)malloc(Width*Height*sizeof(Color));
glEnable(GL_TEXTURE_2D);
GLuint texID;
glGenTextures(1, &texID);
glBindTexture(GL_TEXTURE_2D, texID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, Width, Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
/* Loop until the user closes the window */
int windowWidth;
int windowHeight;
glfwGetWindowSize(window, &windowWidth, &windowHeight);
int Amount_Spheres = 3;
Sphere* spheres;
spheres = (Sphere*)malloc(Amount_Spheres * sizeof(Sphere));
spheres[0].Position = Vector3(10, 0, 0);
spheres[0].Radius = 5;
spheres[1].Position = Vector3(0, 10, 0);
spheres[1].Radius = 5;
spheres[2].Position = Vector3(0, 0, 10);
spheres[2].Radius = 5;
while (!glfwWindowShouldClose(window))
{
float r = (float)((double)rand() / (RAND_MAX)) + 1;
for (uint i = 0; i < Width*Height; i++){
pixels[i].R = (uchar)255;
pixels[i].G = (uchar)255;
pixels[i].B = (uchar)255;
pixels[i].A = (uchar)255;
}
R.Render(Cam, spheres, Amount_Spheres, pixels);
hipDeviceSynchronize();
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
/* Render here */
glfwGetWindowSize(window, &windowWidth, &windowHeight);
glViewport(0, 0, windowWidth, windowHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, 1, 0, 1, 0, 1);
glMatrixMode(GL_MODELVIEW);
glBindTexture(GL_TEXTURE_2D, texID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, Width, Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
//glColor3f(0, 1, 0);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex2f(-1, -1);
glTexCoord2f(1.0f, 0.0f);
glVertex2f(1, -1);
glTexCoord2f(1.0f, 1.0f);
glVertex2f(1, 1);
glTexCoord2f(0.0f, 1.0f);
glVertex2f(-1, 1);
glEnd();
//glDrawPixels(Width,Height,GL_RGBA8,GL_UNSIGNED_BYTE,pixels);
/* Swap front and back buffers */
glfwSwapBuffers(window);
/* Poll for and process events */
glfwPollEvents();
}
free(pixels);
delete Cam;
glfwTerminate();
return 0;
}
| 86a24fd1738ede433dc403407f04d4a66b068cf8.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "PerspectiveCamera.h"
#include "CudaDefines.h"
#include <stdio.h>
#include <iostream>
#include <string>
#include "Vector3.h"
#include "Rotator.h"
#include "Image.h"
#include "Renderer.h"
#include <GLFW\glfw3.h>
int main()
{
uint Width = 100;
uint Height = 100;
PerspectiveCamera* Cam = new PerspectiveCamera(Vector3(), Rotator(), Width, Height, 45);
Renderer R = Renderer(Width, Height);
GLFWwindow* window;
/* Initialize the library */
if (!glfwInit())
return -1;
/* Create a windowed mode window and its OpenGL context */
window = glfwCreateWindow(Width, Height, "Hello World", NULL, NULL);
if (!window)
{
glfwTerminate();
return -1;
}
/* Make the window's context current */
glfwMakeContextCurrent(window);
glClearColor(1, 0, 0, 1);
Color* pixels = (Color*)malloc(Width*Height*sizeof(Color));
glEnable(GL_TEXTURE_2D);
GLuint texID;
glGenTextures(1, &texID);
glBindTexture(GL_TEXTURE_2D, texID);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, Width, Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
/* Loop until the user closes the window */
int windowWidth;
int windowHeight;
glfwGetWindowSize(window, &windowWidth, &windowHeight);
int Amount_Spheres = 3;
Sphere* spheres;
spheres = (Sphere*)malloc(Amount_Spheres * sizeof(Sphere));
spheres[0].Position = Vector3(10, 0, 0);
spheres[0].Radius = 5;
spheres[1].Position = Vector3(0, 10, 0);
spheres[1].Radius = 5;
spheres[2].Position = Vector3(0, 0, 10);
spheres[2].Radius = 5;
while (!glfwWindowShouldClose(window))
{
float r = (float)((double)rand() / (RAND_MAX)) + 1;
for (uint i = 0; i < Width*Height; i++){
pixels[i].R = (uchar)255;
pixels[i].G = (uchar)255;
pixels[i].B = (uchar)255;
pixels[i].A = (uchar)255;
}
R.Render(Cam, spheres, Amount_Spheres, pixels);
cudaDeviceSynchronize();
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
/* Render here */
glfwGetWindowSize(window, &windowWidth, &windowHeight);
glViewport(0, 0, windowWidth, windowHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, 1, 0, 1, 0, 1);
glMatrixMode(GL_MODELVIEW);
glBindTexture(GL_TEXTURE_2D, texID);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, Width, Height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
//glColor3f(0, 1, 0);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex2f(-1, -1);
glTexCoord2f(1.0f, 0.0f);
glVertex2f(1, -1);
glTexCoord2f(1.0f, 1.0f);
glVertex2f(1, 1);
glTexCoord2f(0.0f, 1.0f);
glVertex2f(-1, 1);
glEnd();
//glDrawPixels(Width,Height,GL_RGBA8,GL_UNSIGNED_BYTE,pixels);
/* Swap front and back buffers */
glfwSwapBuffers(window);
/* Poll for and process events */
glfwPollEvents();
}
free(pixels);
delete Cam;
glfwTerminate();
return 0;
}
|
289d57d384fa4b4dce6a12c3b5be951ce4f36eff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/MultiLabelMarginCriterion.hip"
#else
static inline void THNN_(MultiLabelMarginCriterion_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *target) {
int64_t ndims = input->dim();
bool valid_inputs = (ndims == 2 && input->size(1) != 0) || (ndims == 1 && input->size(0) != 0) || ndims == 0;
TORCH_CHECK(
valid_inputs,
"Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
input->sizes());
if (ndims <= 1) {
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
TORCH_CHECK(valid_inputs && target->dim() <= 1 && target->numel() == dim,
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else if (ndims == 2) {
int nframe = input->size(0);
int dim = input->size(1);
TORCH_CHECK(
valid_inputs && target->dim() == 2 && target->size(0) == nframe && target->size(1) == dim,
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else {
TORCH_CHECK(false, "Expected input of ndims <= 2, but got ndims: ", ndims);
}
}
// TODO: improve error messages
void THNN_(MultiLabelMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
THCTensor *istarget,
int64_t reduction)
{
THNN_(MultiLabelMarginCriterion_shapeCheck)(state, input, target);
if (input->numel() == 0) {
return;
}
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
THCTensor_(resizeAs)(state, istarget, target);
if(input->dim() <= 1)
{
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THCTensor_(resize0d)(state, output);
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(hipGetLastError());
}
else if(input->dim() == 2)
{
int nframe = input->size(0);
int dim = input->size(1);
dim3 blocks(input->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
if (reduction != at::Reduction::None)
{
THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0));
THCTensor_(resize0d)(state, output);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, output_tmp),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(hipGetLastError());
auto t = THTensor_wrap(output_tmp);
auto r = THTensor_wrap(output);
at::native::sum_out(r, t, at::IntArrayRef(std::vector<int64_t>{}), false, r.scalar_type());
THCTensor_(free)(state, output_tmp);
}
else
{
THCTensor_(resize1d)(state, output, input->size(0));
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
false
);
THCudaCheck(hipGetLastError());
}
}
else {
TORCH_CHECK(false, "Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ",
input->sizes());
}
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
}
void THNN_(MultiLabelMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *istarget,
int64_t reduction)
{
THNN_(MultiLabelMarginCriterion_shapeCheck)(state, input, target);
input = THCTensor_(newContiguous)(state, input);
THCTensor_(resizeAs)(state, gradInput, input);
if (input->numel() == 0) {
THCTensor_(free)(state, input);
return;
}
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
if(gradInput->dim() <= 1)
{
int dim = gradInput->dim() == 0 ? 1 : gradInput->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THArgCheck(!target->is_empty() && (target->dim() <= 1) && (target_size == dim), 3,
"inconsistent target size");
TORCH_CHECK(target->sizes() == istarget->sizes(), "inconsistent isTarget size");
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
}
else if(gradInput->dim() == 2)
{
int nframe = gradInput->size(0);
int dim = gradInput->size(1);
THArgCheck((input->size(1) != 0) && (target->dim() == 2) && (target->size(0) == nframe)
&& (target->size(1) == dim), 3, "inconsistent target size");
THArgCheck((istarget->dim() == 2) && (istarget->size(0) == nframe)
&& (istarget->size(1) == dim), 3, "inconsistent isTarget size");
dim3 blocks(gradInput->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>)
, dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
gradInput->size(0), gradInput->size(1),
reduction == at::Reduction::Mean,
reduction != at::Reduction::None);
}
else {
TORCH_CHECK(false, "Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ",
gradInput->sizes());
}
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
THCTensor_(free)(state, gradOutput);
}
#endif
| 289d57d384fa4b4dce6a12c3b5be951ce4f36eff.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/MultiLabelMarginCriterion.cu"
#else
static inline void THNN_(MultiLabelMarginCriterion_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *target) {
int64_t ndims = input->dim();
bool valid_inputs = (ndims == 2 && input->size(1) != 0) || (ndims == 1 && input->size(0) != 0) || ndims == 0;
TORCH_CHECK(
valid_inputs,
"Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
input->sizes());
if (ndims <= 1) {
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
TORCH_CHECK(valid_inputs && target->dim() <= 1 && target->numel() == dim,
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else if (ndims == 2) {
int nframe = input->size(0);
int dim = input->size(1);
TORCH_CHECK(
valid_inputs && target->dim() == 2 && target->size(0) == nframe && target->size(1) == dim,
"inconsistent target size: ", target->sizes(), " for input of size: ", input->sizes());
} else {
TORCH_CHECK(false, "Expected input of ndims <= 2, but got ndims: ", ndims);
}
}
// TODO: improve error messages
void THNN_(MultiLabelMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
THCTensor *istarget,
int64_t reduction)
{
THNN_(MultiLabelMarginCriterion_shapeCheck)(state, input, target);
if (input->numel() == 0) {
return;
}
input = THCTensor_(newContiguous)(state, input);
target = THCIndexTensor_(newContiguous)(state, target);
istarget = THCTensor_(newContiguous)(state, istarget);
THCTensor_(resizeAs)(state, istarget, target);
if(input->dim() <= 1)
{
int dim = input->dim() == 0 ? 1 : input->size(0);
int target_size = target->dim() == 0 ? 1 : target->size(0);
THCTensor_(resize0d)(state, output);
dim3 blocks(1);
dim3 threads(MULTILABELMARGIN_THREADS);
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
1, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(cudaGetLastError());
}
else if(input->dim() == 2)
{
int nframe = input->size(0);
int dim = input->size(1);
dim3 blocks(input->size(0));
dim3 threads(MULTILABELMARGIN_THREADS);
if (reduction != at::Reduction::None)
{
THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0));
THCTensor_(resize0d)(state, output);
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
THCTensor_(data)(state, output_tmp),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
reduction == at::Reduction::Mean
);
THCudaCheck(cudaGetLastError());
auto t = THTensor_wrap(output_tmp);
auto r = THTensor_wrap(output);
at::native::sum_out(r, t, at::IntArrayRef(std::vector<int64_t>{}), false, r.scalar_type());
THCTensor_(free)(state, output_tmp);
}
else
{
THCTensor_(resize1d)(state, output, input->size(0));
cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>
<<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
THCTensor_(data)(state, istarget),
nframe, dim,
false
);
THCudaCheck(cudaGetLastError());
}
}
else {
TORCH_CHECK(false, "Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ",
input->sizes());
}
THCTensor_(free)(state, input);
THCIndexTensor_(free)(state, target);
THCTensor_(free)(state, istarget);
}
// Backward pass of the multi-label margin criterion.
//
// Accepts either a 0-/1-D `input` (treated as a single frame of `dim`
// elements) or a 2-D (nframe x dim) batch.  `istarget` is the 0/1 mask
// produced by the forward pass marking target classes, and must match
// `target`'s shape.  The kernel receives two flags derived from
// `reduction`: whether to average (Mean) and whether `gradOutput` is a
// single reduced value (Sum/Mean) rather than one value per frame (None).
void THNN_(MultiLabelMarginCriterion_updateGradInput)(
          THCState *state,
          THCTensor *input,
          THCIndexTensor *target,
          THCTensor *gradOutput,
          THCTensor *gradInput,
          THCTensor *istarget,
          int64_t reduction)
{
  THNN_(MultiLabelMarginCriterion_shapeCheck)(state, input, target);
  // Work on contiguous copies; the originals are released at the end.
  input = THCTensor_(newContiguous)(state, input);
  THCTensor_(resizeAs)(state, gradInput, input);
  // Empty input: gradInput has already been resized to the empty shape,
  // so there is nothing to compute.  Only `input` has been retained so far.
  if (input->numel() == 0) {
    THCTensor_(free)(state, input);
    return;
  }
  target = THCIndexTensor_(newContiguous)(state, target);
  istarget = THCTensor_(newContiguous)(state, istarget);
  gradOutput = THCTensor_(newContiguous)(state, gradOutput);
  if(gradInput->dim() <= 1)
  {
    // Single-frame case: a 0-D tensor is treated as one element.
    int dim = gradInput->dim() == 0 ? 1 : gradInput->size(0);
    int target_size = target->dim() == 0 ? 1 : target->size(0);
    THArgCheck(!target->is_empty() && (target->dim() <= 1) && (target_size == dim), 3,
               "inconsistent target size");
    TORCH_CHECK(target->sizes() == istarget->sizes(), "inconsistent isTarget size");
    // One block handles the single frame.
    dim3 blocks(1);
    dim3 threads(MULTILABELMARGIN_THREADS);
    cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>
      <<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        THCTensor_(data)(state, istarget),
        1, dim,
        reduction == at::Reduction::Mean,
        reduction != at::Reduction::None);
  }
  else if(gradInput->dim() == 2)
  {
    // Batched case: one block per frame.
    int nframe = gradInput->size(0);
    int dim = gradInput->size(1);
    THArgCheck((input->size(1) != 0) && (target->dim() == 2) && (target->size(0) == nframe)
               && (target->size(1) == dim), 3, "inconsistent target size");
    THArgCheck((istarget->dim() == 2) && (istarget->size(0) == nframe)
               && (istarget->size(1) == dim), 3, "inconsistent isTarget size");
    dim3 blocks(gradInput->size(0));
    dim3 threads(MULTILABELMARGIN_THREADS);
    cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>
      <<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>(
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        THCTensor_(data)(state, istarget),
        gradInput->size(0), gradInput->size(1),
        reduction == at::Reduction::Mean,
        reduction != at::Reduction::None);
  }
  else {
    TORCH_CHECK(false, "Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ",
                gradInput->sizes());
  }
  // Surface any kernel-launch error before releasing the temporaries.
  THCudaCheck(cudaGetLastError());
  THCTensor_(free)(state, input);
  THCIndexTensor_(free)(state, target);
  THCTensor_(free)(state, istarget);
  THCTensor_(free)(state, gradOutput);
}
#endif
|
92e471f2ff1a426ba0e4670236b27169e12dabf7.hip | // !!! This is a file automatically generated by hipify!!!
#include <primitiv/config.h>

#include <algorithm>
#include <cstring>

#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace primitiv {
namespace devices {
// Half-precision 2D convolution forward pass via cuDNN.
//
// Descriptor construction below feeds Shape[2]/[1]/[0] as the C/H/W-style
// descriptor arguments (exact NCHW mapping lives in CuDNNTensorDescriptor —
// confirm there).  When the filter tensor `w` is batched, the descriptors
// are built with batch 1 and the loop advances x/w/y one sample per
// iteration; otherwise the loop runs once over the whole minibatch.
// Without PRIMITIV_USE_CUDNN this operation is not implemented.
//
// NOTE(review): ::cudnnGetConvolutionForwardAlgorithm was removed in
// cuDNN 8 — this path requires cuDNN 7.x or earlier; confirm the supported
// cuDNN versions for this backend.
void CUDA16::conv2d_fw_impl(
    const Tensor &x, const Tensor &w,
    std::uint32_t padding0, std::uint32_t padding1,
    std::uint32_t stride0, std::uint32_t stride1,
    std::uint32_t dilation0, std::uint32_t dilation1,
    Tensor &y) {
#ifdef PRIMITIV_USE_CUDNN
  const Shape x_shape = x.shape();
  const Shape w_shape = w.shape();
  const Shape y_shape = y.shape();

  // Specifies a target device.
  CUDA_CALL(::hipSetDevice(dev_id_));

  // Prepares descriptors.  Batch dim collapses to 1 when `w` is batched,
  // because the loop below then processes one sample at a time.
  const cuda::CuDNNTensorDescriptor x_desc(
      w_shape.has_batch() ? 1 : x_shape.batch(),
      x_shape[2], x_shape[1], x_shape[0],
      ::CUDNN_DATA_HALF);
  const cuda::CuDNNTensorDescriptor y_desc(
      w_shape.has_batch() ? 1 : y_shape.batch(),
      y_shape[2], y_shape[1], y_shape[0],
      ::CUDNN_DATA_HALF);
  const cuda::CuDNNFilterDescriptor w_desc(
      w_shape[3], w_shape[2], w_shape[1], w_shape[0],
      ::CUDNN_DATA_HALF);
  const cuda::CuDNNConvolutionDescriptor conv_desc(
      padding1, padding0, stride1, stride0, dilation1, dilation0,
      ::CUDNN_DATA_HALF);

  // Obtains the most efficient algorithm.
  ::cudnnConvolutionFwdAlgo_t algo;
  CUDNN_CALL(::cudnnGetConvolutionForwardAlgorithm(
      state_->cudnn.get(),
      x_desc.get(), w_desc.get(), conv_desc.get(), y_desc.get(),
      CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));

  // Obtains workspace size/memory from the device memory pool.
  std::size_t ws_size;
  CUDNN_CALL(::cudnnGetConvolutionForwardWorkspaceSize(
      state_->cudnn.get(),
      x_desc.get(), w_desc.get(), conv_desc.get(), y_desc.get(),
      algo, &ws_size));
  std::shared_ptr<void> ws_ptr = state_->pool.allocate(ws_size);

  // Performs forward operations.
  // Shifts are 0 for unbatched tensors (has_batch() == false), so a
  // broadcast input is reused across all filter batches.
  const std::size_t x_shift = x_shape.has_batch() * x_shape.volume();
  const std::size_t w_shift = w_shape.volume();
  const std::size_t y_shift = y_shape.volume();
  const float alpha = 1.f;  // y = alpha * conv(x, w) + beta * y
  const float beta = 0.f;   // overwrite y
  const half *x_ptr = CDATA(half, x);
  const half *w_ptr = CDATA(half, w);
  half *y_ptr = MDATA(half, y);
  for (std::uint32_t bn = 0; bn < w_shape.batch(); ++bn) {
    CUDNN_CALL(::cudnnConvolutionForward(
        state_->cudnn.get(),
        &alpha, x_desc.get(), x_ptr, w_desc.get(), w_ptr,
        conv_desc.get(), algo, ws_ptr.get(), ws_size,
        &beta, y_desc.get(), y_ptr));
    x_ptr += x_shift;
    w_ptr += w_shift;
    y_ptr += y_shift;
  }
#else  // PRIMITIV_USE_CUDNN
  static_cast<void>(x);
  static_cast<void>(w);
  static_cast<void>(padding0);
  static_cast<void>(padding1);
  static_cast<void>(stride0);
  static_cast<void>(stride1);
  static_cast<void>(dilation0);
  static_cast<void>(dilation1);
  static_cast<void>(y);
  PRIMITIV_THROW_NOT_IMPLEMENTED;
#endif  // PRIMITIV_USE_CUDNN
}
// Half-precision 2D convolution backward pass via cuDNN: computes the
// gradients w.r.t. the input (`gx`) and the filters (`gw`) from the output
// gradient `gy`.  beta = 1, so both gradients are ACCUMULATED into the
// existing contents of gx/gw rather than overwriting them.
// Shape/batch handling mirrors conv2d_fw_impl: when `w` is batched the
// descriptors drop the batch dimension and the loop advances the pointers
// one sample per iteration.  Without PRIMITIV_USE_CUDNN this operation is
// not implemented.
//
// NOTE(review): the cudnnGetConvolutionBackward*Algorithm entry points were
// removed in cuDNN 8 — this path requires cuDNN 7.x or earlier.
void CUDA16::conv2d_bw_impl(
    const Tensor &x, const Tensor &w, const Tensor &, const Tensor &gy,
    std::uint32_t padding0, std::uint32_t padding1,
    std::uint32_t stride0, std::uint32_t stride1,
    std::uint32_t dilation0, std::uint32_t dilation1,
    Tensor &gx, Tensor &gw) {
#ifdef PRIMITIV_USE_CUDNN
  const Shape x_shape = x.shape();
  const Shape w_shape = w.shape();
  const Shape y_shape = gy.shape();

  // Specifies a target device.
  CUDA_CALL(::hipSetDevice(dev_id_));

  // Prepares descriptors (batch collapses to 1 when `w` is batched).
  const cuda::CuDNNTensorDescriptor x_desc(
      w_shape.has_batch() ? 1 : x_shape.batch(),
      x_shape[2], x_shape[1], x_shape[0],
      ::CUDNN_DATA_HALF);
  const cuda::CuDNNTensorDescriptor y_desc(
      w_shape.has_batch() ? 1 : y_shape.batch(),
      y_shape[2], y_shape[1], y_shape[0],
      ::CUDNN_DATA_HALF);
  const cuda::CuDNNFilterDescriptor w_desc(
      w_shape[3], w_shape[2], w_shape[1], w_shape[0],
      ::CUDNN_DATA_HALF);
  const cuda::CuDNNConvolutionDescriptor conv_desc(
      padding1, padding0, stride1, stride0, dilation1, dilation0,
      ::CUDNN_DATA_HALF);

  // Obtains the most efficient algorithms.
  ::cudnnConvolutionBwdDataAlgo_t x_algo;
  ::cudnnConvolutionBwdFilterAlgo_t w_algo;
  CUDNN_CALL(::cudnnGetConvolutionBackwardDataAlgorithm(
      state_->cudnn.get(),
      w_desc.get(), y_desc.get(), conv_desc.get(), x_desc.get(),
      CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &x_algo));
  CUDNN_CALL(::cudnnGetConvolutionBackwardFilterAlgorithm(
      state_->cudnn.get(),
      x_desc.get(), y_desc.get(), conv_desc.get(), w_desc.get(),
      CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &w_algo));

  // Obtains workspace sizes/memory; a single buffer sized for the larger of
  // the two requirements is shared by both backward calls.
  std::size_t x_ws_size, w_ws_size;
  CUDNN_CALL(::cudnnGetConvolutionBackwardDataWorkspaceSize(
      state_->cudnn.get(),
      w_desc.get(), y_desc.get(), conv_desc.get(), x_desc.get(),
      x_algo, &x_ws_size));
  CUDNN_CALL(::cudnnGetConvolutionBackwardFilterWorkspaceSize(
      state_->cudnn.get(),
      x_desc.get(), y_desc.get(), conv_desc.get(), w_desc.get(),
      w_algo, &w_ws_size));
  // Fix: hipify rewrote std::max as ::max, which does not name any function
  // at global scope here.  Restore std::max (as in the CUDA original).
  const std::size_t ws_size = std::max(x_ws_size, w_ws_size);
  std::shared_ptr<void> ws_ptr = state_->pool.allocate(ws_size);

  // Performs backward operations.
  const std::size_t x_shift = x_shape.has_batch() * x_shape.volume();
  const std::size_t w_shift = w_shape.volume();
  const std::size_t y_shift = y_shape.volume();
  const float alpha = 1.f;  // grad = alpha * d(conv) + beta * grad
  const float beta = 1.f;   // accumulate into gx/gw
  const half *x_ptr = CDATA(half, x);
  const half *w_ptr = CDATA(half, w);
  const half *gy_ptr = CDATA(half, gy);
  half *gx_ptr = MDATA(half, gx);
  half *gw_ptr = MDATA(half, gw);
  for (std::uint32_t bn = 0; bn < w_shape.batch(); ++bn) {
    CUDNN_CALL(::cudnnConvolutionBackwardData(
        state_->cudnn.get(),
        &alpha, w_desc.get(), w_ptr, y_desc.get(), gy_ptr,
        conv_desc.get(), x_algo, ws_ptr.get(), ws_size,
        &beta, x_desc.get(), gx_ptr));
    CUDNN_CALL(::cudnnConvolutionBackwardFilter(
        state_->cudnn.get(),
        &alpha, x_desc.get(), x_ptr, y_desc.get(), gy_ptr,
        conv_desc.get(), w_algo, ws_ptr.get(), ws_size,
        &beta, w_desc.get(), gw_ptr));
    x_ptr += x_shift;
    w_ptr += w_shift;
    gy_ptr += y_shift;
    gx_ptr += x_shift;
    gw_ptr += w_shift;
  }
#else  // PRIMITIV_USE_CUDNN
  static_cast<void>(x);
  static_cast<void>(w);
  static_cast<void>(gy);
  static_cast<void>(padding0);
  static_cast<void>(padding1);
  static_cast<void>(stride0);
  static_cast<void>(stride1);
  static_cast<void>(dilation0);
  static_cast<void>(dilation1);
  static_cast<void>(gx);
  static_cast<void>(gw);
  PRIMITIV_THROW_NOT_IMPLEMENTED;
#endif  // PRIMITIV_USE_CUDNN
}
} // namespace devices
} // namespace primitiv
| 92e471f2ff1a426ba0e4670236b27169e12dabf7.cu | #include <primitiv/config.h>
#include <cstring>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace primitiv {
namespace devices {
void CUDA16::conv2d_fw_impl(
const Tensor &x, const Tensor &w,
std::uint32_t padding0, std::uint32_t padding1,
std::uint32_t stride0, std::uint32_t stride1,
std::uint32_t dilation0, std::uint32_t dilation1,
Tensor &y) {
#ifdef PRIMITIV_USE_CUDNN
const Shape x_shape = x.shape();
const Shape w_shape = w.shape();
const Shape y_shape = y.shape();
// Specifies a target device.
CUDA_CALL(::cudaSetDevice(dev_id_));
// Prepares descriptors.
const cuda::CuDNNTensorDescriptor x_desc(
w_shape.has_batch() ? 1 : x_shape.batch(),
x_shape[2], x_shape[1], x_shape[0],
::CUDNN_DATA_HALF);
const cuda::CuDNNTensorDescriptor y_desc(
w_shape.has_batch() ? 1 : y_shape.batch(),
y_shape[2], y_shape[1], y_shape[0],
::CUDNN_DATA_HALF);
const cuda::CuDNNFilterDescriptor w_desc(
w_shape[3], w_shape[2], w_shape[1], w_shape[0],
::CUDNN_DATA_HALF);
const cuda::CuDNNConvolutionDescriptor conv_desc(
padding1, padding0, stride1, stride0, dilation1, dilation0,
::CUDNN_DATA_HALF);
// Obtains the most efficient algorithm.
::cudnnConvolutionFwdAlgo_t algo;
CUDNN_CALL(::cudnnGetConvolutionForwardAlgorithm(
state_->cudnn.get(),
x_desc.get(), w_desc.get(), conv_desc.get(), y_desc.get(),
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo));
// Obtains workspace size/memory.
std::size_t ws_size;
CUDNN_CALL(::cudnnGetConvolutionForwardWorkspaceSize(
state_->cudnn.get(),
x_desc.get(), w_desc.get(), conv_desc.get(), y_desc.get(),
algo, &ws_size));
std::shared_ptr<void> ws_ptr = state_->pool.allocate(ws_size);
// Performs forward operations.
const std::size_t x_shift = x_shape.has_batch() * x_shape.volume();
const std::size_t w_shift = w_shape.volume();
const std::size_t y_shift = y_shape.volume();
const float alpha = 1.f;
const float beta = 0.f;
const half *x_ptr = CDATA(half, x);
const half *w_ptr = CDATA(half, w);
half *y_ptr = MDATA(half, y);
for (std::uint32_t bn = 0; bn < w_shape.batch(); ++bn) {
CUDNN_CALL(::cudnnConvolutionForward(
state_->cudnn.get(),
&alpha, x_desc.get(), x_ptr, w_desc.get(), w_ptr,
conv_desc.get(), algo, ws_ptr.get(), ws_size,
&beta, y_desc.get(), y_ptr));
x_ptr += x_shift;
w_ptr += w_shift;
y_ptr += y_shift;
}
#else // PRIMITIV_USE_CUDNN
static_cast<void>(x);
static_cast<void>(w);
static_cast<void>(padding0);
static_cast<void>(padding1);
static_cast<void>(stride0);
static_cast<void>(stride1);
static_cast<void>(dilation0);
static_cast<void>(dilation1);
static_cast<void>(y);
PRIMITIV_THROW_NOT_IMPLEMENTED;
#endif // PRIMITIV_USE_CUDNN
}
void CUDA16::conv2d_bw_impl(
const Tensor &x, const Tensor &w, const Tensor &, const Tensor &gy,
std::uint32_t padding0, std::uint32_t padding1,
std::uint32_t stride0, std::uint32_t stride1,
std::uint32_t dilation0, std::uint32_t dilation1,
Tensor &gx, Tensor &gw) {
#ifdef PRIMITIV_USE_CUDNN
const Shape x_shape = x.shape();
const Shape w_shape = w.shape();
const Shape y_shape = gy.shape();
// Specifies a target device.
CUDA_CALL(::cudaSetDevice(dev_id_));
// Prepares descriptors.
const cuda::CuDNNTensorDescriptor x_desc(
w_shape.has_batch() ? 1 : x_shape.batch(),
x_shape[2], x_shape[1], x_shape[0],
::CUDNN_DATA_HALF);
const cuda::CuDNNTensorDescriptor y_desc(
w_shape.has_batch() ? 1 : y_shape.batch(),
y_shape[2], y_shape[1], y_shape[0],
::CUDNN_DATA_HALF);
const cuda::CuDNNFilterDescriptor w_desc(
w_shape[3], w_shape[2], w_shape[1], w_shape[0],
::CUDNN_DATA_HALF);
const cuda::CuDNNConvolutionDescriptor conv_desc(
padding1, padding0, stride1, stride0, dilation1, dilation0,
::CUDNN_DATA_HALF);
// Obtains the most efficient algorithms.
::cudnnConvolutionBwdDataAlgo_t x_algo;
::cudnnConvolutionBwdFilterAlgo_t w_algo;
CUDNN_CALL(::cudnnGetConvolutionBackwardDataAlgorithm(
state_->cudnn.get(),
w_desc.get(), y_desc.get(), conv_desc.get(), x_desc.get(),
CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &x_algo));
CUDNN_CALL(::cudnnGetConvolutionBackwardFilterAlgorithm(
state_->cudnn.get(),
x_desc.get(), y_desc.get(), conv_desc.get(), w_desc.get(),
CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &w_algo));
// Obtains workspace sizes/memory.
std::size_t x_ws_size, w_ws_size;
CUDNN_CALL(::cudnnGetConvolutionBackwardDataWorkspaceSize(
state_->cudnn.get(),
w_desc.get(), y_desc.get(), conv_desc.get(), x_desc.get(),
x_algo, &x_ws_size));
CUDNN_CALL(::cudnnGetConvolutionBackwardFilterWorkspaceSize(
state_->cudnn.get(),
x_desc.get(), y_desc.get(), conv_desc.get(), w_desc.get(),
w_algo, &w_ws_size));
const std::size_t ws_size = std::max(x_ws_size, w_ws_size);
std::shared_ptr<void> ws_ptr = state_->pool.allocate(ws_size);
// Performs backward operations.
const std::size_t x_shift = x_shape.has_batch() * x_shape.volume();
const std::size_t w_shift = w_shape.volume();
const std::size_t y_shift = y_shape.volume();
const float alpha = 1.f;
const float beta = 1.f;
const half *x_ptr = CDATA(half, x);
const half *w_ptr = CDATA(half, w);
const half *gy_ptr = CDATA(half, gy);
half *gx_ptr = MDATA(half, gx);
half *gw_ptr = MDATA(half, gw);
for (std::uint32_t bn = 0; bn < w_shape.batch(); ++bn) {
CUDNN_CALL(::cudnnConvolutionBackwardData(
state_->cudnn.get(),
&alpha, w_desc.get(), w_ptr, y_desc.get(), gy_ptr,
conv_desc.get(), x_algo, ws_ptr.get(), ws_size,
&beta, x_desc.get(), gx_ptr));
CUDNN_CALL(::cudnnConvolutionBackwardFilter(
state_->cudnn.get(),
&alpha, x_desc.get(), x_ptr, y_desc.get(), gy_ptr,
conv_desc.get(), w_algo, ws_ptr.get(), ws_size,
&beta, w_desc.get(), gw_ptr));
x_ptr += x_shift;
w_ptr += w_shift;
gy_ptr += y_shift;
gx_ptr += x_shift;
gw_ptr += w_shift;
}
#else // PRIMITIV_USE_CUDNN
static_cast<void>(x);
static_cast<void>(w);
static_cast<void>(gy);
static_cast<void>(padding0);
static_cast<void>(padding1);
static_cast<void>(stride0);
static_cast<void>(stride1);
static_cast<void>(dilation0);
static_cast<void>(dilation1);
static_cast<void>(gx);
static_cast<void>(gw);
PRIMITIV_THROW_NOT_IMPLEMENTED;
#endif // PRIMITIV_USE_CUDNN
}
} // namespace devices
} // namespace primitiv
|
9405a017f31b65f6d7e81bebe16d81364bd2b78d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/downsample_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#define CUDART_NAN_F __int_as_float(0x7fffffff)
//using namespace std;
//using std::round;
namespace caffe {
// Debug helper: dumps a two-plane flow field (stored plane-major as
// [flow_x plane, flow_y plane], each height x width) to two greyscale JPEGs
// under visualize/, named from the first 10 chars of `img_key`.
// NOTE(review): values are cast straight to unsigned char — assumes the
// data is already scaled into [0, 255]; confirm callers normalize first.
template<typename Dtype>
void visualize_ds(Dtype *data, int width, int height, std::string img_key) {
  // cv::Mat img1(cv::Size(width, height), CV_8UC3), img2(cv::Size(width, height), CV_8UC3);
  cv::Mat flow_x(cv::Size(width, height), CV_8UC1), flow_y(cv::Size(width, height), CV_8UC1);
  // Copy the first plane (flow_x) into an 8-bit image.
  for (int y = 0; y < height; y++)
    for (int x = 0; x < width; x++) {
      unsigned char &grey = flow_x.at<uchar>(y, x);
      grey = (unsigned char) data[y * width + x];
    }
  // Copy the second plane (flow_y), which starts one full plane in.
  int flow_y_offset = height * width;
  for (int y = 0; y < height; y++)
    for (int x = 0; x < width; x++) {
      unsigned char &grey = flow_y.at<uchar>(y, x);
      grey = (unsigned char) data[flow_y_offset + y * width + x];
    }
  cv::imwrite("visualize/" + img_key.substr(0, 10) + "_ds_flow_x.jpg", flow_x);
  cv::imwrite("visualize/" + img_key.substr(0, 10) + "_ds_flow_y.jpg", flow_y);
  LOG(INFO) << "Img:" << img_key.substr(0, 10) << " wrote.";
}
// Downsampling kernel: the commented-out implementation computed, for each
// output element, a distance-weighted average over a (2*wradius+1) x
// (2*hradius+1) neighbourhood of the source, propagating NaN when more than
// half of the accumulated weight came from NaN samples.
//
// NOTE(review): the ENTIRE body below is commented out, so as written this
// kernel performs no work and never writes dest_data — yet Forward_gpu
// still launches it, leaving the output buffer uninitialized whenever the
// sizes differ.  Confirm whether this was disabled deliberately before
// relying on this layer.
template <typename Dtype>
__global__ void DownsampleFeatures(const int nthreads, const int num, const int channels, const int bottomwidth, const int bottomheight,
    const int topheight, const int topwidth, const int bot_countpernum, const int bot_numstride, const float widthScale, const float heightScale, const int wradius, const int hradius, const Dtype* src_data, Dtype* dest_data) {
// CUDA_KERNEL_LOOP(index, nthreads) {
// // From top (large,src_data) to bottom (small,dst_data)
//
// int destx = index % topwidth; //w-pos
// int desty = (index / topwidth) % topheight; //h-pos
//
// int cn = (index / topwidth / topheight);
// int c = cn % channels; //channel
// int n = cn / channels; //num
//
// //Compute source center pos in topdiff
// float botx = ((float)destx/(float)(topwidth-1)) * (float)(bottomwidth-1); // \in [0.0, (topwidth-1)]
// float boty = ((float)desty/(float)(topheight-1)) * (float)(bottomheight-1);
//
// int ibotx = round(botx);
// int iboty = round(boty);
//
// //printf("dest x,y[%d,%d] n:%d c:%d = idx:%d/%d | bot x,y[%d,%d]\n", destx, desty, n, c, index, nthreads, ibotx, iboty);
//
// // Accumulate in range around that point:
// int botidxoffcn = (bot_numstride*n) + (bottomwidth*bottomheight*c);
//
// float accum_value = 0;
// float accum_weight = 0;
// float accum_nan = 0;
//
// for(int yoff = -hradius; yoff <= hradius; yoff++) {
// int by = iboty + yoff;
// int botidxoffycn = by*bottomwidth + botidxoffcn;
// for(int xoff = -wradius; xoff <= wradius; xoff++) {
// int bx = ibotx + xoff;
//
// if(bx >= 0 && by >= 0 && bx < bottomwidth && by < bottomheight) {
// float sample = src_data[bx + botidxoffycn];
// float weight = max(0.0f,1.0f-(abs((float)bx - botx)/widthScale)) * max(0.0f,1.0f- (abs((float)by - boty)/heightScale) );
// if(sample != sample) { //isnan
// accum_nan += weight;
// sample = 0;
// weight = 0;
// }
//
// accum_value += sample * weight;
// accum_weight += weight;
// }
// }
// }
// if(accum_nan / accum_weight > 0.5) {
// dest_data[index] = CUDART_NAN_F;
// } else {
// dest_data[index] = accum_value / accum_weight;
// }
//
// //printf("dest x,y[%d,%d] n:%d c:%d = idx:%d | bot x,y[%d,%d] (int)val %d\n", destx, desty, n, c, index, ibotx, iboty, (int)src_data[ibotx + (iboty*bottomwidth + botidxoffcn)]);
// //dest_data[index] = src_data[ibotx + (iboty*bottomwidth + botidxoffcn)];
// }
}
// GPU forward pass of the downsampling layer.
//
// NOTE(review): as written this forward pass never fills `top`:
//  * when bottom and top sizes differ, DownsampleFeatures is launched but
//    its body is fully commented out (no-op);
//  * when the sizes are equal, nothing runs at all — both the
//    caffe_gpu_memcpy and the final "*top_data = *bottom_data" copy are
//    commented out.
// The top blob is therefore left with whatever data it previously held.
// Confirm whether this layer is intentionally disabled.
template <typename Dtype>
void DownsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype* top_data = top[0]->mutable_gpu_data(); // dest
  int topwidth = top[0]->width();
  int topheight = top[0]->height();
  int topchannels = top[0]->channels();
  int topcount = top[0]->count();
  /*TODO:Uncomment this to visualize
  for(int n = 0; n < bottom[0]->num(); n++){
  std::stringstream ss;
  ss << n<<"_bt" ;
  visualize_ds(bottom[0]->cpu_data(), bottom[0]->shape(3), bottom[0]->shape(2), ss.str());
  LOG(INFO) << "ds bottom Shape: " << bottom[0]->shape(0)<< " " << bottom[0]->shape(1)<< " "<<
  bottom[0]->shape(2)<<" "<< bottom[0]->shape(3);
  }
  */
  //LOG(INFO) << "Metrics: Tnum " << top[0]->num() << " Tchan " << topchannels << " Tw " << topwidth << " Th " << topheight;
  const Dtype* bottom_data = bottom[0]->gpu_data(); // source
  //LOG(INFO) << "Got ptr to bottom ";
  int bottomnum = (bottom)[0]->num();
  int bottomchannels = (bottom)[0]->channels();
  int bottomwidth = (bottom)[0]->width();
  int bottomheight = (bottom)[0]->height();
  int bottomcount = (bottom)[0]->count();
  if (bottomwidth != topwidth || bottomheight != topheight) {
    // From bottom to top
    int bot_countpernum = bottomwidth * bottomheight * bottomchannels;
    int bot_numstride = bottomwidth * bottomheight * bottomchannels;
    // Scale factors map top pixel coordinates onto the bottom grid.
    float widthScale = (float)(bottomwidth-1) / (float)(topwidth-1); // e.g. 2.0 if bottom pixeldist half compared to top.
    float heightScale = (float)(bottomheight-1) / (float)(topheight-1);
    const int wradius = ceil(widthScale); //One pixel from bottom is incfluenced by +- widthScale or heightScale pixels around that in top
    const int hradius = ceil(heightScale);
    // Loop over: bottomwidth,bottomheight,bottomlayers. (x,y,l)
    // Accumulate data from top_diff_chanoffsetptr, at
    // topx = (x/(bottomwidth-1)) [0.0, 1.0] * (topwidth-1) = [0.0, (topwidth-1)]
    // topy = analogously
    // in a rectangle around that point width range [-wradius,+wradius][-hradius,+hradius]
    // and weight each toppixel with "closeness" (=max(0,1-xdist)*max(0,1-ydist)) to [topx,topy] but xdist and ydist scaled by widthScale and heightScale.
    //LOG(INFO) << "Metrics: Bnum " << bottomnum << " Bchan " << bottomchannels << " Bw " << bottomwidth << " Bh " << bottomheight << " widSc " << widthScale << " hSc " << heightScale << " wrad " << wradius << " hrad " << hradius;
    // caffe_gpu_memcpy(topcount * sizeof(Dtype), bottom_data, top_data);
    // One thread per top element (but see the no-op note above).
    hipLaunchKernelGGL(( DownsampleFeatures<Dtype>), dim3(CAFFE_GET_BLOCKS(topcount)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        topcount,
        bottomnum, bottomchannels, bottomwidth, bottomheight,
        topheight, topwidth, bot_countpernum, bot_numstride, widthScale, heightScale, wradius, hradius, bottom_data, top_data);
    CUDA_POST_KERNEL_CHECK;
    /*TODO:Uncomment this to visualize
    for(int n = 0; n < top[0]->num(); n++){
    std::stringstream ss;
    ss << top[0]->shape(3)<< "_" <<top[0]->shape(2)<< n;
    visualize_ds(top[0]->cpu_data(), top[0]->shape(3), top[0]->shape(2), ss.str());
    LOG(INFO) << "LABEL SPAPE: " << top[0]->shape(0)<< " " << top[0]->shape(1)<< " "<<
    top[0]->shape(2)<<" "<< top[0]->shape(3);
    }
    */
  }
  //*top_data = *bottom_data;
}
// Downsampling has no defined gradient here: if the solver asks to
// backpropagate through any bottom blob, abort with a fatal error.
template <typename Dtype>
void DownsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  for (size_t i = 0; i < propagate_down.size(); ++i) {
    if (propagate_down[i]) {
      LOG(FATAL) << "DownsamplingLayer cannot do backward.";
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(DownsampleLayer);
} // namespace caffe
| 9405a017f31b65f6d7e81bebe16d81364bd2b78d.cu | // Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/downsample_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#define CUDART_NAN_F __int_as_float(0x7fffffff)
//using namespace std;
//using std::round;
namespace caffe {
template<typename Dtype>
void visualize_ds(Dtype *data, int width, int height, std::string img_key) {
// cv::Mat img1(cv::Size(width, height), CV_8UC3), img2(cv::Size(width, height), CV_8UC3);
cv::Mat flow_x(cv::Size(width, height), CV_8UC1), flow_y(cv::Size(width, height), CV_8UC1);
//flow_x
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++) {
unsigned char &grey = flow_x.at<uchar>(y, x);
grey = (unsigned char) data[y * width + x];
}
//flow_y
int flow_y_offset = height * width;
for (int y = 0; y < height; y++)
for (int x = 0; x < width; x++) {
unsigned char &grey = flow_y.at<uchar>(y, x);
grey = (unsigned char) data[flow_y_offset + y * width + x];
}
cv::imwrite("visualize/" + img_key.substr(0, 10) + "_ds_flow_x.jpg", flow_x);
cv::imwrite("visualize/" + img_key.substr(0, 10) + "_ds_flow_y.jpg", flow_y);
LOG(INFO) << "Img:" << img_key.substr(0, 10) << " wrote.";
}
template <typename Dtype>
__global__ void DownsampleFeatures(const int nthreads, const int num, const int channels, const int bottomwidth, const int bottomheight,
const int topheight, const int topwidth, const int bot_countpernum, const int bot_numstride, const float widthScale, const float heightScale, const int wradius, const int hradius, const Dtype* src_data, Dtype* dest_data) {
// CUDA_KERNEL_LOOP(index, nthreads) {
// // From top (large,src_data) to bottom (small,dst_data)
//
// int destx = index % topwidth; //w-pos
// int desty = (index / topwidth) % topheight; //h-pos
//
// int cn = (index / topwidth / topheight);
// int c = cn % channels; //channel
// int n = cn / channels; //num
//
// //Compute source center pos in topdiff
// float botx = ((float)destx/(float)(topwidth-1)) * (float)(bottomwidth-1); // \in [0.0, (topwidth-1)]
// float boty = ((float)desty/(float)(topheight-1)) * (float)(bottomheight-1);
//
// int ibotx = round(botx);
// int iboty = round(boty);
//
// //printf("dest x,y[%d,%d] n:%d c:%d = idx:%d/%d | bot x,y[%d,%d]\n", destx, desty, n, c, index, nthreads, ibotx, iboty);
//
// // Accumulate in range around that point:
// int botidxoffcn = (bot_numstride*n) + (bottomwidth*bottomheight*c);
//
// float accum_value = 0;
// float accum_weight = 0;
// float accum_nan = 0;
//
// for(int yoff = -hradius; yoff <= hradius; yoff++) {
// int by = iboty + yoff;
// int botidxoffycn = by*bottomwidth + botidxoffcn;
// for(int xoff = -wradius; xoff <= wradius; xoff++) {
// int bx = ibotx + xoff;
//
// if(bx >= 0 && by >= 0 && bx < bottomwidth && by < bottomheight) {
// float sample = src_data[bx + botidxoffycn];
// float weight = max(0.0f,1.0f-(abs((float)bx - botx)/widthScale)) * max(0.0f,1.0f- (abs((float)by - boty)/heightScale) );
// if(sample != sample) { //isnan
// accum_nan += weight;
// sample = 0;
// weight = 0;
// }
//
// accum_value += sample * weight;
// accum_weight += weight;
// }
// }
// }
// if(accum_nan / accum_weight > 0.5) {
// dest_data[index] = CUDART_NAN_F;
// } else {
// dest_data[index] = accum_value / accum_weight;
// }
//
// //printf("dest x,y[%d,%d] n:%d c:%d = idx:%d | bot x,y[%d,%d] (int)val %d\n", destx, desty, n, c, index, ibotx, iboty, (int)src_data[ibotx + (iboty*bottomwidth + botidxoffcn)]);
// //dest_data[index] = src_data[ibotx + (iboty*bottomwidth + botidxoffcn)];
// }
}
template <typename Dtype>
void DownsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();
int topcount = top[0]->count();
/*TODO:Uncomment this to visualize
for(int n = 0; n < bottom[0]->num(); n++){
std::stringstream ss;
ss << n<<"_bt" ;
visualize_ds(bottom[0]->cpu_data(), bottom[0]->shape(3), bottom[0]->shape(2), ss.str());
LOG(INFO) << "ds bottom Shape: " << bottom[0]->shape(0)<< " " << bottom[0]->shape(1)<< " "<<
bottom[0]->shape(2)<<" "<< bottom[0]->shape(3);
}
*/
//LOG(INFO) << "Metrics: Tnum " << top[0]->num() << " Tchan " << topchannels << " Tw " << topwidth << " Th " << topheight;
const Dtype* bottom_data = bottom[0]->gpu_data(); // source
//LOG(INFO) << "Got ptr to bottom ";
int bottomnum = (bottom)[0]->num();
int bottomchannels = (bottom)[0]->channels();
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();
if (bottomwidth != topwidth || bottomheight != topheight) {
// From bottom to top
int bot_countpernum = bottomwidth * bottomheight * bottomchannels;
int bot_numstride = bottomwidth * bottomheight * bottomchannels;
float widthScale = (float)(bottomwidth-1) / (float)(topwidth-1); // e.g. 2.0 if bottom pixeldist half compared to top.
float heightScale = (float)(bottomheight-1) / (float)(topheight-1);
const int wradius = ceil(widthScale); //One pixel from bottom is incfluenced by +- widthScale or heightScale pixels around that in top
const int hradius = ceil(heightScale);
// Loop over: bottomwidth,bottomheight,bottomlayers. (x,y,l)
// Accumulate data from top_diff_chanoffsetptr, at
// topx = (x/(bottomwidth-1)) [0.0, 1.0] * (topwidth-1) = [0.0, (topwidth-1)]
// topy = analogously
// in a rectangle around that point width range [-wradius,+wradius][-hradius,+hradius]
// and weight each toppixel with "closeness" (=max(0,1-xdist)*max(0,1-ydist)) to [topx,topy] but xdist and ydist scaled by widthScale and heightScale.
//LOG(INFO) << "Metrics: Bnum " << bottomnum << " Bchan " << bottomchannels << " Bw " << bottomwidth << " Bh " << bottomheight << " widSc " << widthScale << " hSc " << heightScale << " wrad " << wradius << " hrad " << hradius;
// caffe_gpu_memcpy(topcount * sizeof(Dtype), bottom_data, top_data);
DownsampleFeatures<Dtype><<<CAFFE_GET_BLOCKS(topcount), CAFFE_CUDA_NUM_THREADS>>>(
topcount,
bottomnum, bottomchannels, bottomwidth, bottomheight,
topheight, topwidth, bot_countpernum, bot_numstride, widthScale, heightScale, wradius, hradius, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
/*TODO:Uncomment this to visualize
for(int n = 0; n < top[0]->num(); n++){
std::stringstream ss;
ss << top[0]->shape(3)<< "_" <<top[0]->shape(2)<< n;
visualize_ds(top[0]->cpu_data(), top[0]->shape(3), top[0]->shape(2), ss.str());
LOG(INFO) << "LABEL SPAPE: " << top[0]->shape(0)<< " " << top[0]->shape(1)<< " "<<
top[0]->shape(2)<<" "<< top[0]->shape(3);
}
*/
}
//*top_data = *bottom_data;
}
template <typename Dtype>
void DownsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for(int i=0; i<propagate_down.size(); i++)
if(propagate_down[i])
LOG(FATAL) << "DownsamplingLayer cannot do backward.";
}
INSTANTIATE_LAYER_GPU_FUNCS(DownsampleLayer);
} // namespace caffe
|
9c14d7c1da01d4eba265afe64cf3bb1034484d53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Samples a trigonometric function over a uniform 1-D grid.  Each thread
// evaluates one x = frange_start + tid*dx and stores the interleaved pair
// (x, f(x)) at out[2*tid], out[2*tid + 1].
//   functionCode 0 -> cos, 1 -> tan, anything else -> sin.
// NOTE: no bounds check — the launch configuration must cover exactly the
// number of samples backing `out`.
__global__ void mapKernel(float* out, int functionCode, float frange_start, float dx) {
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	const float x = frange_start + dx * tid;
	float y;
	if (functionCode == 0) {
		y = cos(x);
	} else if (functionCode == 1) {
		y = tan(x);
	} else {
		y = sin(x);
	}
	float* pair = out + 2 * tid;
	pair[0] = x;
	pair[1] = y;
} | 9c14d7c1da01d4eba265afe64cf3bb1034484d53.cu | #include "includes.h"
// Samples a trigonometric function over a uniform 1-D grid: thread `id`
// writes the interleaved pair (x, f(x)) at out[2*id], out[2*id + 1] where
// x = frange_start + id*dx.  functionCode selects f: 0 -> cos, 1 -> tan,
// anything else -> sin.
// NOTE(review): no bounds check — the launch configuration must cover
// exactly the number of samples backing `out`; verify at the call site.
__global__ void mapKernel(float* out, int functionCode, float frange_start, float dx) {
	int id = blockIdx.x * blockDim.x + threadIdx.x;
	float x = frange_start + id * dx;
	float y;
	switch (functionCode) {
	case 0: y = cos(x); break;
	case 1: y = tan(x); break;
	default: y = sin(x); break;
	}
	out[2 * id + 0] = x;
	out[2 * id + 1] = y;
} |
8b1b5dfa3f9a2f2bb3b09d5a49e792292010c672.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
//better to access from a single process to avoid unknown behavior. so access only from master
hipError_t calculateNewPointPositionViaTime_Cuda(Point::PointAsStruct* pointsArr, int numberOfPoints, double currentTime);
// One thread per point: advances a point's current position to time
// `currentTime` from its initial position and constant velocity,
//   current = P0 + currentTime * velocity   (per axis).
// The global index is computed with MAX_THREADS_FOR_CUDA, which matches
// the blockDim used at the launch site below; out-of-range threads exit.
__global__ void calcNewPosition(Point::PointAsStruct* pointsArr, int numberOfPoints, double currentTime)
{
	int threadId = threadIdx.x;
	int blockId = blockIdx.x;
	int index = threadId + blockId * MAX_THREADS_FOR_CUDA;
	// Guard: the last block may be only partially filled.
	if (index < numberOfPoints)
	{
		pointsArr[index].current_x = pointsArr[index].X0 + (currentTime * pointsArr[index].velocity_x);
		pointsArr[index].current_y = pointsArr[index].Y0 + (currentTime * pointsArr[index].velocity_y);
		pointsArr[index].current_z = pointsArr[index].Z0 + (currentTime * pointsArr[index].velocity_z);
	}
}
// Helper function that uses CUDA to recompute all point positions for a given time in parallel.
// Updates every point's current position for the given simulation time
// (current = P0 + t * velocity, per axis) on the GPU: copies the points to
// the device, launches one thread per point, and copies the results back.
// Intended to be called from the master process only, to avoid concurrent
// access to the device.  Returns hipSuccess, or the first failing status.
//
// Fixes vs. the original:
//  * error paths now return immediately instead of falling through and
//    continuing with an invalid state;
//  * pointsArr_device is initialized, so it is never freed uninitialized;
//  * the device buffer is freed on the success path (was leaked).
hipError_t calculateNewPointPositionViaTime_Cuda(Point::PointAsStruct* pointsArr, int numberOfPoints, double currentTime)
{
	int numOfBlocksForCuda;
	Point::PointAsStruct* pointsArr_device = NULL; // NULL so hipFree is always safe
	hipError_t cudaStatus;

	// Ceiling division: enough blocks to cover every point.
	numOfBlocksForCuda = 1 + ((numberOfPoints - 1) / MAX_THREADS_FOR_CUDA);

	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = hipSetDevice(0);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
		return cudaStatus;
	}

	// Allocate GPU buffer for array of Points.
	cudaStatus = hipMalloc((void**)&pointsArr_device, numberOfPoints * sizeof(Point::PointAsStruct));
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMalloc failed!");
		return cudaStatus;
	}

	// Copy array of Points from host memory to GPU buffers.
	cudaStatus = hipMemcpy(pointsArr_device, pointsArr, numberOfPoints * sizeof(Point::PointAsStruct), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed!");
		hipFree(pointsArr_device);
		return cudaStatus;
	}

	// Launch a kernel on the GPU with one thread for each element.
	hipLaunchKernelGGL(( calcNewPosition), dim3(numOfBlocksForCuda), dim3(MAX_THREADS_FOR_CUDA), 0, 0, pointsArr_device, numberOfPoints, currentTime);

	// Check for any errors launching the kernel
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
		hipFree(pointsArr_device);
		return cudaStatus;
	}

	// hipDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = hipDeviceSynchronize();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
		hipFree(pointsArr_device);
		return cudaStatus;
	}

	// Copy output vector from GPU buffer to host memory.
	cudaStatus = hipMemcpy(pointsArr, pointsArr_device, numberOfPoints * sizeof(Point::PointAsStruct), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed!");
	}

	// Always release the device buffer (the original leaked it on success).
	hipFree(pointsArr_device);
	return cudaStatus;
}
| 8b1b5dfa3f9a2f2bb3b09d5a49e792292010c672.cu |
#include "kernel.h"
//better to access from a single process to avoid unknown behavior. so access only from master
cudaError_t calculateNewPointPositionViaTime_Cuda(Point::PointAsStruct* pointsArr, int numberOfPoints, double currentTime);
// Kernel: advance every point along its velocity to its position at
// `currentTime` (linear motion: p = p0 + t * v).
// Expected launch: 1-D grid, 1-D blocks, one thread per point.
__global__ void calcNewPosition(Point::PointAsStruct* pointsArr, int numberOfPoints, double currentTime)
{
    // Use blockDim.x instead of the hard-coded MAX_THREADS_FOR_CUDA so the
    // kernel is correct for any block size (the current launch config uses
    // MAX_THREADS_FOR_CUDA threads per block, so behavior is unchanged).
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard: the last block may be only partially filled.
    if (index < numberOfPoints)
    {
        pointsArr[index].current_x = pointsArr[index].X0 + (currentTime * pointsArr[index].velocity_x);
        pointsArr[index].current_y = pointsArr[index].Y0 + (currentTime * pointsArr[index].velocity_y);
        pointsArr[index].current_z = pointsArr[index].Z0 + (currentTime * pointsArr[index].velocity_z);
    }
}
// Helper function that uses CUDA to update all point positions in parallel.
// Host helper: copies `pointsArr` to the GPU, launches calcNewPosition to
// advance every point to its position at `currentTime`, and copies the
// updated points back into `pointsArr` (in place).
//
// Returns the first cudaError_t encountered (cudaSuccess on full success).
// Fixes over the original: each failing step now returns immediately instead
// of falling through with an invalid device pointer; the device buffer is no
// longer freed while uninitialized on the cudaSetDevice failure path; and it
// is now released on the success path (it used to leak every call).
cudaError_t calculateNewPointPositionViaTime_Cuda(Point::PointAsStruct* pointsArr, int numberOfPoints, double currentTime)
{
    Point::PointAsStruct* pointsArr_device = NULL;
    cudaError_t cudaStatus;

    // Nothing to do for an empty input.
    if (numberOfPoints <= 0) {
        return cudaSuccess;
    }

    // Ceil-divide so a partial final block still covers every point.
    int numOfBlocksForCuda = 1 + ((numberOfPoints - 1) / MAX_THREADS_FOR_CUDA);

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        return cudaStatus;
    }

    // Allocate GPU buffer for array of Points.
    cudaStatus = cudaMalloc((void**)&pointsArr_device, numberOfPoints * sizeof(Point::PointAsStruct));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        return cudaStatus;
    }

    // Copy array of Points from host memory to GPU buffer.
    cudaStatus = cudaMemcpy(pointsArr_device, pointsArr, numberOfPoints * sizeof(Point::PointAsStruct), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        cudaFree(pointsArr_device);
        return cudaStatus;
    }

    // Launch a kernel on the GPU with one thread for each element.
    calcNewPosition<<<numOfBlocksForCuda, MAX_THREADS_FOR_CUDA>>>(pointsArr_device, numberOfPoints, currentTime);

    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "calcNewPosition launch failed: %s\n", cudaGetErrorString(cudaStatus));
        cudaFree(pointsArr_device);
        return cudaStatus;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching calcNewPosition!\n", cudaStatus);
        cudaFree(pointsArr_device);
        return cudaStatus;
    }

    // Copy the updated points from GPU buffer back to host memory.
    cudaStatus = cudaMemcpy(pointsArr, pointsArr_device, numberOfPoints * sizeof(Point::PointAsStruct), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }

    // Always release the device buffer exactly once.
    cudaFree(pointsArr_device);
    return cudaStatus;
}
|
c4cea3e958227e55d1e292165294d442d7bb6e32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
typedef long long ll_t;
typedef unsigned char uint8_t;
// Kernel: computes, for each work item `lid`, the global slot index it should
// write to. Each item's label owns a contiguous range of slots
// [divStart[label], divStart[label] + divSize[label]); the kernel walks that
// range counting free slots (isEmpty == 1) and records the address of the
// ioa[lid]-th free slot into write_adr[lid].
// Expected launch: 1-D grid, _TPB_ threads per block, one thread per label entry.
// NOTE(review): if fewer than ioa[lid]+1 free slots exist in the range,
// write_adr[lid] is left untouched — presumably callers pre-initialize it; verify.
extern "C"
__global__ void get_write_address(
    const uint8_t* __restrict__ isEmpty,   // per-slot flag: 1 means the slot is free
    const ll_t* __restrict__ divStart,     // first slot index of each label's range
    const ll_t* __restrict__ divSize,      // number of slots in each label's range
    const ll_t* __restrict__ labels,       // label of each work item
    const ll_t* __restrict__ ioa,          // ordinal among items sharing the label (index of arrival — TODO confirm)
    ll_t* __restrict__ write_adr,          // out: chosen slot index per work item
    int n_slots, int n_labels
  ) {
  int tid = threadIdx.x; // thread ID
  int lid = blockIdx.x * _TPB_ + tid; // label ID
  if (lid < n_labels){
    const ll_t threadLabel = labels[lid];
    const ll_t threadIoa = ioa[lid];
    const ll_t threadDivStart = divStart[threadLabel];
    const ll_t threadDivSize = divSize[threadLabel];
    ll_t counter = 0;  // number of free slots seen so far in the range
    for (int i=0; i<threadDivSize; i++){
      ll_t adr = threadDivStart + i;
      if (adr < n_slots){  // clamp: range may extend past the slot array
        uint8_t empty = isEmpty[adr];
        if (empty == 1){
          if (counter == threadIoa){
            write_adr[lid] = adr;
            break;
          }
          counter ++;
        }
      }
    } // end for i
  }
} | c4cea3e958227e55d1e292165294d442d7bb6e32.cu | typedef long long ll_t;
typedef unsigned char uint8_t;
// Kernel: computes, for each work item `lid`, the global slot index it should
// write to. Each item's label owns a contiguous range of slots
// [divStart[label], divStart[label] + divSize[label]); the kernel walks that
// range counting free slots (isEmpty == 1) and records the address of the
// ioa[lid]-th free slot into write_adr[lid].
// Expected launch: 1-D grid, _TPB_ threads per block, one thread per label entry.
// NOTE(review): if fewer than ioa[lid]+1 free slots exist in the range,
// write_adr[lid] is left untouched — presumably callers pre-initialize it; verify.
extern "C"
__global__ void get_write_address(
    const uint8_t* __restrict__ isEmpty,   // per-slot flag: 1 means the slot is free
    const ll_t* __restrict__ divStart,     // first slot index of each label's range
    const ll_t* __restrict__ divSize,      // number of slots in each label's range
    const ll_t* __restrict__ labels,       // label of each work item
    const ll_t* __restrict__ ioa,          // ordinal among items sharing the label (index of arrival — TODO confirm)
    ll_t* __restrict__ write_adr,          // out: chosen slot index per work item
    int n_slots, int n_labels
  ) {
  int tid = threadIdx.x; // thread ID
  int lid = blockIdx.x * _TPB_ + tid; // label ID
  if (lid < n_labels){
    const ll_t threadLabel = labels[lid];
    const ll_t threadIoa = ioa[lid];
    const ll_t threadDivStart = divStart[threadLabel];
    const ll_t threadDivSize = divSize[threadLabel];
    ll_t counter = 0;  // number of free slots seen so far in the range
    for (int i=0; i<threadDivSize; i++){
      ll_t adr = threadDivStart + i;
      if (adr < n_slots){  // clamp: range may extend past the slot array
        uint8_t empty = isEmpty[adr];
        if (empty == 1){
          if (counter == threadIoa){
            write_adr[lid] = adr;
            break;
          }
          counter ++;
        }
      }
    } // end for i
  }
} |
9de31e2cad65f924a0716c5378ddeb10ba17c4ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <numeric>
#include "kernel/StreamCompaction.h"
#include "cuda/cudadefs.h"
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
// NOTE
// https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html
// https://github.com/bcrusco/CUDA-Path-Tracer/blob/master/stream_compaction/efficient.cu
// Computes, for each scanned block, the value to add to it: the running total of the block-wise exclusiveScan results of all preceding blocks.
// For each scanned block, derive the amount that must later be added to it:
// the last input element plus the last exclusive-scan output of the previous
// block (i.e. the previous block's inclusive total). dst[0] is 0 because no
// block precedes the first one.
__global__ void computeBlockCount(
    int32_t* dst,
    int32_t num,    // block count per grid used in exclusiveScan.
    int32_t stride, // thread count per block used in exclusiveScan.
    const int32_t* src0,
    const int32_t* src1)
{
    const int32_t index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num) {
        return;
    }
    if (index == 0) {
        dst[0] = 0;
        return;
    }
    // Last element of the block immediately before this one.
    const int32_t tail = index * stride - 1;
    dst[index] = src0[tail] + src1[tail];
}
// For each block, adds the accumulated exclusiveScan total of all preceding blocks to every element of that block.
// Adds the per-block increment (produced by computeBlockCount) to every
// element belonging to the corresponding block of the scanned array.
__global__ void incrementBlocks(
    int32_t* data,
    int32_t num,
    const int32_t* incr) // value to increment for each blocks.
{
    const int32_t index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < num) {
        data[index] += incr[blockIdx.x];
    }
}
// Work-efficient (Blelloch) exclusive scan of one `stride`-element chunk per
// block, staged in dynamically-sized shared memory (launched with
// stride * sizeof(int32_t) bytes and stride/2 threads per block — each
// thread owns two elements).
// NOTE(review): threads past the tail return before reaching the
// __syncthreads() calls below (divergent barrier), and the tail block reads
// src[] across the full chunk width regardless of `num` — presumably the
// caller's buffers are sized/padded to a multiple of `stride`; verify.
__global__ void exclusiveScan(int32_t* dst, int32_t num, int32_t stride, const int32_t* src)
{
    // Shared scratch for this block's chunk; size supplied at launch.
    extern __shared__ int32_t temp[];

    int32_t index = threadIdx.x;
    int32_t offset = 1;

    auto n = blockIdx.x * blockDim.x + threadIdx.x;
    if (n * 2 >= num) {
        return;
    }

    // Copy input data to shared memory (two elements per thread).
    temp[2 * index] = src[2 * index + (blockIdx.x * blockDim.x * 2)];
    temp[2 * index + 1] = src[2 * index + 1 + (blockIdx.x * blockDim.x * 2)];

    // Up sweep: build partial sums in place (reduction tree).
    for (int32_t d = stride >> 1; d > 0; d >>= 1) {
        __syncthreads();

        if (index < d) {
            int32_t ai = offset * (2 * index + 1) - 1;
            int32_t bi = offset * (2 * index + 2) - 1;

            temp[bi] += temp[ai];
        }

        offset *= 2;
    }

    // Clear the root (last element) so the scan is exclusive.
    if (index == 0) {
        temp[stride - 1] = 0;
    }

    // Down sweep: traverse the tree back down, swapping and accumulating.
    for (int32_t d = 1; d < stride; d *= 2) {
        offset >>= 1;

        __syncthreads();

        if (index < d && offset > 0) {
            int32_t ai = offset * (2 * index + 1) - 1;
            int32_t bi = offset * (2 * index + 2) - 1;

            int32_t t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }

    __syncthreads();

    // Write to output array.
    dst[2 * index + (blockIdx.x * blockDim.x * 2)] = temp[2 * index];
    dst[2 * index + 1 + (blockIdx.x * blockDim.x * 2)] = temp[2 * index + 1];
}
// Scatter phase of stream compaction: when bools[idx] is set, copy src[idx]
// into dst at its scanned position indices[idx]. Thread 0 also stores the
// survivor count (= last flag + last exclusive-scan value) into *count.
__global__ void scatter(
    int32_t* dst,
    int32_t* count,
    int32_t num,
    const int32_t* bools,
    const int32_t* indices,
    const int32_t* src)
{
    const int32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num) {
        return;
    }

    if (idx == 0) {
        // With an exclusive scan, last index + last flag == number kept.
        *count = bools[num - 1] + indices[num - 1];
    }

    if (bools[idx] > 0) {
        dst[indices[idx]] = src[idx];
    }
}
namespace idaten
{
    // Accessor for the 1-element device buffer holding the survivor count
    // written by the scatter kernel during compact().
    idaten::TypedCudaMemory<int32_t>& StreamCompaction::getCount()
    {
        return m_counts;
    }
    // Allocates all working buffers for scan/compact.
    // maxInputNum: maximum element count any later scan/compact call may use.
    // blockSize:   thread-block size used by the scan kernels.
    // Idempotent: only the first call (while m_maxInputNum == 0) has effect.
    void StreamCompaction::init(
        int32_t maxInputNum,
        int32_t blockSize)
    {
        if (m_maxInputNum == 0) {
            m_maxInputNum = maxInputNum;
            m_blockSize = blockSize;

            // One entry per scan block for the per-block increment buffers.
            int32_t blockPerGrid = (maxInputNum - 1) / blockSize + 1;

            m_increments.resize(blockPerGrid);
            m_tmp.resize(blockPerGrid);
            m_work.resize(blockPerGrid);

            m_indices.resize(m_maxInputNum);

            // Identity sequence 0..maxInputNum-1, used as scatter's source
            // values so compact() emits the surviving element indices.
            std::vector<int32_t> iota(m_maxInputNum);
            std::iota(iota.begin(), iota.end(), 0);

            m_iota.resize(iota.size());
            m_iota.writeFromHostToDeviceByNum(&iota[0], iota.size());

            // Single device-side counter written by the scatter kernel.
            m_counts.resize(1);
        }
    }
    // Releases every device buffer and resets the size/block-size state so a
    // subsequent init() call can reconfigure the instance.
    void StreamCompaction::clear()
    {
        m_maxInputNum = 0;
        m_blockSize = 0;

        m_increments.free();
        m_tmp.free();
        m_work.free();

        m_indices.free();
        m_iota.free();

        m_counts.free();
    }
void StreamCompaction::scan(
const int32_t blocksize,
idaten::TypedCudaMemory<int32_t>& src,
idaten::TypedCudaMemory<int32_t>& dst)
{
AT_ASSERT(dst.num() <= m_maxInputNum);
int32_t blockPerGrid = (dst.num() - 1) / blocksize + 1;
exclusiveScan << <blockPerGrid, blocksize / 2, blocksize * sizeof(int32_t), m_stream >> > (
dst.data(),
dst.num(),
blocksize,
src.data());
checkCudaKernel(exclusiveScan);
if (blockPerGrid <= 1) {
// If number of block is 1, finish.
return;
}
int32_t tmpBlockPerGrid = (blockPerGrid - 1) / blocksize + 1;
int32_t tmpBlockSize = blockPerGrid;
computeBlockCount << <tmpBlockPerGrid, tmpBlockSize, 0, m_stream >> > (
m_increments.data(),
m_increments.num(),
blocksize,
src.data(),
dst.data());
checkCudaKernel(computeBlockCount);
idaten::TypedCudaMemory<int32_t>* input = &m_increments;
idaten::TypedCudaMemory<int32_t>* output = &m_tmp;
idaten::TypedCudaMemory<int32_t>* tmpptr = &m_tmp;
int32_t elementNum = blockPerGrid;
int32_t count = 1;
int32_t innerBlockPerGrid = 0;
std::vector<int32_t> stackBlockPerGrid;
// Scan blocks.
for (;;) {
innerBlockPerGrid = (elementNum - 1) / blocksize + 1;
stackBlockPerGrid.push_back(elementNum);
exclusiveScan << <innerBlockPerGrid, blocksize / 2, blocksize * sizeof(int32_t), m_stream >> >(
m_work.data(),
m_work.num(),
blocksize,
input->data());
checkCudaKernel(iterate_exclusiveScan);
if (innerBlockPerGrid <= 1) {
//hipMemcpyAsync(tmp.data(), work.data(), work.bytes(), cudaMemcpyAsyncDeviceToDevice);
tmpptr = &m_work;
break;
}
int32_t innerTmpBlockPerGrid = (innerBlockPerGrid - 1) / blocksize + 1;
int32_t innerTmpBlockSize = innerBlockPerGrid;
computeBlockCount << <innerTmpBlockPerGrid, innerTmpBlockSize, 0, m_stream >> > (
output->data(),
output->num(),
blocksize,
input->data(),
m_work.data());
checkCudaKernel(iterate_computeBlockCount);
// swap.
auto p = input;
input = output;
output = p;
elementNum = innerBlockPerGrid;
count++;
}
#if 1
input = tmpptr;
output = &m_increments;
for (int32_t i = count - 1; i >= 0; i--) {
// blocks per grid.
auto bpg = stackBlockPerGrid[i];
auto threadPerBlock = (output->num() + bpg - 1) / bpg;
incrementBlocks << <bpg, threadPerBlock, 0, m_stream >> > (
output->data(),
output->num(),
input->data());
checkCudaKernel(iterate_incrementBlocks);
// swap.
auto p = input;
input = output;
output = p;
}
idaten::TypedCudaMemory<int32_t>* incrResult = (count & 0x1 == 0 ? tmpptr : &m_increments);
#endif
incrementBlocks << <blockPerGrid, blocksize, 0, m_stream >> > (
dst.data(),
dst.num(),
incrResult->data());
checkCudaKernel(incrementBlocks);
}
    // Stream compaction: writes into `dst` the indices i for which
    // bools[i] != 0, preserving their order. If `result` is non-null, the
    // survivor count is read back from the device into it.
    // NOTE(review): unlike the launches in scan(), the scatter launch below
    // is not followed by a checkCudaKernel/error query — confirm intent.
    void StreamCompaction::compact(
        idaten::TypedCudaMemory<int32_t>& dst,
        idaten::TypedCudaMemory<int32_t>& bools,
        int32_t* result/*= nullptr*/)
    {
        // Exclusive scan of the keep-flags yields each kept element's slot.
        scan(m_blockSize, bools, m_indices);

        int32_t num = dst.num();
        int32_t blockPerGrid = (num - 1) / m_blockSize + 1;

        scatter << <blockPerGrid, m_blockSize, 0, m_stream >> > (
            dst.data(),
            m_counts.data(),
            dst.num(),
            bools.data(),
            m_indices.data(),
            m_iota.data());

        if (result) {
            m_counts.readFromDeviceToHostByNum(result);
        }
    }
#if 0
// test implementation.
void StreamCompaction::compact()
{
#if 1
const int32_t blocksize = m_blockSize;
int32_t f[] = { 3, 1, 7, 0, 4, 1, 6, 3, 3, 1, 7, 0, 4, 1, 6, 3, 3, 1, 7, 0, 4, 1, 6, 3, 3, 1, 7, 0, 4, 1, 6, 3, 3, 1, 7, 0, 4, 1, 6, 3 };
//int32_t f[] = { 3, 1, 7, 0, 4, 1, 6, 3, 3, 1 };
//int32_t f[] = { 3, 1, 7, 0, 4, 1, 6, 3 };
//int32_t f[] = { 0, 25, 25, 25 };
//int32_t c = aten::nextPow2(AT_COUNTOF(f));
int32_t c = AT_COUNTOF(f);
std::vector<int32_t> x(c);
memcpy(&x[0], f, sizeof(int32_t) * AT_COUNTOF(f));
idaten::TypedCudaMemory<int32_t> src;
src.init(x.size());
src.writeFromHostToDeviceByNum(&x[0], x.size());
idaten::TypedCudaMemory<int32_t> dst;
dst.init(x.size());
scan(blocksize, src, dst);
std::vector<int32_t> buffer(x.size());
dst.readFromDeviceToHostByNum(&buffer[0]);
int32_t xxx = 0;
#else
const int32_t blocksize = m_blockSize;
int32_t b[] = { 1, 0, 1, 0, 1, 0, 1, 0 };
int32_t v[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
AT_ASSERT(AT_COUNTOF(b) == AT_COUNTOF(v));
int32_t num = AT_COUNTOF(b);
std::vector<int32_t> buffer(num);
idaten::TypedCudaMemory<int32_t> bools;
bools.init(num);
bools.writeFromHostToDeviceByNum(b, num);
idaten::TypedCudaMemory<int32_t> indices;
indices.init(num);
scan(blocksize, bools, indices);
indices.readFromDeviceToHostByNum(&buffer[0]);
idaten::TypedCudaMemory<int32_t> values;
values.init(num);
values.writeFromHostToDeviceByNum(v, num);
idaten::TypedCudaMemory<int32_t> dst;
dst.init(num);
idaten::TypedCudaMemory<int32_t> count;
count.init(1);
int32_t blockPerGrid = (num - 1) / blocksize + 1;
scatter << <blockPerGrid, blocksize >> > (
dst.data(),
count.data(),
dst.maxNum(),
bools.data(),
indices.data(),
values.data());
dst.readFromDeviceToHostByNum(&buffer[0]);
int32_t _count = -1;
count.readFromDeviceToHostByNum(&_count);
int32_t xxx = 0;
#endif
}
#endif
}
| 9de31e2cad65f924a0716c5378ddeb10ba17c4ff.cu | #include <vector>
#include <numeric>
#include "kernel/StreamCompaction.h"
#include "cuda/cudadefs.h"
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
// NOTE
// https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html
// https://github.com/bcrusco/CUDA-Path-Tracer/blob/master/stream_compaction/efficient.cu
// Computes, for each scanned block, the value to add to it: the running total
// of the block-wise exclusiveScan results of all preceding blocks.
// (Comment translated from Japanese.)
__global__ void computeBlockCount(
    int32_t* dst,
    int32_t num, // block count per grid used in exclusiveScan.
    int32_t stride, // thread count per block used in exclusiveScan.
    const int32_t* src0,
    const int32_t* src1)
{
    int32_t index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= num) {
        return;
    }

    if (index == 0) {
        // No block precedes the first one.
        dst[index] = 0;
    }
    else {
        // Last input element + last scanned value of the previous block
        // == the previous block's inclusive total.
        dst[index] = src0[index * stride - 1] + src1[index * stride - 1];
    }
}
// For each block, adds the accumulated exclusiveScan total of all preceding
// blocks to every element of that block. (Comment translated from Japanese.)
__global__ void incrementBlocks(
    int32_t* data,
    int32_t num,
    const int32_t* incr) // value to increment for each blocks.
{
    int32_t index = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (index >= num) {
        return;
    }

    data[index] += incr[blockIdx.x];
}
// Work-efficient (Blelloch) exclusive scan of one `stride`-element chunk per
// block, staged in dynamically-sized shared memory (launched with
// stride * sizeof(int32_t) bytes and stride/2 threads per block — each
// thread owns two elements).
// NOTE(review): threads past the tail return before reaching the
// __syncthreads() calls below (divergent barrier), and the tail block reads
// src[] across the full chunk width regardless of `num` — presumably the
// caller's buffers are sized/padded to a multiple of `stride`; verify.
__global__ void exclusiveScan(int32_t* dst, int32_t num, int32_t stride, const int32_t* src)
{
    // Shared scratch for this block's chunk; size supplied at launch.
    extern __shared__ int32_t temp[];

    int32_t index = threadIdx.x;
    int32_t offset = 1;

    auto n = blockIdx.x * blockDim.x + threadIdx.x;
    if (n * 2 >= num) {
        return;
    }

    // Copy input data to shared memory (two elements per thread).
    temp[2 * index] = src[2 * index + (blockIdx.x * blockDim.x * 2)];
    temp[2 * index + 1] = src[2 * index + 1 + (blockIdx.x * blockDim.x * 2)];

    // Up sweep: build partial sums in place (reduction tree).
    for (int32_t d = stride >> 1; d > 0; d >>= 1) {
        __syncthreads();

        if (index < d) {
            int32_t ai = offset * (2 * index + 1) - 1;
            int32_t bi = offset * (2 * index + 2) - 1;

            temp[bi] += temp[ai];
        }

        offset *= 2;
    }

    // Clear the root (last element) so the scan is exclusive.
    if (index == 0) {
        temp[stride - 1] = 0;
    }

    // Down sweep: traverse the tree back down, swapping and accumulating.
    for (int32_t d = 1; d < stride; d *= 2) {
        offset >>= 1;

        __syncthreads();

        if (index < d && offset > 0) {
            int32_t ai = offset * (2 * index + 1) - 1;
            int32_t bi = offset * (2 * index + 2) - 1;

            int32_t t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }

    __syncthreads();

    // Write to output array.
    dst[2 * index + (blockIdx.x * blockDim.x * 2)] = temp[2 * index];
    dst[2 * index + 1 + (blockIdx.x * blockDim.x * 2)] = temp[2 * index + 1];
}
// Scatter phase of stream compaction: when bools[idx] is set, copy src[idx]
// into dst at its scanned position indices[idx]. Thread 0 also stores the
// survivor count (= last flag + last exclusive-scan value) into *count.
__global__ void scatter(
    int32_t* dst,
    int32_t* count,
    int32_t num,
    const int32_t* bools,
    const int32_t* indices,
    const int32_t* src)
{
    int32_t idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= num) {
        return;
    }

    if (bools[idx] > 0) {
        int32_t pos = indices[idx];
        dst[pos] = src[idx];
    }

    if (idx == 0) {
        // With an exclusive scan, last index + last flag == number kept.
        *count = bools[num - 1] + indices[num - 1];
    }
}
namespace idaten
{
    // Accessor for the 1-element device buffer holding the survivor count
    // written by the scatter kernel during compact().
    idaten::TypedCudaMemory<int32_t>& StreamCompaction::getCount()
    {
        return m_counts;
    }
    // Allocates all working buffers for scan/compact.
    // maxInputNum: maximum element count any later scan/compact call may use.
    // blockSize:   thread-block size used by the scan kernels.
    // Idempotent: only the first call (while m_maxInputNum == 0) has effect.
    void StreamCompaction::init(
        int32_t maxInputNum,
        int32_t blockSize)
    {
        if (m_maxInputNum == 0) {
            m_maxInputNum = maxInputNum;
            m_blockSize = blockSize;

            // One entry per scan block for the per-block increment buffers.
            int32_t blockPerGrid = (maxInputNum - 1) / blockSize + 1;

            m_increments.resize(blockPerGrid);
            m_tmp.resize(blockPerGrid);
            m_work.resize(blockPerGrid);

            m_indices.resize(m_maxInputNum);

            // Identity sequence 0..maxInputNum-1, used as scatter's source
            // values so compact() emits the surviving element indices.
            std::vector<int32_t> iota(m_maxInputNum);
            std::iota(iota.begin(), iota.end(), 0);

            m_iota.resize(iota.size());
            m_iota.writeFromHostToDeviceByNum(&iota[0], iota.size());

            // Single device-side counter written by the scatter kernel.
            m_counts.resize(1);
        }
    }
    // Releases every device buffer and resets the size/block-size state so a
    // subsequent init() call can reconfigure the instance.
    void StreamCompaction::clear()
    {
        m_maxInputNum = 0;
        m_blockSize = 0;

        m_increments.free();
        m_tmp.free();
        m_work.free();

        m_indices.free();
        m_iota.free();

        m_counts.free();
    }
void StreamCompaction::scan(
const int32_t blocksize,
idaten::TypedCudaMemory<int32_t>& src,
idaten::TypedCudaMemory<int32_t>& dst)
{
AT_ASSERT(dst.num() <= m_maxInputNum);
int32_t blockPerGrid = (dst.num() - 1) / blocksize + 1;
exclusiveScan << <blockPerGrid, blocksize / 2, blocksize * sizeof(int32_t), m_stream >> > (
dst.data(),
dst.num(),
blocksize,
src.data());
checkCudaKernel(exclusiveScan);
if (blockPerGrid <= 1) {
// If number of block is 1, finish.
return;
}
int32_t tmpBlockPerGrid = (blockPerGrid - 1) / blocksize + 1;
int32_t tmpBlockSize = blockPerGrid;
computeBlockCount << <tmpBlockPerGrid, tmpBlockSize, 0, m_stream >> > (
m_increments.data(),
m_increments.num(),
blocksize,
src.data(),
dst.data());
checkCudaKernel(computeBlockCount);
idaten::TypedCudaMemory<int32_t>* input = &m_increments;
idaten::TypedCudaMemory<int32_t>* output = &m_tmp;
idaten::TypedCudaMemory<int32_t>* tmpptr = &m_tmp;
int32_t elementNum = blockPerGrid;
int32_t count = 1;
int32_t innerBlockPerGrid = 0;
std::vector<int32_t> stackBlockPerGrid;
// Scan blocks.
for (;;) {
innerBlockPerGrid = (elementNum - 1) / blocksize + 1;
stackBlockPerGrid.push_back(elementNum);
exclusiveScan << <innerBlockPerGrid, blocksize / 2, blocksize * sizeof(int32_t), m_stream >> >(
m_work.data(),
m_work.num(),
blocksize,
input->data());
checkCudaKernel(iterate_exclusiveScan);
if (innerBlockPerGrid <= 1) {
//cudaMemcpyAsync(tmp.data(), work.data(), work.bytes(), cudaMemcpyAsyncDeviceToDevice);
tmpptr = &m_work;
break;
}
int32_t innerTmpBlockPerGrid = (innerBlockPerGrid - 1) / blocksize + 1;
int32_t innerTmpBlockSize = innerBlockPerGrid;
computeBlockCount << <innerTmpBlockPerGrid, innerTmpBlockSize, 0, m_stream >> > (
output->data(),
output->num(),
blocksize,
input->data(),
m_work.data());
checkCudaKernel(iterate_computeBlockCount);
// swap.
auto p = input;
input = output;
output = p;
elementNum = innerBlockPerGrid;
count++;
}
#if 1
input = tmpptr;
output = &m_increments;
for (int32_t i = count - 1; i >= 0; i--) {
// blocks per grid.
auto bpg = stackBlockPerGrid[i];
auto threadPerBlock = (output->num() + bpg - 1) / bpg;
incrementBlocks << <bpg, threadPerBlock, 0, m_stream >> > (
output->data(),
output->num(),
input->data());
checkCudaKernel(iterate_incrementBlocks);
// swap.
auto p = input;
input = output;
output = p;
}
idaten::TypedCudaMemory<int32_t>* incrResult = (count & 0x1 == 0 ? tmpptr : &m_increments);
#endif
incrementBlocks << <blockPerGrid, blocksize, 0, m_stream >> > (
dst.data(),
dst.num(),
incrResult->data());
checkCudaKernel(incrementBlocks);
}
    // Stream compaction: writes into `dst` the indices i for which
    // bools[i] != 0, preserving their order. If `result` is non-null, the
    // survivor count is read back from the device into it.
    // NOTE(review): unlike the launches in scan(), the scatter launch below
    // is not followed by a checkCudaKernel/error query — confirm intent.
    void StreamCompaction::compact(
        idaten::TypedCudaMemory<int32_t>& dst,
        idaten::TypedCudaMemory<int32_t>& bools,
        int32_t* result/*= nullptr*/)
    {
        // Exclusive scan of the keep-flags yields each kept element's slot.
        scan(m_blockSize, bools, m_indices);

        int32_t num = dst.num();
        int32_t blockPerGrid = (num - 1) / m_blockSize + 1;

        scatter << <blockPerGrid, m_blockSize, 0, m_stream >> > (
            dst.data(),
            m_counts.data(),
            dst.num(),
            bools.data(),
            m_indices.data(),
            m_iota.data());

        if (result) {
            m_counts.readFromDeviceToHostByNum(result);
        }
    }
#if 0
// test implementation.
void StreamCompaction::compact()
{
#if 1
const int32_t blocksize = m_blockSize;
int32_t f[] = { 3, 1, 7, 0, 4, 1, 6, 3, 3, 1, 7, 0, 4, 1, 6, 3, 3, 1, 7, 0, 4, 1, 6, 3, 3, 1, 7, 0, 4, 1, 6, 3, 3, 1, 7, 0, 4, 1, 6, 3 };
//int32_t f[] = { 3, 1, 7, 0, 4, 1, 6, 3, 3, 1 };
//int32_t f[] = { 3, 1, 7, 0, 4, 1, 6, 3 };
//int32_t f[] = { 0, 25, 25, 25 };
//int32_t c = aten::nextPow2(AT_COUNTOF(f));
int32_t c = AT_COUNTOF(f);
std::vector<int32_t> x(c);
memcpy(&x[0], f, sizeof(int32_t) * AT_COUNTOF(f));
idaten::TypedCudaMemory<int32_t> src;
src.init(x.size());
src.writeFromHostToDeviceByNum(&x[0], x.size());
idaten::TypedCudaMemory<int32_t> dst;
dst.init(x.size());
scan(blocksize, src, dst);
std::vector<int32_t> buffer(x.size());
dst.readFromDeviceToHostByNum(&buffer[0]);
int32_t xxx = 0;
#else
const int32_t blocksize = m_blockSize;
int32_t b[] = { 1, 0, 1, 0, 1, 0, 1, 0 };
int32_t v[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
AT_ASSERT(AT_COUNTOF(b) == AT_COUNTOF(v));
int32_t num = AT_COUNTOF(b);
std::vector<int32_t> buffer(num);
idaten::TypedCudaMemory<int32_t> bools;
bools.init(num);
bools.writeFromHostToDeviceByNum(b, num);
idaten::TypedCudaMemory<int32_t> indices;
indices.init(num);
scan(blocksize, bools, indices);
indices.readFromDeviceToHostByNum(&buffer[0]);
idaten::TypedCudaMemory<int32_t> values;
values.init(num);
values.writeFromHostToDeviceByNum(v, num);
idaten::TypedCudaMemory<int32_t> dst;
dst.init(num);
idaten::TypedCudaMemory<int32_t> count;
count.init(1);
int32_t blockPerGrid = (num - 1) / blocksize + 1;
scatter << <blockPerGrid, blocksize >> > (
dst.data(),
count.data(),
dst.maxNum(),
bools.data(),
indices.data(),
values.data());
dst.readFromDeviceToHostByNum(&buffer[0]);
int32_t _count = -1;
count.readFromDeviceToHostByNum(&_count);
int32_t xxx = 0;
#endif
}
#endif
}
|
518d7f122b53026353f0ebdd47a2d4c42e165a8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===------------ omp_data.cu - NVPTX OpenMP GPU objects --------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the data objects used on the GPU device.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
////////////////////////////////////////////////////////////////////////////////
// global device environment
////////////////////////////////////////////////////////////////////////////////
__device__ omptarget_device_environmentTy omptarget_device_environment;
////////////////////////////////////////////////////////////////////////////////
// global data holding OpenMP state information
////////////////////////////////////////////////////////////////////////////////
__device__
omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT>
omptarget_nvptx_device_State[MAX_SM];
__device__ omptarget_nvptx_SimpleMemoryManager
omptarget_nvptx_simpleMemoryManager;
__device__ __shared__ uint32_t usedMemIdx;
__device__ __shared__ uint32_t usedSlotIdx;
__device__ __shared__ uint8_t parallelLevel[MAX_THREADS_PER_TEAM / WARPSIZE];
// Pointer to this team's OpenMP state object
__device__ __shared__
omptarget_nvptx_ThreadPrivateContext *omptarget_nvptx_threadPrivateContext;
////////////////////////////////////////////////////////////////////////////////
// The team master sets the outlined parallel function in this variable to
// communicate with the workers. Since it is in shared memory, there is one
// copy of these variables for each kernel, instance, and team.
////////////////////////////////////////////////////////////////////////////////
volatile __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn;
////////////////////////////////////////////////////////////////////////////////
// OpenMP kernel execution parameters
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ uint32_t execution_param;
////////////////////////////////////////////////////////////////////////////////
// Data sharing state
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ DataSharingStateTy DataSharingState;
////////////////////////////////////////////////////////////////////////////////
// Scratchpad for teams reduction.
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ void *ReductionScratchpadPtr;
////////////////////////////////////////////////////////////////////////////////
// Data sharing related variables.
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ omptarget_nvptx_SharedArgs omptarget_nvptx_globalArgs;
| 518d7f122b53026353f0ebdd47a2d4c42e165a8c.cu | //===------------ omp_data.cu - NVPTX OpenMP GPU objects --------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the data objects used on the GPU device.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
////////////////////////////////////////////////////////////////////////////////
// global device environment
////////////////////////////////////////////////////////////////////////////////
__device__ omptarget_device_environmentTy omptarget_device_environment;
////////////////////////////////////////////////////////////////////////////////
// global data holding OpenMP state information
////////////////////////////////////////////////////////////////////////////////
__device__
omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT>
omptarget_nvptx_device_State[MAX_SM];
__device__ omptarget_nvptx_SimpleMemoryManager
omptarget_nvptx_simpleMemoryManager;
__device__ __shared__ uint32_t usedMemIdx;
__device__ __shared__ uint32_t usedSlotIdx;
__device__ __shared__ uint8_t parallelLevel[MAX_THREADS_PER_TEAM / WARPSIZE];
// Pointer to this team's OpenMP state object
__device__ __shared__
omptarget_nvptx_ThreadPrivateContext *omptarget_nvptx_threadPrivateContext;
////////////////////////////////////////////////////////////////////////////////
// The team master sets the outlined parallel function in this variable to
// communicate with the workers. Since it is in shared memory, there is one
// copy of these variables for each kernel, instance, and team.
////////////////////////////////////////////////////////////////////////////////
volatile __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn;
////////////////////////////////////////////////////////////////////////////////
// OpenMP kernel execution parameters
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ uint32_t execution_param;
////////////////////////////////////////////////////////////////////////////////
// Data sharing state
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ DataSharingStateTy DataSharingState;
////////////////////////////////////////////////////////////////////////////////
// Scratchpad for teams reduction.
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ void *ReductionScratchpadPtr;
////////////////////////////////////////////////////////////////////////////////
// Data sharing related variables.
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ omptarget_nvptx_SharedArgs omptarget_nvptx_globalArgs;
|
8bd06f516bd0ad154522c403501da42e8c9fe7aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_addScalar.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for each matrix size (argv[1] gives how
// many of `matrices_` to use) and each block shape in `blocks_`, times 1000
// launches of vec_addScalar after a 10-iteration warm-up, printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// NOTE(review): the hipMalloc calls pass XSIZE*YSIZE as the byte count for a
// double* buffer — looks like a missing * sizeof(double); verify. The
// buffers are also never hipFree'd between iterations — presumed harness
// leak, confirm intent.
int main(int argc, char **argv) {
hipSetDevice(0);
// Number of matrix sizes to benchmark, parsed from the first CLI argument.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
double y = 1;
// Round the grid dimensions up to multiples of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// One untimed launch to initialize the context/runtime.
hipFree(0);hipLaunchKernelGGL((
vec_addScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_addScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
}
// Timed region: 1000 kernel launches (no sync inside — measures enqueue
// plus whatever completes; original behavior preserved).
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_addScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8bd06f516bd0ad154522c403501da42e8c9fe7aa.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_addScalar.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: for each matrix size (argv[1] gives how
// many of `matrices_` to use) and each block shape in `blocks_`, times 1000
// launches of vec_addScalar after a 10-iteration warm-up, printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)] per configuration.
// NOTE(review): the cudaMalloc calls pass XSIZE*YSIZE as the byte count for a
// double* buffer — looks like a missing * sizeof(double); verify. The
// buffers are also never cudaFree'd between iterations — presumed harness
// leak, confirm intent.
int main(int argc, char **argv) {
cudaSetDevice(0);
// Number of matrix sizes to benchmark, parsed from the first CLI argument.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
double y = 1;
// Round the grid dimensions up to multiples of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// One untimed launch to initialize the context/runtime.
cudaFree(0);
vec_addScalar<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_addScalar<<<gridBlock,threadBlock>>>(n,result,x,y);
}
// Timed region: 1000 kernel launches (no sync inside — measures enqueue
// plus whatever completes; original behavior preserved).
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_addScalar<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d42185c5bc61c0ca9712fe010500c26fde25028d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelVacio.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernelVacio), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernelVacio), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernelVacio), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d42185c5bc61c0ca9712fe010500c26fde25028d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelVacio.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernelVacio<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernelVacio<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernelVacio<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1cb5ca4d2fc8b4b9b761978b510620f49af13e16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//iojpegparts.cu
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <math.h>
#include <jpeglib.h>
#include <sys/time.h>
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp,NULL);
return((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
int usage(char *name){
printf("Code to blur parts of image using GPUs.\n");
printf("Usage as follows: %s InputFileName OutputFileName MaskWidth PeakWidth\n",name);
exit(1);
}
__global__ void GaussianBlurCuda (unsigned char *pic, unsigned char * outpic, double *mask, int *size){ // size: width, height, mask_width
int pxPosCen = blockIdx.x * blockDim.x + threadIdx.x;
if (pxPosCen >= size[0]*size[1] || pxPosCen < 0) return;
int row, col, x, y, pos;
row = pxPosCen/size[0]; // pixel position taken as width major
col = pxPosCen%size[0];
double sumout[3];
sumout[0] = 0;
sumout[1] = 0;
sumout[2] = 0;
if (row < size[2]/2 || row >= (size[1] - (size[2]/2))) return;
if (col < size[2]/2 || col >= (size[0] - (size[2]/2))) return;
for (int i=0;i<size[2]*size[2];i++){
x = i%size[2] + col - size[2]/2;
y = i/size[2] + row - size[2]/2;
pos = (y*size[0] + x)*3;
sumout[0]+=(double)(*(pic+pos )) * mask[i];
sumout[1]+=(double)(*(pic+pos+1)) * mask[i];
sumout[2]+=(double)(*(pic+pos+2)) * mask[i];
}
pos = pxPosCen*3;
*(outpic+pos) = (unsigned char) sumout[0];
*(outpic+pos+1) = (unsigned char) sumout[1];
*(outpic+pos+2) = (unsigned char) sumout[2];
}
int main (int argc, char *argv[]){
if (argc != 5) usage(argv[0]);
int width, height;
char *name = argv[1];
char *out = argv[2];
int mask_width = atoi(argv[3]);
double peak_width = atof(argv[4]);
if (mask_width%2 !=1){
printf("Mask width must be odd.\n");
exit(1);
}
double tStart = cpuSecond();
FILE *infile = fopen(name,"rb");
FILE *outfile = fopen(out,"wb");
if (infile == NULL){
printf("Could not read file\n");
return 1;
}
struct jpeg_decompress_struct cinfo;
struct jpeg_compress_struct cinfo1;
struct jpeg_error_mgr jerr;
JSAMPARRAY pJpegBuffer;
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, infile);
jpeg_read_header(&cinfo, TRUE);
jpeg_start_decompress(&cinfo);
int row_stride = cinfo.output_width * cinfo.output_components;
width = cinfo.output_width;
height = cinfo.output_height;
unsigned char *pic, *outpic;
pic = (unsigned char *) malloc(width*height*3*sizeof(pic));
outpic = (unsigned char *) malloc(width*height*3*sizeof(outpic));
pJpegBuffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, row_stride, 1);
while (cinfo.output_scanline < cinfo.output_height) {
(void) jpeg_read_scanlines(&cinfo, pJpegBuffer, 1);
for (int x=0;x<width;x++) {
*(pic++) = pJpegBuffer[0][cinfo.output_components*x];
if (cinfo.output_components>2){
*(pic++) = pJpegBuffer[0][cinfo.output_components*x+1];
*(pic++) = pJpegBuffer[0][cinfo.output_components*x+2];
} else {
*(pic++) = *(pic-1);
*(pic++) = *(pic-1);
}
}
}
pic -= width*height*3;
fclose(infile);
(void) jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
double * mask;
mask = (double *) malloc(mask_width*mask_width*sizeof(mask));
int x,y,xcen=mask_width/2,ycen=xcen;
double a = 1/(peak_width*peak_width * 44/7), sum=0;
for (int i=0;i<mask_width*mask_width;i++){
x = i%mask_width;
y = i/mask_width;
mask[i] = a * exp(-(x-xcen)*(x-xcen)/(2*peak_width*peak_width)
-(y-ycen)*(y-ycen)/(2*peak_width*peak_width));
sum+=mask[i];
}
for (int i=0;i<mask_width*mask_width;i++){
mask[i] /= sum;
}
// CUDA work
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,0);
size_t gpuGlobalMem = deviceProp.totalGlobalMem;
fprintf(stderr, "GPU global memory = %zu MBytes\n", gpuGlobalMem/(1024*1024));
size_t freeMem, totalMem;
hipMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
unsigned char *cudaPic, *cudaOutPic;
double *cudaMask;
int *sizeCuda, size[3];
size[0] = width;
size[1] = height;
size[2] = mask_width;
hipMalloc((int **)&sizeCuda,3*sizeof(int));
hipMalloc((unsigned char**)&cudaPic, width*height*3*sizeof(unsigned char));
hipMalloc((unsigned char**)&cudaOutPic, width*height*3*sizeof(unsigned char));
hipMalloc((double **)&cudaMask, mask_width*mask_width*sizeof(double));
hipMemcpy(sizeCuda,size,3*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(cudaPic,pic,width*height*3*sizeof(unsigned char),hipMemcpyHostToDevice);
hipMemcpy(cudaMask,mask,mask_width*mask_width*sizeof(double),hipMemcpyHostToDevice);
hipMemset(cudaOutPic,0,width*height*3*sizeof(unsigned char));
dim3 block (1024);
dim3 grid (((width*height)/block.x)+1);
printf("%d %d\n",block.x, grid.x);
hipLaunchKernelGGL(( GaussianBlurCuda), dim3(grid),dim3(block), 0, 0, cudaPic, cudaOutPic, cudaMask, sizeCuda);
hipDeviceSynchronize();
hipMemcpy(outpic, cudaOutPic, width*height*3*sizeof(unsigned char), hipMemcpyDeviceToHost);
// Output file structure
cinfo1.err = jpeg_std_error(&jerr);
jpeg_create_compress(&cinfo1);
jpeg_stdio_dest(&cinfo1, outfile);
cinfo1.image_width = width;
cinfo1.image_height = height;
cinfo1.input_components = 3;
cinfo1.in_color_space = JCS_RGB;
jpeg_set_defaults(&cinfo1);
int quality = 70;
jpeg_set_quality(&cinfo1, quality, TRUE);
jpeg_start_compress(&cinfo1, TRUE);
JSAMPROW row_pointer[1];
while(cinfo1.next_scanline < cinfo1.image_height){
row_pointer[0] = &outpic[cinfo1.next_scanline*width*3];
(void) jpeg_write_scanlines(&cinfo1, row_pointer, 1);
}
jpeg_finish_compress(&cinfo1);
fclose(outfile);
jpeg_destroy_compress(&cinfo1);
double tFinish = cpuSecond();
printf("Time elapsed: %lf seconds.\n",tFinish-tStart);
}
| 1cb5ca4d2fc8b4b9b761978b510620f49af13e16.cu | //iojpegparts.cu
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <math.h>
#include <jpeglib.h>
#include <sys/time.h>
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp,NULL);
return((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
int usage(char *name){
printf("Code to blur parts of image using GPUs.\n");
printf("Usage as follows: %s InputFileName OutputFileName MaskWidth PeakWidth\n",name);
exit(1);
}
__global__ void GaussianBlurCuda (unsigned char *pic, unsigned char * outpic, double *mask, int *size){ // size: width, height, mask_width
int pxPosCen = blockIdx.x * blockDim.x + threadIdx.x;
if (pxPosCen >= size[0]*size[1] || pxPosCen < 0) return;
int row, col, x, y, pos;
row = pxPosCen/size[0]; // pixel position taken as width major
col = pxPosCen%size[0];
double sumout[3];
sumout[0] = 0;
sumout[1] = 0;
sumout[2] = 0;
if (row < size[2]/2 || row >= (size[1] - (size[2]/2))) return;
if (col < size[2]/2 || col >= (size[0] - (size[2]/2))) return;
for (int i=0;i<size[2]*size[2];i++){
x = i%size[2] + col - size[2]/2;
y = i/size[2] + row - size[2]/2;
pos = (y*size[0] + x)*3;
sumout[0]+=(double)(*(pic+pos )) * mask[i];
sumout[1]+=(double)(*(pic+pos+1)) * mask[i];
sumout[2]+=(double)(*(pic+pos+2)) * mask[i];
}
pos = pxPosCen*3;
*(outpic+pos) = (unsigned char) sumout[0];
*(outpic+pos+1) = (unsigned char) sumout[1];
*(outpic+pos+2) = (unsigned char) sumout[2];
}
int main (int argc, char *argv[]){
if (argc != 5) usage(argv[0]);
int width, height;
char *name = argv[1];
char *out = argv[2];
int mask_width = atoi(argv[3]);
double peak_width = atof(argv[4]);
if (mask_width%2 !=1){
printf("Mask width must be odd.\n");
exit(1);
}
double tStart = cpuSecond();
FILE *infile = fopen(name,"rb");
FILE *outfile = fopen(out,"wb");
if (infile == NULL){
printf("Could not read file\n");
return 1;
}
struct jpeg_decompress_struct cinfo;
struct jpeg_compress_struct cinfo1;
struct jpeg_error_mgr jerr;
JSAMPARRAY pJpegBuffer;
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, infile);
jpeg_read_header(&cinfo, TRUE);
jpeg_start_decompress(&cinfo);
int row_stride = cinfo.output_width * cinfo.output_components;
width = cinfo.output_width;
height = cinfo.output_height;
unsigned char *pic, *outpic;
pic = (unsigned char *) malloc(width*height*3*sizeof(pic));
outpic = (unsigned char *) malloc(width*height*3*sizeof(outpic));
pJpegBuffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, row_stride, 1);
while (cinfo.output_scanline < cinfo.output_height) {
(void) jpeg_read_scanlines(&cinfo, pJpegBuffer, 1);
for (int x=0;x<width;x++) {
*(pic++) = pJpegBuffer[0][cinfo.output_components*x];
if (cinfo.output_components>2){
*(pic++) = pJpegBuffer[0][cinfo.output_components*x+1];
*(pic++) = pJpegBuffer[0][cinfo.output_components*x+2];
} else {
*(pic++) = *(pic-1);
*(pic++) = *(pic-1);
}
}
}
pic -= width*height*3;
fclose(infile);
(void) jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
double * mask;
mask = (double *) malloc(mask_width*mask_width*sizeof(mask));
int x,y,xcen=mask_width/2,ycen=xcen;
double a = 1/(peak_width*peak_width * 44/7), sum=0;
for (int i=0;i<mask_width*mask_width;i++){
x = i%mask_width;
y = i/mask_width;
mask[i] = a * exp(-(x-xcen)*(x-xcen)/(2*peak_width*peak_width)
-(y-ycen)*(y-ycen)/(2*peak_width*peak_width));
sum+=mask[i];
}
for (int i=0;i<mask_width*mask_width;i++){
mask[i] /= sum;
}
// CUDA work
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,0);
size_t gpuGlobalMem = deviceProp.totalGlobalMem;
fprintf(stderr, "GPU global memory = %zu MBytes\n", gpuGlobalMem/(1024*1024));
size_t freeMem, totalMem;
cudaMemGetInfo(&freeMem, &totalMem);
fprintf(stderr, "Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
unsigned char *cudaPic, *cudaOutPic;
double *cudaMask;
int *sizeCuda, size[3];
size[0] = width;
size[1] = height;
size[2] = mask_width;
cudaMalloc((int **)&sizeCuda,3*sizeof(int));
cudaMalloc((unsigned char**)&cudaPic, width*height*3*sizeof(unsigned char));
cudaMalloc((unsigned char**)&cudaOutPic, width*height*3*sizeof(unsigned char));
cudaMalloc((double **)&cudaMask, mask_width*mask_width*sizeof(double));
cudaMemcpy(sizeCuda,size,3*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(cudaPic,pic,width*height*3*sizeof(unsigned char),cudaMemcpyHostToDevice);
cudaMemcpy(cudaMask,mask,mask_width*mask_width*sizeof(double),cudaMemcpyHostToDevice);
cudaMemset(cudaOutPic,0,width*height*3*sizeof(unsigned char));
dim3 block (1024);
dim3 grid (((width*height)/block.x)+1);
printf("%d %d\n",block.x, grid.x);
GaussianBlurCuda<<<grid,block>>>(cudaPic, cudaOutPic, cudaMask, sizeCuda);
cudaDeviceSynchronize();
cudaMemcpy(outpic, cudaOutPic, width*height*3*sizeof(unsigned char), cudaMemcpyDeviceToHost);
// Output file structure
cinfo1.err = jpeg_std_error(&jerr);
jpeg_create_compress(&cinfo1);
jpeg_stdio_dest(&cinfo1, outfile);
cinfo1.image_width = width;
cinfo1.image_height = height;
cinfo1.input_components = 3;
cinfo1.in_color_space = JCS_RGB;
jpeg_set_defaults(&cinfo1);
int quality = 70;
jpeg_set_quality(&cinfo1, quality, TRUE);
jpeg_start_compress(&cinfo1, TRUE);
JSAMPROW row_pointer[1];
while(cinfo1.next_scanline < cinfo1.image_height){
row_pointer[0] = &outpic[cinfo1.next_scanline*width*3];
(void) jpeg_write_scanlines(&cinfo1, row_pointer, 1);
}
jpeg_finish_compress(&cinfo1);
fclose(outfile);
jpeg_destroy_compress(&cinfo1);
double tFinish = cpuSecond();
printf("Time elapsed: %lf seconds.\n",tFinish-tStart);
}
|
4fbbdc6673e0aa3fb85aaea39d4278cc524581d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/BucketizationUtils.h>
#include <ATen/native/Resize.h>
#include <THH/THH.h>
#include <ATen/native/Resize.h>
namespace at {
namespace native {
// Implement a numpy like searchsorted and a TF like bucketize function running on cuda
// See details in ATen/nativate/Bucketization.cpp
namespace {
template<typename input_t>
__device__ int64_t lower_bound(const input_t *data_ss, int64_t start, int64_t end, const input_t val, const int64_t *data_sort) {
// sorter gives relative ordering for ND tensors, so we need to save and add the non-updated start as an offset
// i.e. the second row of a 3x3 tensors starts at element 3 but sorter's second row only contains 0, 1, or 2
const int64_t orig_start = start;
while (start < end) {
const int64_t mid = start + ((end - start) >> 1);
const input_t mid_val = data_sort ? data_ss[orig_start + data_sort[mid]] : data_ss[mid];
if (!(mid_val >= val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t>
__device__ int64_t upper_bound(const input_t *data_ss, int64_t start, int64_t end, const input_t val, const int64_t *data_sort) {
// sorter gives relative ordering for ND tensors, so we need to save and add the non-updated start as an offset
// i.e. the second row of a 3x3 tensors starts at element 3 but sorter's second row only contains 0, 1, or 2
const int64_t orig_start = start;
while (start < end) {
const int64_t mid = start + ((end - start) >> 1);
const input_t mid_val = data_sort ? data_ss[orig_start + data_sort[mid]] : data_ss[mid];
if (!(mid_val > val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t, typename output_t>
__global__ void searchsorted_cuda_kernel(
output_t *data_out,
const input_t *data_in,
const input_t *data_bd,
const int64_t *data_sort,
int64_t idim_in,
int64_t idim_bd,
int64_t numel_in,
bool right,
bool is_1d_boundaries) {
for (int64_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < numel_in; tid += blockDim.x * gridDim.x) {
// If boundaries tensor is 1d, we always search the entire boundary tensor
int64_t start_bd = is_1d_boundaries ? 0 : tid / idim_in * idim_bd;
int64_t end_bd = start_bd + idim_bd;
int64_t pos = !right ?
lower_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid], data_sort) - start_bd :
upper_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid], data_sort) - start_bd;
// type conversion might happen here
data_out[tid] = pos;
}
}
template<typename input_t, typename output_t>
void searchsorted_cuda_contiguous(Tensor& result, const Tensor& input, const Tensor& boundaries, const bool& right, const Tensor& sorter) {
int64_t numel_in = input.numel();
bool is_scalar_input = input.dim() == 0 && numel_in == 1;
// inner most dim size of input and boundaries
int64_t idim_in = is_scalar_input ? 1 : input.sizes().back();
int64_t idim_bd = boundaries.sizes().back();
const input_t *data_in = input.data_ptr<input_t>();
const input_t *data_bd = boundaries.data_ptr<input_t>();
const int64_t *data_sort = sorter.defined() ? sorter.data_ptr<int64_t>() : nullptr;
output_t *data_out = result.data_ptr<output_t>();
int64_t maxThread = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
int64_t maxGrid = 1024;
dim3 block = dim3(::min(maxThread, numel_in));
dim3 grid = dim3(::min(maxGrid, ceil_div<int64_t>(numel_in, block.x)));
at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( searchsorted_cuda_kernel), dim3(grid), dim3(block), 0, stream,
data_out, data_in, data_bd, data_sort, idim_in, idim_bd, numel_in, right, boundaries.dim() == 1);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
void dispatch(
Tensor& result,
const Tensor& input,
const Tensor& boundaries,
bool out_int32,
bool right,
const Tensor& sorter) {
if (!out_int32) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int64_t>(result, input, boundaries, right, sorter);
});
}
else {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int>(result, input, boundaries, right, sorter);
});
}
}
}
Tensor& searchsorted_out_cuda(
const Tensor& sorted_sequence,
const Tensor& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter_opt,
Tensor& result) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> sorter_maybe_owned = at::borrow_from_optional_tensor(sorter_opt);
const Tensor& sorter = *sorter_maybe_owned;
searchsorted_pre_check(sorted_sequence, self, result, out_int32, right, side_opt, sorter);
resize_output(result, self.sizes());
// we have two inputs to set right, pre_check checks that they aren't set to opposites
bool is_right = (side_opt && *side_opt == "right") || right;
if (self.numel() == 0) {
return result;
}
// for non-contiguous result tensors, we write the output to a contiguous copy so we can later copy back, maintaing the original result tensor
Tensor out = result;
if (!result.is_contiguous()) {
out = result.contiguous();
}
if (sorted_sequence.is_contiguous() && self.is_contiguous() && sorted_sequence.dtype() == self.dtype() && sorter.is_contiguous()) {
dispatch(out, self, sorted_sequence, out_int32, is_right, sorter);
}
else {
Tensor trimmed_input;
Tensor trimmed_boundaries;
Tensor trimmed_sorter;
searchsorted_maybe_trim_input_tensors(trimmed_input, trimmed_boundaries, trimmed_sorter, self, sorted_sequence, sorter);
const Tensor& final_input = trimmed_input.defined() ? trimmed_input : self;
const Tensor& final_boundaries = trimmed_boundaries.defined() ? trimmed_boundaries : sorted_sequence;
const Tensor& final_sorter = trimmed_sorter.defined() ? trimmed_sorter : sorter;
dispatch(out, final_input, final_boundaries, out_int32, is_right, final_sorter);
}
// if result is non-contiguous, we wrote the answer to a copied version, so we copy back to the original result tensor
if (!result.is_contiguous()) {
result.copy_(out);
}
return result;
}
Tensor searchsorted_cuda(
const Tensor& sorted_sequence,
const Tensor& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::searchsorted_out_cuda(sorted_sequence, self, out_int32, right, side_opt, sorter, result);
return result;
}
// See [Note about _torch_cuda_cu_linker_symbol_op and torch_cuda_cu] in native_functions.yaml
Tensor _torch_cuda_cu_linker_symbol_op_cuda(const Tensor& self) {
return self;
}
Tensor searchsorted_cuda(
const Tensor& sorted_sequence,
const Scalar& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter) {
const Tensor& scalar_tensor = searchsorted_scalar_tensor(self, sorted_sequence.device());
return searchsorted_cuda(sorted_sequence, scalar_tensor, out_int32, right, side_opt, sorter);
}
Tensor& bucketize_out_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right, Tensor& result) {
TORCH_CHECK(boundaries.dim() == 1, "boundaries tensor must be 1 dimension, but got dim(", boundaries.dim(), ")");
at::native::searchsorted_out_cuda(boundaries, self, out_int32, right, nullopt, nullopt, result);
return result;
}
Tensor bucketize_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::bucketize_out_cuda(self, boundaries, out_int32, right, result);
return result;
}
Tensor bucketize_cuda(const Scalar& self, const Tensor& boundaries, bool out_int32, bool right) {
return bucketize_cuda(searchsorted_scalar_tensor(self, boundaries.device()), boundaries, out_int32, right);
}
}} // namespace at::native
| 4fbbdc6673e0aa3fb85aaea39d4278cc524581d2.cu | #include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/BucketizationUtils.h>
#include <ATen/native/Resize.h>
#include <THC/THC.h>
#include <ATen/native/Resize.h>
namespace at {
namespace native {
// Implement a numpy like searchsorted and a TF like bucketize function running on cuda
// See details in ATen/nativate/Bucketization.cpp
namespace {
template<typename input_t>
__device__ int64_t lower_bound(const input_t *data_ss, int64_t start, int64_t end, const input_t val, const int64_t *data_sort) {
// sorter gives relative ordering for ND tensors, so we need to save and add the non-updated start as an offset
// i.e. the second row of a 3x3 tensors starts at element 3 but sorter's second row only contains 0, 1, or 2
const int64_t orig_start = start;
while (start < end) {
const int64_t mid = start + ((end - start) >> 1);
const input_t mid_val = data_sort ? data_ss[orig_start + data_sort[mid]] : data_ss[mid];
if (!(mid_val >= val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t>
__device__ int64_t upper_bound(const input_t *data_ss, int64_t start, int64_t end, const input_t val, const int64_t *data_sort) {
// sorter gives relative ordering for ND tensors, so we need to save and add the non-updated start as an offset
// i.e. the second row of a 3x3 tensors starts at element 3 but sorter's second row only contains 0, 1, or 2
const int64_t orig_start = start;
while (start < end) {
const int64_t mid = start + ((end - start) >> 1);
const input_t mid_val = data_sort ? data_ss[orig_start + data_sort[mid]] : data_ss[mid];
if (!(mid_val > val)) {
start = mid + 1;
}
else {
end = mid;
}
}
return start;
}
template<typename input_t, typename output_t>
__global__ void searchsorted_cuda_kernel(
output_t *data_out,
const input_t *data_in,
const input_t *data_bd,
const int64_t *data_sort,
int64_t idim_in,
int64_t idim_bd,
int64_t numel_in,
bool right,
bool is_1d_boundaries) {
for (int64_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < numel_in; tid += blockDim.x * gridDim.x) {
// If boundaries tensor is 1d, we always search the entire boundary tensor
int64_t start_bd = is_1d_boundaries ? 0 : tid / idim_in * idim_bd;
int64_t end_bd = start_bd + idim_bd;
int64_t pos = !right ?
lower_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid], data_sort) - start_bd :
upper_bound<input_t>(data_bd, start_bd, end_bd, data_in[tid], data_sort) - start_bd;
// type conversion might happen here
data_out[tid] = pos;
}
}
template<typename input_t, typename output_t>
void searchsorted_cuda_contiguous(Tensor& result, const Tensor& input, const Tensor& boundaries, const bool& right, const Tensor& sorter) {
int64_t numel_in = input.numel();
bool is_scalar_input = input.dim() == 0 && numel_in == 1;
// inner most dim size of input and boundaries
int64_t idim_in = is_scalar_input ? 1 : input.sizes().back();
int64_t idim_bd = boundaries.sizes().back();
const input_t *data_in = input.data_ptr<input_t>();
const input_t *data_bd = boundaries.data_ptr<input_t>();
const int64_t *data_sort = sorter.defined() ? sorter.data_ptr<int64_t>() : nullptr;
output_t *data_out = result.data_ptr<output_t>();
int64_t maxThread = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
int64_t maxGrid = 1024;
dim3 block = dim3(std::min(maxThread, numel_in));
dim3 grid = dim3(std::min(maxGrid, ceil_div<int64_t>(numel_in, block.x)));
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
searchsorted_cuda_kernel<<<grid, block, 0, stream>>>(
data_out, data_in, data_bd, data_sort, idim_in, idim_bd, numel_in, right, boundaries.dim() == 1);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
void dispatch(
Tensor& result,
const Tensor& input,
const Tensor& boundaries,
bool out_int32,
bool right,
const Tensor& sorter) {
if (!out_int32) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int64_t>(result, input, boundaries, right, sorter);
});
}
else {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "searchsorted_out_cuda", [&] {
searchsorted_cuda_contiguous<scalar_t, int>(result, input, boundaries, right, sorter);
});
}
}
}
Tensor& searchsorted_out_cuda(
const Tensor& sorted_sequence,
const Tensor& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter_opt,
Tensor& result) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> sorter_maybe_owned = at::borrow_from_optional_tensor(sorter_opt);
const Tensor& sorter = *sorter_maybe_owned;
searchsorted_pre_check(sorted_sequence, self, result, out_int32, right, side_opt, sorter);
resize_output(result, self.sizes());
// we have two inputs to set right, pre_check checks that they aren't set to opposites
bool is_right = (side_opt && *side_opt == "right") || right;
if (self.numel() == 0) {
return result;
}
// for non-contiguous result tensors, we write the output to a contiguous copy so we can later copy back, maintaing the original result tensor
Tensor out = result;
if (!result.is_contiguous()) {
out = result.contiguous();
}
if (sorted_sequence.is_contiguous() && self.is_contiguous() && sorted_sequence.dtype() == self.dtype() && sorter.is_contiguous()) {
dispatch(out, self, sorted_sequence, out_int32, is_right, sorter);
}
else {
Tensor trimmed_input;
Tensor trimmed_boundaries;
Tensor trimmed_sorter;
searchsorted_maybe_trim_input_tensors(trimmed_input, trimmed_boundaries, trimmed_sorter, self, sorted_sequence, sorter);
const Tensor& final_input = trimmed_input.defined() ? trimmed_input : self;
const Tensor& final_boundaries = trimmed_boundaries.defined() ? trimmed_boundaries : sorted_sequence;
const Tensor& final_sorter = trimmed_sorter.defined() ? trimmed_sorter : sorter;
dispatch(out, final_input, final_boundaries, out_int32, is_right, final_sorter);
}
// if result is non-contiguous, we wrote the answer to a copied version, so we copy back to the original result tensor
if (!result.is_contiguous()) {
result.copy_(out);
}
return result;
}
Tensor searchsorted_cuda(
const Tensor& sorted_sequence,
const Tensor& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::searchsorted_out_cuda(sorted_sequence, self, out_int32, right, side_opt, sorter, result);
return result;
}
// See [Note about _torch_cuda_cu_linker_symbol_op and torch_cuda_cu] in native_functions.yaml
Tensor _torch_cuda_cu_linker_symbol_op_cuda(const Tensor& self) {
return self;
}
Tensor searchsorted_cuda(
const Tensor& sorted_sequence,
const Scalar& self,
bool out_int32,
bool right,
const c10::optional<c10::string_view> side_opt,
const c10::optional<Tensor>& sorter) {
const Tensor& scalar_tensor = searchsorted_scalar_tensor(self, sorted_sequence.device());
return searchsorted_cuda(sorted_sequence, scalar_tensor, out_int32, right, side_opt, sorter);
}
Tensor& bucketize_out_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right, Tensor& result) {
TORCH_CHECK(boundaries.dim() == 1, "boundaries tensor must be 1 dimension, but got dim(", boundaries.dim(), ")");
at::native::searchsorted_out_cuda(boundaries, self, out_int32, right, nullopt, nullopt, result);
return result;
}
Tensor bucketize_cuda(const Tensor& self, const Tensor& boundaries, bool out_int32, bool right) {
ScalarType scalar_type = out_int32 ? ScalarType::Int : ScalarType::Long;
c10::TensorOptions options = TensorOptions().device(self.options().device()).dtype(scalar_type);
Tensor result = at::empty({0}, options, MemoryFormat::Contiguous);
at::native::bucketize_out_cuda(self, boundaries, out_int32, right, result);
return result;
}
Tensor bucketize_cuda(const Scalar& self, const Tensor& boundaries, bool out_int32, bool right) {
return bucketize_cuda(searchsorted_scalar_tensor(self, boundaries.device()), boundaries, out_int32, right);
}
}} // namespace at::native
|
c6235849bf9d3b09f3de442898b7d1aa3d1156e3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_grid_array_to_value.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated (hipify) benchmark driver: for each matrix size selected by
// argv[1] and each of 20 block shapes, warms up and then times 1000 launches
// of set_grid_array_to_value, printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
hipSetDevice(0);
// argv[1] = number of rows of matrices_ to sweep.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *arr = NULL;
// NOTE(review): allocates XSIZE*YSIZE BYTES for a float buffer — likely
// missing "* sizeof(float)"; confirm against the kernel's accesses. The
// buffer is also never hipFree'd, so it leaks once per configuration.
hipMalloc(&arr, XSIZE*YSIZE);
float value = 2;
int N_grid = 1;
// Round the problem dimensions up to multiples of the block shape so the
// grid divides evenly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Warm-up launch; hipFree(0) forces lazy context creation before timing.
hipFree(0);hipLaunchKernelGGL((
set_grid_array_to_value), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,value,N_grid);
hipDeviceSynchronize();
// 10 additional untimed warm-up iterations.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
set_grid_array_to_value), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,value,N_grid);
}
// Timed section. NOTE(review): no hipDeviceSynchronize() before reading
// the clock, so this measures launch/queueing cost rather than kernel
// execution time — confirm if intended.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
set_grid_array_to_value), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,value,N_grid);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c6235849bf9d3b09f3de442898b7d1aa3d1156e3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_grid_array_to_value.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each matrix size selected by argv[1] and each of 20
// block shapes, launches set_grid_array_to_value once as warm-up, 10 more
// untimed iterations, then times 1000 launches and prints one
// "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" line per configuration.
// NOTE: the timed loop does not synchronize before reading the clock, so it
// measures launch/queueing overhead rather than kernel execution time.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // argv[1] = number of rows of matrices_ to sweep.
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1], BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *arr = NULL;
            // FIX: allocate XSIZE*YSIZE floats, not bytes (sizeof(float) was missing).
            cudaMalloc(&arr, XSIZE * YSIZE * sizeof(float));
            float value = 2;
            int N_grid = 1;
            // Round the dimensions up to multiples of the block shape so the
            // grid covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context creation before timing
            set_grid_array_to_value<<<gridBlock,threadBlock>>>(arr, value, N_grid);
            cudaDeviceSynchronize();
            // Untimed warm-up iterations.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                set_grid_array_to_value<<<gridBlock,threadBlock>>>(arr, value, N_grid);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                set_grid_array_to_value<<<gridBlock,threadBlock>>>(arr, value, N_grid);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // FIX: release the device buffer; it previously leaked once per configuration.
            cudaFree(arr);
        }
    }
}
77fa54cf316740ab7cb6c1b8f62e45a114937d12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <conio.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <windows.h>
using namespace std;
//Funciones que van a utilizarse a lo largo del programa
//CPU
void generarTablero(int *tablero, int filas, int columnas);
void imprimirTablero(int *tablero, int filas, int columnas);
void imprimirColumnas(int columnas);
void generarSemillas(int *tablero, int filas, int columnas);
void guardarPartida(int *tablero, int filas, int columnas/*, int dificultad*/);
void cargarPartida();
void modoManual(int *tablero, int filas, int columnas);
//GPU
__global__ void juegoManual(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaSemillas(int *tablero, int filas, int columnas, char movimiento);
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
//AUX
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
// Entry point: asks whether to load a saved game ('C') or start a new one
// ('N'). A new game validates the dimensions (at least 4x4, and no more
// cells than maxThreadsPerBlock since the kernel uses one thread per cell in
// a single block), builds the board and enters manual play.
int main(void){
    // Query device limits so the board never exceeds the per-block thread cap.
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, 0);
    // Board state and dimensions.
    int *tablero;
    int filas = 0;
    int columnas = 0;
    int dificultad = 0;  // NOTE(review): never read in the visible code
    // Load a previous game or start fresh.
    cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n";
    char partida = 'X';
    cin >> partida;
    while (partida != 'C' && partida != 'N') {
        cout << "Introduce un valor valido para iniciar el juego\n";
        cin >> partida;
    }
    if (partida == 'N'){
        // Read the desired board dimensions.
        cout << "Seleccione el numero de filas con las que desea jugar: \n";
        cin >> filas;
        cout << "Seleccione el numero de columnas con las que desea jugar: \n";
        cin >> columnas;
        // Minimum board is 4x4.
        while (filas < 4) {
            cout << "El numero de filas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n";
            cin >> filas;
        }
        while (columnas < 4) {
            cout << "El numero de columnas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n";
            cin >> columnas;
        }
        // One thread per cell in one block: reject boards that exceed the cap.
        while (prop.maxThreadsPerBlock < (filas * columnas)) {
            cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n";
            cout << "Seleccione el numero de filas con las que desea jugar: \n";
            cin >> filas;
            cout << "Seleccione el numero de columnas con las que desea jugar: \n";
            cin >> columnas;
        }
        // Allocate and seed the board, then start interactive play.
        tablero = new int[filas * columnas];
        generarTablero(tablero, filas, columnas);
        modoManual(tablero, filas, columnas);
    }
    else {
        cargarPartida();
    }
    system("PAUSE");
}
//Generar tablero con nmeros aleatorios
// Initializes the board: seeds the RNG, zeroes every cell, then drops the
// initial seed tiles via generarSemillas.
void generarTablero(int *tablero, int filas, int columnas){
    srand(time(0));
    const int celdas = filas * columnas;
    int idx = 0;
    while (idx < celdas) {
        tablero[idx] = 0;
        idx++;
    }
    generarSemillas(tablero, filas, columnas);
}
//Genera los nmeros para jugar en el tablero
// Scatters new seed tiles (value 2, 4 or 8, chosen uniformly) onto empty
// cells of the board via rejection sampling.
// FIX: the original looped until 3 seeds were placed, which spun forever
// when fewer than 3 empty cells remained; the target is now capped by the
// number of free cells (placing up to 3).
void generarSemillas(int *tablero, int filas, int columnas){
    int total = filas * columnas;
    // Count free cells so we never ask for more seeds than can be placed.
    int libres = 0;
    for (int k = 0; k < total; k++){
        if (tablero[k] == 0) libres++;
    }
    int objetivo = (libres < 3) ? libres : 3;
    int contador = 0;
    while (contador < objetivo){
        int aux = rand() % 3;   // seed value selector: 0->2, 1->4, 2->8
        int i = rand() % total; // candidate cell (rejected if occupied)
        if (tablero[i] == 0){
            switch (aux){
            case 0:
                tablero[i] = 2;
                break;
            case 1:
                tablero[i] = 4;
                break;
            case 2:
                tablero[i] = 8;
                break;
            }
            contador++;
        }
    }
}
//Funcin que imprime el nmero de columnas que va a tener el tablero para que sea ms facil elegir semillas
// Prints the column ruler above the board: column numbers (spacing varies
// for 1- vs 2-digit numbers) followed by a row of '|' markers.
void imprimirColumnas(int columnas) {
    for (int i = 0; i < columnas; i++) {
        if (i == 0) {
            cout << " " << i + 1;
        }
        else {
            if (i < 9) {
                cout << " " << i + 1;
            }
            else {
                cout << " " << i + 1;
            }
        }
    }
    cout << "\n";
    // Second line: a '|' tick under each column number.
    for (int i = 0; i < columnas; i++) {
        if (i == 0) {
            cout << " |";
        }
        else {
            cout << " |";
        }
    }
    cout << "\n";
}
//Imprimimos el tablero
// Prints the current board with column/row rulers, colouring each tile value
// through the Windows console API (yellow 2, red 4, purple 8, blue 16,
// white otherwise).
void imprimirTablero(int *tablero, int filas, int columnas) {
    cout << "SE HAN GENERADO " << filas << " FILAS Y " << columnas << " COLUMNAS\n";
    cout << "+-+-+-TABLERO DE JUEGO-+-+-+\n\n";
    imprimirColumnas(columnas);
    for (int i = 0; i < filas; i++) {
        if (i < 9) {
            cout << i + 1 << " - ";
        }
        else {
            cout << i + 1 << " - ";
        }
        for (int k = 0; k < columnas; k++) {
            // FIX: the board is row-major with width `columnas` (see
            // moverCeros/guardarPartida); the old index `i * filas + k`
            // misread every non-square board.
            int bloque = tablero[i * columnas + k];
            // Pick a console colour for the tile value.
            switch (bloque) {
            case 2:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 14); //Amarillo
                break;
            case 4:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 12); //Rojo
                break;
            case 8:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); //Morado
                break;
            case 16:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 9); //Azul
                break;
            default:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); //Blanco
            }
            if (bloque < 10) cout << "| " << bloque << " |";
            else cout << "| " << bloque << "|";
        }
        // Restore white before the newline.
        SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7);
        cout << "\n";
    }
}
// Routes this cell's merge check to the helper matching the pressed key:
// 'W' pairs with the "abajo" (below-neighbour) check and 'S' with "arriba",
// because moverCeros first compacts the column toward the key's direction.
// Any other key (e.g. 'Z') matches no case and leaves the board untouched.
__device__ void compruebaSemillas(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    switch (movimiento){
    case 'W':
        compruebaAbajo(tablero, fila, columna, filas, columnas, movimiento);
        break;
    case 'S':
        compruebaArriba(tablero, fila, columna, filas, columnas, movimiento);
        break;
    case 'D':
        compruebaIzquierda(tablero, fila, columna, filas, columnas, movimiento);
        break;
    case 'A':
        compruebaDerecha(tablero, fila, columna, filas, columnas, movimiento);
        break;
    }
}
// Compacts column `columna` toward the pressed direction, sliding non-zero
// tiles over zeros while preserving their order. Only vertical moves ('W'
// up, 'S' down) are handled; for 'A'/'D' this is a no-op, so horizontal
// moves are merged by the comprueba* helpers without compaction.
// (`fila` is unused; kept for interface compatibility with the callers.)
//
// FIX: the original bubble passes iterated the outer loop in the wrong
// direction, so a tile separated from the edge by zeros could fail to slide
// all the way (e.g. column [0,0,2,4] with 'W' ended as [2,0,0,4] instead of
// [2,4,0,0]). The passes below start from the side being compacted toward,
// which fully compacts the column.
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    if (movimiento == 'W'){
        // Slide each tile up past any zeros above it, processing rows
        // top-to-bottom.
        for (int i = 1; i < filas; i++){
            for (int j = i; j > 0; j--){
                if (tablero[(j * columnas) + columna] != 0 && tablero[((j - 1) * columnas) + columna] == 0){
                    tablero[((j - 1) * columnas) + columna] = tablero[(j * columnas) + columna];
                    tablero[(j * columnas) + columna] = 0;
                }
            }
        }
    }
    if (movimiento == 'S'){
        // Slide each tile down past any zeros below it, processing rows
        // bottom-to-top.
        for (int i = filas - 2; i >= 0; i--){
            for (int j = i; j < filas - 1; j++){
                if (tablero[(j * columnas) + columna] != 0 && tablero[((j + 1) * columnas) + columna] == 0){
                    tablero[((j + 1) * columnas) + columna] = tablero[(j * columnas) + columna];
                    tablero[(j * columnas) + columna] = 0;
                }
            }
        }
    }
}
// Merge step for a downward move ('S'): after compacting, if this cell
// equals the cell directly above it, double this cell, clear the one above,
// and re-compact.
// FIX: guard `fila > 0` — the unguarded version read and wrote
// tablero[-columnas + columna] (out of bounds) for every thread in row 0.
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    if (fila > 0 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila - 1) * columnas) + columna]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[((fila - 1) * columnas) + columna] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
// Merge step for an upward move ('W'): after compacting, if this cell equals
// the cell directly below it, double this cell, clear the one below, and
// re-compact.
// FIX: guard `fila < filas - 1` — the unguarded version accessed past the
// last row for threads in the bottom row.
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    if (fila < filas - 1 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila + 1) * columnas) + columna]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[((fila + 1) * columnas) + columna] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
// Merge step for a leftward move ('A'): if this cell equals its right-hand
// neighbour, double this cell and clear the neighbour. (moverCeros is a
// no-op for horizontal moves, so no compaction happens here.)
// FIX: guard `columna < columnas - 1` — the unguarded version read the
// first cell of the next row (or past the array) for the last column.
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    if (columna < columnas - 1 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna + 1)]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[(fila * columnas) + (columna + 1)] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
// Merge step for a rightward move ('D'): if this cell equals its left-hand
// neighbour, double this cell and clear the neighbour. (moverCeros is a
// no-op for horizontal moves, so no compaction happens here.)
// FIX: guard `columna > 0` — the unguarded version read the previous row's
// last cell (or before the array) for column 0.
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    if (columna > 0 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna - 1)]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[(fila * columnas) + (columna - 1)] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
// Kernel: one thread per board cell (threadIdx.x = column, threadIdx.y =
// row) applies the current move to its own cell.
// NOTE(review): all threads of a line mutate that line concurrently through
// moverCeros with no synchronization between the per-cell calls, so threads
// of the same column/row race with each other — confirm whether the
// redundant per-cell compaction is relied upon.
__global__ void juegoManual(int *tablero, int filas, int columnas, char movimiento){
    // This thread's cell coordinates within the single launched block.
    int columnaHilo = threadIdx.x;
    int filaHilo = threadIdx.y;
    compruebaSemillas(tablero, filaHilo, columnaHilo, filas, columnas, movimiento);
    __syncthreads();
}
// Persists the game to "partida.txt": first line rows, second line columns,
// then every cell value separated by spaces (difficulty is commented out and
// not written). Clears the console and prints a confirmation.
void guardarPartida(int *tablero, int filas, int columnas/*, int dificultad*/) {
    ofstream doc("partida.txt");
    doc << filas << "\n" << columnas << "\n";
    const int total = filas * columnas;
    for (int pos = 0; pos < total; pos++) {
        doc << tablero[pos] << " ";
    }
    doc.close();
    system("cls");
    cout << "Guardado correctamente.\n\n";
}
// Loads a saved game from "partida.txt" (format written by guardarPartida:
// rows, columns, then the board values space-separated) and resumes manual
// play.
// FIX: the old hand-rolled digit parser consumed a non-existent "difficulty"
// line (guardarPartida never writes one, so it swallowed the board line) and
// read only one character per board value, corrupting any tile >= 16. The
// author's own comment flagged it as broken. Formatted extraction fixes
// both; on a missing/corrupt file it now returns instead of playing a
// zero-sized board.
void cargarPartida() {
    const string fichero = "partida.txt";
    ifstream leer;
    leer.open(fichero.c_str());
    int f = 0;
    int c = 0;
    if (leer >> f >> c && f > 0 && c > 0) {
        int *tablero = new int[f * c];
        for (int i = 0; i < f * c; i++) {
            // Missing trailing values default to empty cells.
            if (!(leer >> tablero[i])) tablero[i] = 0;
        }
        leer.close();
        modoManual(tablero, f, c);
    }
    else {
        leer.close();
    }
}
// Interactive game loop: print the board, read a move key (WASD; Z quits),
// run one kernel step on the GPU with one thread per cell, copy the board
// back, add new seed tiles, and finally offer to save.
// NOTE(review): DimBlock is built as (filas, columnas) but the kernel uses
// threadIdx.x as the COLUMN index — for non-square boards the axes look
// swapped; confirm against juegoManual.
// NOTE(review): entering 'Z' still launches one kernel pass and adds seeds
// before leaving the loop — confirm that is intended.
void modoManual(int *tablero, int filas, int columnas){
    //system("cls");
    char movimiento = ' ';
    while (movimiento != 'Z'){
        imprimirTablero(tablero, filas, columnas);
        cout << "Pulsa W, A, S o D para mover los numeros (Z para salir): \n";
        cin >> movimiento;
        //while (movimiento != (ARRIBA || ABAJO || IZQUIERDA || DERECHA)) {
        // Re-prompt until a valid key is entered.
        while (movimiento != 'W' && movimiento != 'S' && movimiento != 'A' && movimiento != 'D' && movimiento != 'Z') {
            cout << "Tecla no valida, introduzca una valida:\n";
            cin >> movimiento;
        }
        //CUDA
        int *tablero_gpu;
        // Copy the board to the device for this step.
        hipMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int));
        hipMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), hipMemcpyHostToDevice);
        // Single block, one thread per cell.
        dim3 DimGrid(1, 1);
        dim3 DimBlock(filas, columnas);
        juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento);
        hipMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, hipMemcpyDeviceToHost);
        //system("cls");
        // Drop new seed tiles after every move.
        generarSemillas(tablero, filas, columnas);
        hipFree(tablero_gpu);
    }
    //system("cls");
    cout << "Deseas guardar la partida? (S/N)\n";
    char guardar = 'x';
    cin >> guardar;
    while (guardar != 'S' && guardar != 'N') {
        system("cls");
        cout << "Valor no valido, quieres guardar la partida? (S/N): \n";
        cin >> guardar;
    }
    if (guardar == 'S') {
        guardarPartida(tablero, filas, columnas/*, dificultad*/);
    }
    else {
        cout << "Saliendo sin guardar...\n \n";
    }
} | 77fa54cf316740ab7cb6c1b8f62e45a114937d12.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <conio.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <windows.h>
using namespace std;
//Funciones que van a utilizarse a lo largo del programa
//CPU
void generarTablero(int *tablero, int filas, int columnas);
void imprimirTablero(int *tablero, int filas, int columnas);
void imprimirColumnas(int columnas);
void generarSemillas(int *tablero, int filas, int columnas);
void guardarPartida(int *tablero, int filas, int columnas/*, int dificultad*/);
void cargarPartida();
void modoManual(int *tablero, int filas, int columnas);
//GPU
__global__ void juegoManual(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaSemillas(int *tablero, int filas, int columnas, char movimiento);
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
//AUX
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
// Entry point: asks whether to load a saved game ('C') or start a new one
// ('N'). A new game validates the dimensions (at least 4x4, and no more
// cells than maxThreadsPerBlock since the kernel uses one thread per cell in
// a single block), builds the board and enters manual play.
int main(void){
    // Query device limits so the board never exceeds the per-block thread cap.
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    // Board state and dimensions.
    int *tablero;
    int filas = 0;
    int columnas = 0;
    int dificultad = 0;  // NOTE(review): never read in the visible code
    // Load a previous game or start fresh.
    cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n";
    char partida = 'X';
    cin >> partida;
    while (partida != 'C' && partida != 'N') {
        cout << "Introduce un valor valido para iniciar el juego\n";
        cin >> partida;
    }
    if (partida == 'N'){
        // Read the desired board dimensions.
        cout << "Seleccione el numero de filas con las que desea jugar: \n";
        cin >> filas;
        cout << "Seleccione el numero de columnas con las que desea jugar: \n";
        cin >> columnas;
        // Minimum board is 4x4.
        while (filas < 4) {
            cout << "El numero de filas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n";
            cin >> filas;
        }
        while (columnas < 4) {
            cout << "El numero de columnas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n";
            cin >> columnas;
        }
        // One thread per cell in one block: reject boards that exceed the cap.
        while (prop.maxThreadsPerBlock < (filas * columnas)) {
            cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n";
            cout << "Seleccione el numero de filas con las que desea jugar: \n";
            cin >> filas;
            cout << "Seleccione el numero de columnas con las que desea jugar: \n";
            cin >> columnas;
        }
        // Allocate and seed the board, then start interactive play.
        tablero = new int[filas * columnas];
        generarTablero(tablero, filas, columnas);
        modoManual(tablero, filas, columnas);
    }
    else {
        cargarPartida();
    }
    system("PAUSE");
}
//Generar tablero con números aleatorios
// Builds a fresh board: seeds the RNG, clears every cell to 0, then places
// the initial seed tiles with generarSemillas.
void generarTablero(int *tablero, int filas, int columnas){
    srand(time(0));
    const int totalCeldas = filas * columnas;
    for (int pos = 0; pos < totalCeldas; ++pos)
        tablero[pos] = 0;
    generarSemillas(tablero, filas, columnas);
}
//Genera los números para jugar en el tablero
// Scatters new seed tiles (value 2, 4 or 8, chosen uniformly) onto empty
// cells of the board via rejection sampling.
// FIX: the original looped until 3 seeds were placed, which spun forever
// when fewer than 3 empty cells remained; the target is now capped by the
// number of free cells (placing up to 3).
void generarSemillas(int *tablero, int filas, int columnas){
    int total = filas * columnas;
    // Count free cells so we never ask for more seeds than can be placed.
    int libres = 0;
    for (int k = 0; k < total; k++){
        if (tablero[k] == 0) libres++;
    }
    int objetivo = (libres < 3) ? libres : 3;
    int contador = 0;
    while (contador < objetivo){
        int aux = rand() % 3;   // seed value selector: 0->2, 1->4, 2->8
        int i = rand() % total; // candidate cell (rejected if occupied)
        if (tablero[i] == 0){
            switch (aux){
            case 0:
                tablero[i] = 2;
                break;
            case 1:
                tablero[i] = 4;
                break;
            case 2:
                tablero[i] = 8;
                break;
            }
            contador++;
        }
    }
}
//Función que imprime el número de columnas que va a tener el tablero para que sea más facil elegir semillas
// Prints the column ruler above the board: column numbers (spacing varies
// for 1- vs 2-digit numbers) followed by a row of '|' markers.
void imprimirColumnas(int columnas) {
    for (int i = 0; i < columnas; i++) {
        if (i == 0) {
            cout << " " << i + 1;
        }
        else {
            if (i < 9) {
                cout << " " << i + 1;
            }
            else {
                cout << " " << i + 1;
            }
        }
    }
    cout << "\n";
    // Second line: a '|' tick under each column number.
    for (int i = 0; i < columnas; i++) {
        if (i == 0) {
            cout << " |";
        }
        else {
            cout << " |";
        }
    }
    cout << "\n";
}
//Imprimimos el tablero
// Prints the current board with column/row rulers, colouring each tile value
// through the Windows console API (yellow 2, red 4, purple 8, blue 16,
// white otherwise).
void imprimirTablero(int *tablero, int filas, int columnas) {
    cout << "SE HAN GENERADO " << filas << " FILAS Y " << columnas << " COLUMNAS\n";
    cout << "+-+-+-TABLERO DE JUEGO-+-+-+\n\n";
    imprimirColumnas(columnas);
    for (int i = 0; i < filas; i++) {
        if (i < 9) {
            cout << i + 1 << " - ";
        }
        else {
            cout << i + 1 << " - ";
        }
        for (int k = 0; k < columnas; k++) {
            // FIX: the board is row-major with width `columnas` (see
            // moverCeros/guardarPartida); the old index `i * filas + k`
            // misread every non-square board.
            int bloque = tablero[i * columnas + k];
            // Pick a console colour for the tile value.
            switch (bloque) {
            case 2:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 14); //Amarillo
                break;
            case 4:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 12); //Rojo
                break;
            case 8:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); //Morado
                break;
            case 16:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 9); //Azul
                break;
            default:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); //Blanco
            }
            if (bloque < 10) cout << "| " << bloque << " |";
            else cout << "| " << bloque << "|";
        }
        // Restore white before the newline.
        SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7);
        cout << "\n";
    }
}
// Routes this cell's merge check to the helper matching the pressed key:
// 'W' pairs with the below-neighbour check and 'S' with the above-neighbour
// one, because moverCeros first compacts toward the key's direction. Any
// other key (e.g. 'Z') matches nothing and leaves the board untouched.
__device__ void compruebaSemillas(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    if (movimiento == 'W') {
        compruebaAbajo(tablero, fila, columna, filas, columnas, movimiento);
    }
    else if (movimiento == 'S') {
        compruebaArriba(tablero, fila, columna, filas, columnas, movimiento);
    }
    else if (movimiento == 'D') {
        compruebaIzquierda(tablero, fila, columna, filas, columnas, movimiento);
    }
    else if (movimiento == 'A') {
        compruebaDerecha(tablero, fila, columna, filas, columnas, movimiento);
    }
}
// Compacts column `columna` toward the pressed direction, sliding non-zero
// tiles over zeros while preserving their order. Only vertical moves ('W'
// up, 'S' down) are handled; for 'A'/'D' this is a no-op, so horizontal
// moves are merged by the comprueba* helpers without compaction.
// (`fila` is unused; kept for interface compatibility with the callers.)
//
// FIX: the original bubble passes iterated the outer loop in the wrong
// direction, so a tile separated from the edge by zeros could fail to slide
// all the way (e.g. column [0,0,2,4] with 'W' ended as [2,0,0,4] instead of
// [2,4,0,0]). The passes below start from the side being compacted toward,
// which fully compacts the column.
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    if (movimiento == 'W'){
        // Slide each tile up past any zeros above it, processing rows
        // top-to-bottom.
        for (int i = 1; i < filas; i++){
            for (int j = i; j > 0; j--){
                if (tablero[(j * columnas) + columna] != 0 && tablero[((j - 1) * columnas) + columna] == 0){
                    tablero[((j - 1) * columnas) + columna] = tablero[(j * columnas) + columna];
                    tablero[(j * columnas) + columna] = 0;
                }
            }
        }
    }
    if (movimiento == 'S'){
        // Slide each tile down past any zeros below it, processing rows
        // bottom-to-top.
        for (int i = filas - 2; i >= 0; i--){
            for (int j = i; j < filas - 1; j++){
                if (tablero[(j * columnas) + columna] != 0 && tablero[((j + 1) * columnas) + columna] == 0){
                    tablero[((j + 1) * columnas) + columna] = tablero[(j * columnas) + columna];
                    tablero[(j * columnas) + columna] = 0;
                }
            }
        }
    }
}
// Merge step for a downward move ('S'): after compacting, if this cell
// equals the cell directly above it, double this cell, clear the one above,
// and re-compact.
// FIX: guard `fila > 0` — the unguarded version read and wrote
// tablero[-columnas + columna] (out of bounds) for every thread in row 0.
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    if (fila > 0 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila - 1) * columnas) + columna]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[((fila - 1) * columnas) + columna] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
// Merge step for an upward move ('W'): after compacting, if this cell equals
// the cell directly below it, double this cell, clear the one below, and
// re-compact.
// FIX: guard `fila < filas - 1` — the unguarded version accessed past the
// last row for threads in the bottom row.
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    if (fila < filas - 1 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila + 1) * columnas) + columna]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[((fila + 1) * columnas) + columna] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
// Merge step for a leftward move ('A'): if this cell equals its right-hand
// neighbour, double this cell and clear the neighbour. (moverCeros is a
// no-op for horizontal moves, so no compaction happens here.)
// FIX: guard `columna < columnas - 1` — the unguarded version read the
// first cell of the next row (or past the array) for the last column.
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    if (columna < columnas - 1 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna + 1)]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[(fila * columnas) + (columna + 1)] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
// Merge step for a rightward move ('D'): if this cell equals its left-hand
// neighbour, double this cell and clear the neighbour. (moverCeros is a
// no-op for horizontal moves, so no compaction happens here.)
// FIX: guard `columna > 0` — the unguarded version read the previous row's
// last cell (or before the array) for column 0.
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    if (columna > 0 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna - 1)]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[(fila * columnas) + (columna - 1)] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
// Kernel: one thread per board cell (threadIdx.x = column, threadIdx.y =
// row) applies the current move to its own cell.
// NOTE(review): all threads of a line mutate that line concurrently through
// moverCeros with no synchronization between the per-cell calls, so threads
// of the same column/row race with each other — confirm whether the
// redundant per-cell compaction is relied upon.
__global__ void juegoManual(int *tablero, int filas, int columnas, char movimiento){
    // This thread's cell coordinates within the single launched block.
    int columnaHilo = threadIdx.x;
    int filaHilo = threadIdx.y;
    compruebaSemillas(tablero, filaHilo, columnaHilo, filas, columnas, movimiento);
    __syncthreads();
}
// Persists the game to "partida.txt": first line rows, second line columns,
// then every cell value separated by spaces (difficulty is commented out and
// not written). Clears the console and prints a confirmation.
void guardarPartida(int *tablero, int filas, int columnas/*, int dificultad*/) {
    ofstream doc;
    doc.open("partida.txt");
    doc << filas << "\n";
    doc << columnas << "\n";
    //doc << dificultad << "\n";
    for (int i = 0; i < filas * columnas; i++) {
        doc << tablero[i] << " ";
    }
    doc.close();
    system("cls");
    cout << "Guardado correctamente.\n\n";
}
// Loads a saved game from "partida.txt" (format written by guardarPartida:
// rows, columns, then the board values space-separated) and resumes manual
// play.
// FIX: the old hand-rolled digit parser consumed a non-existent "difficulty"
// line (guardarPartida never writes one, so it swallowed the board line) and
// read only one character per board value, corrupting any tile >= 16. The
// author's own comment flagged it as broken ("NO FUNCIONA"). Formatted
// extraction fixes both; on a missing/corrupt file it now returns instead of
// playing a zero-sized board.
void cargarPartida() {
    const string fichero = "partida.txt";
    ifstream leer;
    leer.open(fichero.c_str());
    int f = 0;
    int c = 0;
    if (leer >> f >> c && f > 0 && c > 0) {
        int *tablero = new int[f * c];
        for (int i = 0; i < f * c; i++) {
            // Missing trailing values default to empty cells.
            if (!(leer >> tablero[i])) tablero[i] = 0;
        }
        leer.close();
        modoManual(tablero, f, c);
    }
    else {
        leer.close();
    }
}
// Interactive game loop: print the board, read a move key (WASD; Z quits),
// apply the move on the GPU with one thread per cell, copy the board back,
// add new seed tiles, and finally offer to save the game.
void modoManual(int *tablero, int filas, int columnas){
    char movimiento = ' ';
    while (movimiento != 'Z'){
        imprimirTablero(tablero, filas, columnas);
        cout << "Pulsa W, A, S o D para mover los numeros (Z para salir): \n";
        cin >> movimiento;
        // Re-prompt until a valid key is entered.
        while (movimiento != 'W' && movimiento != 'S' && movimiento != 'A' && movimiento != 'D' && movimiento != 'Z') {
            cout << "Tecla no valida, introduzca una valida:\n";
            cin >> movimiento;
        }
        // FIX: quitting used to fall through, launch one more kernel pass and
        // add seed tiles before leaving the loop; 'Z' now exits immediately.
        if (movimiento == 'Z') break;
        // Copy the board to the device for this step.
        int *tablero_gpu;
        cudaMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int));
        cudaMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), cudaMemcpyHostToDevice);
        // Single block, one thread per cell. FIX: the kernel reads
        // threadIdx.x as the COLUMN and threadIdx.y as the ROW, so the block
        // must be (columnas, filas); it was (filas, columnas), which
        // mis-covered non-square boards.
        dim3 DimGrid(1, 1);
        dim3 DimBlock(columnas, filas);
        juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento);
        cudaMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, cudaMemcpyDeviceToHost);
        // Drop new seed tiles after every applied move.
        generarSemillas(tablero, filas, columnas);
        cudaFree(tablero_gpu);
    }
    cout << "Deseas guardar la partida? (S/N)\n";
    char guardar = 'x';
    cin >> guardar;
    while (guardar != 'S' && guardar != 'N') {
        system("cls");
        cout << "Valor no valido, quieres guardar la partida? (S/N): \n";
        cin >> guardar;
    }
    if (guardar == 'S') {
        guardarPartida(tablero, filas, columnas/*, dificultad*/);
    }
    else {
        cout << "Saliendo sin guardar...\n \n";
    }
}
8b9c2d316a99cd99040a57e806d5d8ac9d915d2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "definitions.h"
#include "kernel.h"
#include "MatInvLib.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Per-fit CRLB kernel: one thread per fit. Sums the four partial Fisher
// matrices stored for fit j in ParamF, inverts the accumulated NPLxNPL
// matrix with kernel_MatInvN, and writes its diagonal (the per-parameter
// lower bounds) into ParamVar.
__global__ void kernel_calCRLB(float *ParamF, float *ParamVar, int Nfit)
{
	const int tx = threadIdx.x;
	const int bx = blockIdx.x;
	const int BlockSize = blockDim.x;
	//Prevent read/write past end of array
	int j = BlockSize*bx + tx;
	if ((bx*BlockSize + tx) >= Nfit) return;
	int s, k;
	float FisherM[NPL*NPL];     // accumulated Fisher information matrix
	float LowerBi[NPL*NPL];     // full inverse produced by kernel_MatInvN
	float DiagLowerBi[NPL];     // its diagonal: one bound per parameter
	for (k = 0; k < NPL*NPL; k++) FisherM[k] = 0;
	for (k = 0; k < NPL*NPL; k++)
	{
		// Sum the 4 quadrant contributions laid out s-major in ParamF.
		for (s = 0; s < 4; s++)
		{
			FisherM[k] += ParamF[s*NPL*NPL*Nfit + j*NPL*NPL + k];
		}
		//FisherM[k] = (FisherM[k] < 1e-6 ? 1e-6 : FisherM[k]); //prevent negative value in Fisher Information
	}
	kernel_MatInvN(FisherM, LowerBi, DiagLowerBi, NPL);
	for (k = 0; k < NPL; k++) ParamVar[j*NPL + k] = DiagLowerBi[k];
}
// Per-fit Fisher information kernel: one thread per fit j. Over all PSFSize
// pixels it accumulates the outer product of the model derivatives
// [d/dx, d/dy, d/dz, d/dI, d/dbg] weighted by 1/model into the NPLxNPL
// Fisher matrix and stores it in ParamF[j]. The pixel model is
// PSF*I + bg + gainR.
// NOTE: parameter Q is unused but retained for interface compatibility.
// (Removed the unused locals `s` and `w` from the original.)
__global__ void kernel_calFisherM(float *PSF, float *dPSFx, float *dPSFy, float *dPSFz, float *I, float *bg, float *gainR, float *ParamF,
	int Q, int Nfit, int PSFSize){
	const int tx = threadIdx.x;
	const int bx = blockIdx.x;
	const int BlockSize = blockDim.x;
	//Prevent read/write past end of array
	int j = BlockSize*bx + tx;
	if ((bx*BlockSize + tx) >= Nfit) return;
	int t, k, i;
	float PSFa0;          // expected pixel value under the model
	float funFi1[NPL];    // model derivatives at this pixel
	float tmp1;
	float FisherM[NPL*NPL];
	for (i = 0; i < NPL*NPL; i++) FisherM[i] = 0;
	for (i = 0; i < PSFSize; i++)
	{
		PSFa0 = PSF[j*PSFSize + i] * I[j] + bg[j] + gainR[j*PSFSize + i];
		//x
		funFi1[0] = dPSFx[j*PSFSize + i] * I[j];
		//y
		funFi1[1] = dPSFy[j*PSFSize + i] * I[j];
		//z
		funFi1[2] = dPSFz[j*PSFSize + i] * I[j];
		//I
		funFi1[3] = PSF[j*PSFSize + i];
		//bg
		funFi1[4] = 1;
		for (t = 0; t < NPL; t++)
		{
			for (k = 0; k < NPL; k++)
			{
				// Clamp the model away from zero to avoid dividing by ~0.
				tmp1 = funFi1[t] * funFi1[k] / fmaxf(PSFa0, 1e-4f);
				FisherM[t*NPL + k] += tmp1;
			}
		}
	}
	for (k = 0; k < NPL*NPL; k++) ParamF[j*NPL*NPL + k] = FisherM[k];
}
| 8b9c2d316a99cd99040a57e806d5d8ac9d915d2d.cu | #include "cuda_runtime.h"
#include "definitions.h"
#include "kernel.h"
#include "MatInvLib.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Per-fit CRLB kernel: one thread per fit. Sums the four partial Fisher
// matrices stored for fit j in ParamF, inverts the accumulated NPLxNPL
// matrix with kernel_MatInvN, and writes its diagonal (the per-parameter
// lower bounds) into ParamVar.
__global__ void kernel_calCRLB(float *ParamF, float *ParamVar, int Nfit)
{
	const int tx = threadIdx.x;
	const int bx = blockIdx.x;
	const int BlockSize = blockDim.x;
	//Prevent read/write past end of array
	int j = BlockSize*bx + tx;
	if ((bx*BlockSize + tx) >= Nfit) return;
	int s, k;
	float FisherM[NPL*NPL];     // accumulated Fisher information matrix
	float LowerBi[NPL*NPL];     // full inverse produced by kernel_MatInvN
	float DiagLowerBi[NPL];     // its diagonal: one bound per parameter
	for (k = 0; k < NPL*NPL; k++) FisherM[k] = 0;
	for (k = 0; k < NPL*NPL; k++)
	{
		// Sum the 4 quadrant contributions laid out s-major in ParamF.
		for (s = 0; s < 4; s++)
		{
			FisherM[k] += ParamF[s*NPL*NPL*Nfit + j*NPL*NPL + k];
		}
		//FisherM[k] = (FisherM[k] < 1e-6 ? 1e-6 : FisherM[k]); //prevent negative value in Fisher Information
	}
	kernel_MatInvN(FisherM, LowerBi, DiagLowerBi, NPL);
	for (k = 0; k < NPL; k++) ParamVar[j*NPL + k] = DiagLowerBi[k];
}
// Per-fit Fisher information kernel: one thread per fit j. Over all PSFSize
// pixels it accumulates the outer product of the model derivatives
// [d/dx, d/dy, d/dz, d/dI, d/dbg] weighted by 1/model into the NPLxNPL
// Fisher matrix and stores it in ParamF[j]. The pixel model is
// PSF*I + bg + gainR.
// NOTE: parameter Q is unused but retained for interface compatibility.
// (Removed the unused locals `s` and `w` from the original.)
__global__ void kernel_calFisherM(float *PSF, float *dPSFx, float *dPSFy, float *dPSFz, float *I, float *bg, float *gainR, float *ParamF,
	int Q, int Nfit, int PSFSize){
	const int tx = threadIdx.x;
	const int bx = blockIdx.x;
	const int BlockSize = blockDim.x;
	//Prevent read/write past end of array
	int j = BlockSize*bx + tx;
	if ((bx*BlockSize + tx) >= Nfit) return;
	int t, k, i;
	float PSFa0;          // expected pixel value under the model
	float funFi1[NPL];    // model derivatives at this pixel
	float tmp1;
	float FisherM[NPL*NPL];
	for (i = 0; i < NPL*NPL; i++) FisherM[i] = 0;
	for (i = 0; i < PSFSize; i++)
	{
		PSFa0 = PSF[j*PSFSize + i] * I[j] + bg[j] + gainR[j*PSFSize + i];
		//x
		funFi1[0] = dPSFx[j*PSFSize + i] * I[j];
		//y
		funFi1[1] = dPSFy[j*PSFSize + i] * I[j];
		//z
		funFi1[2] = dPSFz[j*PSFSize + i] * I[j];
		//I
		funFi1[3] = PSF[j*PSFSize + i];
		//bg
		funFi1[4] = 1;
		for (t = 0; t < NPL; t++)
		{
			for (k = 0; k < NPL; k++)
			{
				// Clamp the model away from zero to avoid dividing by ~0.
				tmp1 = funFi1[t] * funFi1[k] / fmaxf(PSFa0, 1e-4f);
				FisherM[t*NPL + k] += tmp1;
			}
		}
	}
	for (k = 0; k < NPL*NPL; k++) ParamF[j*NPL*NPL + k] = FisherM[k];
}
|
9a26377569b618a15cbcc39bdfa2702d4a97c6cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgeellrtmv.cu normal z -> d, Wed Sep 17 15:08:43 2014
*/
#include "common_magma.h"
//F. Vzquez, G. Ortega, J.J. Fernndez, E.M. Garzn, Almeria University
__global__ void
dgeellrtmv_kernel_32( int num_rows,
int num_cols,
double alpha,
double *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
double *d_x,
double beta,
double *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//double val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
if( idp < 16 ){
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
//F. Vzquez, G. Ortega, J.J. Fernndez, E.M. Garzn, Almeria University
__global__ void
dgeellrtmv_kernel_16( int num_rows,
int num_cols,
double alpha,
double *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
double *d_x,
double beta,
double *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//double val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
if( idp < 8 ){
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
//F. Vzquez, G. Ortega, J.J. Fernndez, E.M. Garzn, Almeria University
__global__ void
dgeellrtmv_kernel_8( int num_rows,
int num_cols,
double alpha,
double *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
double *d_x,
double beta,
double *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//double val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
if( idp < 4 ){
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLRT. The ideas are taken from
"Improving the performance of the sparse matrix
vector product with GPUs", (CIT 2010),
and modified to provide correct values.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows
@param
n magma_int_t
number of columns
@param
nnz_per_row magma_int_t
max number of nonzeros in a row
@param
alpha double
scalar alpha
@param
d_val double*
val array
@param
d_colind magma_int_t*
col indices
@param
d_rowlength magma_int_t*
number of elements in each row
@param
d_x double*
input vector x
@param
beta double
scalar beta
@param
d_y double*
output vector y
@param
blocksize magma_int_t
threads per block
@param
alignment magma_int_t
threads assigned to each row
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgeellrtmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
double *d_x,
double beta,
double *d_y,
magma_int_t alignment,
magma_int_t blocksize ){
int num_blocks = ( (m+blocksize-1)/blocksize);
int num_threads = alignment*blocksize;
int real_row_length = ((int)(nnz_per_row+alignment-1)/alignment)
*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = sqrt(num_blocks);
int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = alignment * blocksize * sizeof( double );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if( alignment == 32 ){
hipLaunchKernelGGL(( dgeellrtmv_kernel_32), dim3(grid), dim3(num_threads) , Ms, magma_stream ,
m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
alignment, real_row_length );
}
else if( alignment == 16 ){
hipLaunchKernelGGL(( dgeellrtmv_kernel_16), dim3(grid), dim3(num_threads) , Ms, magma_stream ,
m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
alignment, real_row_length );
}
else if( alignment == 8 ){
hipLaunchKernelGGL(( dgeellrtmv_kernel_8), dim3(grid), dim3(num_threads) , Ms, magma_stream ,
m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
alignment, real_row_length );
}
else{
printf("error: alignment %d not supported.\n", alignment);
exit(-1);
}
return MAGMA_SUCCESS;
}
| 9a26377569b618a15cbcc39bdfa2702d4a97c6cc.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgeellrtmv.cu normal z -> d, Wed Sep 17 15:08:43 2014
*/
#include "common_magma.h"
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
dgeellrtmv_kernel_32( int num_rows,
int num_cols,
double alpha,
double *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
double *d_x,
double beta,
double *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//double val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
if( idp < 16 ){
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
dgeellrtmv_kernel_16( int num_rows,
int num_cols,
double alpha,
double *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
double *d_x,
double beta,
double *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//double val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
if( idp < 8 ){
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
//F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University
__global__ void
dgeellrtmv_kernel_8( int num_rows,
int num_cols,
double alpha,
double *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
double *d_x,
double beta,
double *d_y,
int T,
int alignment )
{
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
int idp = idb%T; // number of threads assigned to one row
int i = idx/T; // row index
extern __shared__ double shared[];
if(i < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//double val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
double val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d_x[ col ];
}
shared[idb] = dot;
if( idp < 4 ){
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i];
}
}
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLRT. The ideas are taken from
"Improving the performance of the sparse matrix
vector product with GPUs", (CIT 2010),
and modified to provide correct values.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows
@param
n magma_int_t
number of columns
@param
nnz_per_row magma_int_t
max number of nonzeros in a row
@param
alpha double
scalar alpha
@param
d_val double*
val array
@param
d_colind magma_int_t*
col indices
@param
d_rowlength magma_int_t*
number of elements in each row
@param
d_x double*
input vector x
@param
beta double
scalar beta
@param
d_y double*
output vector y
@param
blocksize magma_int_t
threads per block
@param
alignment magma_int_t
threads assigned to each row
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgeellrtmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
double *d_x,
double beta,
double *d_y,
magma_int_t alignment,
magma_int_t blocksize ){
int num_blocks = ( (m+blocksize-1)/blocksize);
int num_threads = alignment*blocksize;
int real_row_length = ((int)(nnz_per_row+alignment-1)/alignment)
*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = sqrt(num_blocks);
int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = alignment * blocksize * sizeof( double );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if( alignment == 32 ){
dgeellrtmv_kernel_32<<< grid, num_threads , Ms, magma_stream >>>
( m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
alignment, real_row_length );
}
else if( alignment == 16 ){
dgeellrtmv_kernel_16<<< grid, num_threads , Ms, magma_stream >>>
( m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
alignment, real_row_length );
}
else if( alignment == 8 ){
dgeellrtmv_kernel_8<<< grid, num_threads , Ms, magma_stream >>>
( m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y,
alignment, real_row_length );
}
else{
printf("error: alignment %d not supported.\n", alignment);
exit(-1);
}
return MAGMA_SUCCESS;
}
|
9d2331b5cbbec2d9f9892f403b9a628f856471f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <numeric>
#include <iostream>
#include <vector>
#include <algorithm>
#include <stdio.h>
#include <cufftMp.h>
#include <mpi.h>
#include "../common/error_checks.hpp"
#include "../common/generate_random.hpp"
#include "../common/scaling.cuh"
#include "../iterators/box_iterator.hpp"
/**
* This samples illustrates a basic use of cuFFTMp using custom data distributions
* in the case of an R2C - C2R transform
*
* It performs
* - forward transform
* - printing and scaling of the entries
* - inverse transform
*/
void run_r2c_c2r_pencils(size_t nx, size_t ny, size_t nz, float* cpu_data, Box3D box_real, Box3D box_complex, const int rank, const int size, MPI_Comm comm) {
// Initialize plans and stream
hipfftHandle plan_r2c = 0;
hipfftHandle plan_c2r = 0;
hipStream_t stream = nullptr;
CUDA_CHECK(hipStreamCreate(&stream));
CUFFT_CHECK(hipfftCreate(&plan_r2c));
CUFFT_CHECK(hipfftCreate(&plan_c2r));
// Attach the MPI communicator to the plans
CUFFT_CHECK(cufftMpAttachComm(plan_r2c, CUFFT_COMM_MPI, &comm));
CUFFT_CHECK(cufftMpAttachComm(plan_c2r, CUFFT_COMM_MPI, &comm));
// Describe the data distribution
// R2C plans only support CUFFT_XT_FORMAT_DISTRIBUTED_INPUT and always perform a HIPFFT_FORWARD transform
// C2R plans only support CUFFT_XT_FORMAT_DISTRIBUTED_OUTPUT ans always perform a HIPFFT_BACKWARD transform
// So, in both, the "input" box should be the real box and the "output" box should be the complex box
CUFFT_CHECK(cufftXtSetDistribution(plan_r2c, 3, box_real.lower, box_real.upper, box_complex.lower, box_complex.upper, box_real.strides, box_complex.strides));
CUFFT_CHECK(cufftXtSetDistribution(plan_c2r, 3, box_real.lower, box_real.upper, box_complex.lower, box_complex.upper, box_real.strides, box_complex.strides));
// Set the stream
CUFFT_CHECK(hipfftSetStream(plan_r2c, stream));
CUFFT_CHECK(hipfftSetStream(plan_c2r, stream));
// Make the plan
size_t workspace;
CUFFT_CHECK(hipfftMakePlan3d(plan_r2c, nx, ny, nz, HIPFFT_R2C, &workspace));
CUFFT_CHECK(hipfftMakePlan3d(plan_c2r, nx, ny, nz, HIPFFT_C2R, &workspace));
// Allocate GPU memory, copy CPU data to GPU
// Data is initially distributed according to CUFFT_XT_FORMAT_DISTRIBUTED_INPUT, i.e., box_real
cudaLibXtDesc *desc;
CUFFT_CHECK(cufftXtMalloc(plan_r2c, &desc, CUFFT_XT_FORMAT_DISTRIBUTED_INPUT));
CUFFT_CHECK(cufftXtMemcpy(plan_r2c, desc, cpu_data, CUFFT_COPY_HOST_TO_DEVICE));
// Run R2C
CUFFT_CHECK(cufftXtExecDescriptor(plan_r2c, desc, desc, HIPFFT_FORWARD));
// At this point, data is distributed according to CUFFT_XT_FORMAT_DISTRIBUTED_OUTPUT, i.e., box_complex
// This applies an element-wise scaling function to the GPU data located in desc->descriptor->data[0]
auto [begin_d, end_d] = BoxIterators(box_complex, (hipfftComplex*)desc->descriptor->data[0]);
const size_t num_elements = std::distance(begin_d, end_d);
const size_t num_threads = 128;
const size_t num_blocks = (num_elements + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( scaling_kernel), dim3(num_blocks), dim3(num_threads), 0, stream, begin_d, end_d, rank, size, nx, ny, nz);
// Run C2R
CUFFT_CHECK(cufftXtExecDescriptor(plan_c2r, desc, desc, HIPFFT_BACKWARD));
// Copy back to CPU and free
// Data is again distributed according to CUFFT_XT_FORMAT_DISTRIBUTED_INPUT, i.e., box_real
CUDA_CHECK(hipStreamSynchronize(stream));
CUFFT_CHECK(cufftXtMemcpy(plan_c2r, cpu_data, desc, CUFFT_COPY_DEVICE_TO_HOST));
CUFFT_CHECK(cufftXtFree(desc));
CUFFT_CHECK(hipfftDestroy(plan_r2c));
CUFFT_CHECK(hipfftDestroy(plan_c2r));
CUDA_CHECK(hipStreamDestroy(stream));
};
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int ndevices;
CUDA_CHECK(hipGetDeviceCount(&ndevices));
CUDA_CHECK(hipSetDevice(rank % ndevices));
printf("Hello from rank %d/%d using GPU %d\n", rank, size, rank % ndevices);
// Create pencils along Z for the input
int nranks1d = std::sqrt(size);
if(nranks1d * nranks1d != size) {
std::cout << "The number of MPI ranks should be a perfect square\n";
return 1;
}
// Define custom data distribution
int64 nx = 5;
int64 ny = 6;
int64 nz = 7;
int64 nz_real = nz;
int64 nz_complex = (nz/2+1);
int64 nz_real_padded = 2*nz_complex;
// Describe the data distribution using boxes
auto make_box = [](int64 lower[3], int64 upper[3], int64 strides[3]) {
Box3D box;
for(int i = 0; i < 3; i++) {
box.lower[i] = lower[i];
box.upper[i] = upper[i];
box.strides[i] = strides[i];
}
return box;
};
auto displacement = [](int64 length, int rank, int size) {
int ranks_cutoff = length % size;
return (rank < ranks_cutoff ? rank * (length / size + 1) : ranks_cutoff * (length / size + 1) + (rank - ranks_cutoff) * (length / size));
};
std::vector<Box3D> boxes_real;
std::vector<Box3D> boxes_complex;
for(int i = 0; i < nranks1d; i++) {
for(int j = 0; j < nranks1d; j++) {
{
// Input data are real pencils in X & Y, along Z
// Strides are packed and in-place (i.e., real is padded)
int64 lower[3] = {displacement(nx, i, nranks1d), displacement(ny, j, nranks1d), 0};
int64 upper[3] = {displacement(nx, i+1, nranks1d), displacement(ny, j+1, nranks1d), nz_real};
int64 strides[3] = {(upper[1]-lower[1])*nz_real_padded, nz_real_padded, 1};
boxes_real.push_back(make_box(lower, upper, strides));
}
{
// Output data are complex pencils in X & Z, along Y (picked arbitrarily)
// Strides are packed
// For best performances, the local dimension in the input (Z, here) and output (Y, here) should be different
// to ensure cuFFTMp will only perform two communication phases.
// If Z was also local in the output, cuFFTMp would perform three communication phases, decreasing performances.
int64 lower[3] = {displacement(nx, i, nranks1d), 0, displacement(nz_complex, j, nranks1d)};
int64 upper[3] = {displacement(nx, i+1, nranks1d), ny, displacement(nz_complex, j+1, nranks1d)};
int64 strides[3] = {(upper[1]-lower[1])*(upper[2]-lower[2]), (upper[2]-lower[2]), 1};
boxes_complex.push_back(make_box(lower, upper, strides));
}
}
}
// Generate CPU data
Box3D box_real = boxes_real[rank];
std::vector<float> input_cpu_data((box_real.upper[0] - box_real.lower[0]) * box_real.strides[0]);
generate_random(input_cpu_data, rank);
auto ref = input_cpu_data;
// Print input data
auto[in_begin_h, in_end_h] = BoxIterators(box_real, input_cpu_data.data());
for (auto it = in_begin_h; it != in_end_h; ++it) {
std::cout << "Input data, global 3D index [" << it.x() << "," << it.y() << "," << it.z() << "], local index " << it.i() << ", rank " << rank << " is " << *it << "\n";
}
// Compute a forward + normalization + inverse FFT
run_r2c_c2r_pencils(nx, ny, nz, input_cpu_data.data(), boxes_real[rank], boxes_complex[rank], rank, size, MPI_COMM_WORLD);
// Compute error
double error = compute_error(ref, input_cpu_data, box_real);
MPI_Finalize();
return assess_error(error);
}
| 9d2331b5cbbec2d9f9892f403b9a628f856471f1.cu | #include <numeric>
#include <iostream>
#include <vector>
#include <algorithm>
#include <stdio.h>
#include <cufftMp.h>
#include <mpi.h>
#include "../common/error_checks.hpp"
#include "../common/generate_random.hpp"
#include "../common/scaling.cuh"
#include "../iterators/box_iterator.hpp"
/**
* This samples illustrates a basic use of cuFFTMp using custom data distributions
* in the case of an R2C - C2R transform
*
* It performs
* - forward transform
* - printing and scaling of the entries
* - inverse transform
*/
void run_r2c_c2r_pencils(size_t nx, size_t ny, size_t nz, float* cpu_data, Box3D box_real, Box3D box_complex, const int rank, const int size, MPI_Comm comm) {
// Initialize plans and stream
cufftHandle plan_r2c = 0;
cufftHandle plan_c2r = 0;
cudaStream_t stream = nullptr;
CUDA_CHECK(cudaStreamCreate(&stream));
CUFFT_CHECK(cufftCreate(&plan_r2c));
CUFFT_CHECK(cufftCreate(&plan_c2r));
// Attach the MPI communicator to the plans
CUFFT_CHECK(cufftMpAttachComm(plan_r2c, CUFFT_COMM_MPI, &comm));
CUFFT_CHECK(cufftMpAttachComm(plan_c2r, CUFFT_COMM_MPI, &comm));
// Describe the data distribution
// R2C plans only support CUFFT_XT_FORMAT_DISTRIBUTED_INPUT and always perform a CUFFT_FORWARD transform
// C2R plans only support CUFFT_XT_FORMAT_DISTRIBUTED_OUTPUT ans always perform a CUFFT_INVERSE transform
// So, in both, the "input" box should be the real box and the "output" box should be the complex box
CUFFT_CHECK(cufftXtSetDistribution(plan_r2c, 3, box_real.lower, box_real.upper, box_complex.lower, box_complex.upper, box_real.strides, box_complex.strides));
CUFFT_CHECK(cufftXtSetDistribution(plan_c2r, 3, box_real.lower, box_real.upper, box_complex.lower, box_complex.upper, box_real.strides, box_complex.strides));
// Set the stream
CUFFT_CHECK(cufftSetStream(plan_r2c, stream));
CUFFT_CHECK(cufftSetStream(plan_c2r, stream));
// Make the plan
size_t workspace;
CUFFT_CHECK(cufftMakePlan3d(plan_r2c, nx, ny, nz, CUFFT_R2C, &workspace));
CUFFT_CHECK(cufftMakePlan3d(plan_c2r, nx, ny, nz, CUFFT_C2R, &workspace));
// Allocate GPU memory, copy CPU data to GPU
// Data is initially distributed according to CUFFT_XT_FORMAT_DISTRIBUTED_INPUT, i.e., box_real
cudaLibXtDesc *desc;
CUFFT_CHECK(cufftXtMalloc(plan_r2c, &desc, CUFFT_XT_FORMAT_DISTRIBUTED_INPUT));
CUFFT_CHECK(cufftXtMemcpy(plan_r2c, desc, cpu_data, CUFFT_COPY_HOST_TO_DEVICE));
// Run R2C
CUFFT_CHECK(cufftXtExecDescriptor(plan_r2c, desc, desc, CUFFT_FORWARD));
// At this point, data is distributed according to CUFFT_XT_FORMAT_DISTRIBUTED_OUTPUT, i.e., box_complex
// This applies an element-wise scaling function to the GPU data located in desc->descriptor->data[0]
auto [begin_d, end_d] = BoxIterators(box_complex, (cufftComplex*)desc->descriptor->data[0]);
const size_t num_elements = std::distance(begin_d, end_d);
const size_t num_threads = 128;
const size_t num_blocks = (num_elements + num_threads - 1) / num_threads;
scaling_kernel<<<num_blocks, num_threads, 0, stream>>>(begin_d, end_d, rank, size, nx, ny, nz);
// Run C2R
CUFFT_CHECK(cufftXtExecDescriptor(plan_c2r, desc, desc, CUFFT_INVERSE));
// Copy back to CPU and free
// Data is again distributed according to CUFFT_XT_FORMAT_DISTRIBUTED_INPUT, i.e., box_real
CUDA_CHECK(cudaStreamSynchronize(stream));
CUFFT_CHECK(cufftXtMemcpy(plan_c2r, cpu_data, desc, CUFFT_COPY_DEVICE_TO_HOST));
CUFFT_CHECK(cufftXtFree(desc));
CUFFT_CHECK(cufftDestroy(plan_r2c));
CUFFT_CHECK(cufftDestroy(plan_c2r));
CUDA_CHECK(cudaStreamDestroy(stream));
};
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int ndevices;
CUDA_CHECK(cudaGetDeviceCount(&ndevices));
CUDA_CHECK(cudaSetDevice(rank % ndevices));
printf("Hello from rank %d/%d using GPU %d\n", rank, size, rank % ndevices);
// Create pencils along Z for the input
int nranks1d = std::sqrt(size);
if(nranks1d * nranks1d != size) {
std::cout << "The number of MPI ranks should be a perfect square\n";
return 1;
}
// Define custom data distribution
int64 nx = 5;
int64 ny = 6;
int64 nz = 7;
int64 nz_real = nz;
int64 nz_complex = (nz/2+1);
int64 nz_real_padded = 2*nz_complex;
// Describe the data distribution using boxes
auto make_box = [](int64 lower[3], int64 upper[3], int64 strides[3]) {
Box3D box;
for(int i = 0; i < 3; i++) {
box.lower[i] = lower[i];
box.upper[i] = upper[i];
box.strides[i] = strides[i];
}
return box;
};
auto displacement = [](int64 length, int rank, int size) {
int ranks_cutoff = length % size;
return (rank < ranks_cutoff ? rank * (length / size + 1) : ranks_cutoff * (length / size + 1) + (rank - ranks_cutoff) * (length / size));
};
std::vector<Box3D> boxes_real;
std::vector<Box3D> boxes_complex;
for(int i = 0; i < nranks1d; i++) {
for(int j = 0; j < nranks1d; j++) {
{
// Input data are real pencils in X & Y, along Z
// Strides are packed and in-place (i.e., real is padded)
int64 lower[3] = {displacement(nx, i, nranks1d), displacement(ny, j, nranks1d), 0};
int64 upper[3] = {displacement(nx, i+1, nranks1d), displacement(ny, j+1, nranks1d), nz_real};
int64 strides[3] = {(upper[1]-lower[1])*nz_real_padded, nz_real_padded, 1};
boxes_real.push_back(make_box(lower, upper, strides));
}
{
// Output data are complex pencils in X & Z, along Y (picked arbitrarily)
// Strides are packed
// For best performances, the local dimension in the input (Z, here) and output (Y, here) should be different
// to ensure cuFFTMp will only perform two communication phases.
// If Z was also local in the output, cuFFTMp would perform three communication phases, decreasing performances.
int64 lower[3] = {displacement(nx, i, nranks1d), 0, displacement(nz_complex, j, nranks1d)};
int64 upper[3] = {displacement(nx, i+1, nranks1d), ny, displacement(nz_complex, j+1, nranks1d)};
int64 strides[3] = {(upper[1]-lower[1])*(upper[2]-lower[2]), (upper[2]-lower[2]), 1};
boxes_complex.push_back(make_box(lower, upper, strides));
}
}
}
// Generate CPU data
Box3D box_real = boxes_real[rank];
std::vector<float> input_cpu_data((box_real.upper[0] - box_real.lower[0]) * box_real.strides[0]);
generate_random(input_cpu_data, rank);
auto ref = input_cpu_data;
// Print input data
auto[in_begin_h, in_end_h] = BoxIterators(box_real, input_cpu_data.data());
for (auto it = in_begin_h; it != in_end_h; ++it) {
std::cout << "Input data, global 3D index [" << it.x() << "," << it.y() << "," << it.z() << "], local index " << it.i() << ", rank " << rank << " is " << *it << "\n";
}
// Compute a forward + normalization + inverse FFT
run_r2c_c2r_pencils(nx, ny, nz, input_cpu_data.data(), boxes_real[rank], boxes_complex[rank], rank, size, MPI_COMM_WORLD);
// Compute error
double error = compute_error(ref, input_cpu_data, box_real);
MPI_Finalize();
return assess_error(error);
}
|
a4de294888ac359f738456dcba44494bf7150517.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cmath>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <random>
#include <vector>
#include <common/cudart_utils.h>
#include <test_utils.h>
#include <metrics/batched/information_criterion.cuh>
#include "../test_utils.h"
namespace MLCommon {
namespace Metrics {
namespace Batched {
template <typename T>
void naive_ic(T *h_ic, const T *h_loglike, IC_Type ic_type, int n_params,
int batch_size, int n_samples) {
T ic_base;
T N = static_cast<T>(n_params);
T M = static_cast<T>(n_samples);
switch (ic_type) {
case AIC:
ic_base = (T)2 * N;
break;
case AICc:
ic_base = (T)2 * (N + (N * (N + (T)1)) / (M - N - (T)1));
break;
case BIC:
ic_base = ::log(M) * N;
break;
}
#pragma omp parallel for
for (int bid = 0; bid < batch_size; bid++) {
h_ic[bid] = ic_base - (T)2.0 * h_loglike[bid];
}
}
template <typename T>
struct BatchedICInputs {
int batch_size;
int n_params;
int n_samples;
IC_Type ic_type;
T tolerance;
};
template <typename T>
class BatchedICTest : public ::testing::TestWithParam<BatchedICInputs<T>> {
protected:
void SetUp() override {
using std::vector;
params = ::testing::TestWithParam<BatchedICInputs<T>>::GetParam();
// Create stream and allocator
CUDA_CHECK(hipStreamCreate(&stream));
allocator = std::make_shared<raft::mr::device::default_allocator>();
// Create arrays
std::vector<T> loglike_h = std::vector<T>(params.batch_size);
res_h.resize(params.batch_size);
T *loglike_d =
(T *)allocator->allocate(sizeof(T) * params.batch_size, stream);
res_d = (T *)allocator->allocate(sizeof(T) * params.batch_size, stream);
// Generate random data
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<T> udis(0.001, 1.0); // 0 has no log
for (int i = 0; i < params.batch_size; i++)
loglike_h[i] = ::log(udis(gen));
// Copy the data to the device
raft::update_device(loglike_d, loglike_h.data(), params.batch_size, stream);
// Compute the tested results
information_criterion(res_d, loglike_d, params.ic_type, params.n_params,
params.batch_size, params.n_samples, stream);
// Compute the expected results
naive_ic(res_h.data(), loglike_h.data(), params.ic_type, params.n_params,
params.batch_size, params.n_samples);
allocator->deallocate(loglike_d, sizeof(T) * params.batch_size, stream);
}
void TearDown() override {
allocator->deallocate(res_d, sizeof(T) * params.batch_size, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
std::shared_ptr<raft::mr::device::default_allocator> allocator;
BatchedICInputs<T> params;
T *res_d;
std::vector<T> res_h;
hipStream_t stream;
};
// Test parameters (op, n_batches, m, n, p, q, tolerance)
const std::vector<BatchedICInputs<double>> inputsd = {
{1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}};
// Test parameters (op, n_batches, m, n, p, q, tolerance)
const std::vector<BatchedICInputs<float>> inputsf = {
{1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}};
using BatchedICTestD = BatchedICTest<double>;
using BatchedICTestF = BatchedICTest<float>;
TEST_P(BatchedICTestD, Result) {
ASSERT_TRUE(devArrMatchHost(res_h.data(), res_d, params.batch_size,
raft::CompareApprox<double>(params.tolerance),
stream));
}
TEST_P(BatchedICTestF, Result) {
ASSERT_TRUE(devArrMatchHost(res_h.data(), res_d, params.batch_size,
raft::CompareApprox<float>(params.tolerance),
stream));
}
INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestF,
::testing::ValuesIn(inputsf));
} // namespace Batched
} // namespace Metrics
} // namespace MLCommon
| a4de294888ac359f738456dcba44494bf7150517.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cmath>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <random>
#include <vector>
#include <common/cudart_utils.h>
#include <test_utils.h>
#include <metrics/batched/information_criterion.cuh>
#include "../test_utils.h"
namespace MLCommon {
namespace Metrics {
namespace Batched {
template <typename T>
void naive_ic(T *h_ic, const T *h_loglike, IC_Type ic_type, int n_params,
int batch_size, int n_samples) {
T ic_base;
T N = static_cast<T>(n_params);
T M = static_cast<T>(n_samples);
switch (ic_type) {
case AIC:
ic_base = (T)2 * N;
break;
case AICc:
ic_base = (T)2 * (N + (N * (N + (T)1)) / (M - N - (T)1));
break;
case BIC:
ic_base = std::log(M) * N;
break;
}
#pragma omp parallel for
for (int bid = 0; bid < batch_size; bid++) {
h_ic[bid] = ic_base - (T)2.0 * h_loglike[bid];
}
}
template <typename T>
struct BatchedICInputs {
int batch_size;
int n_params;
int n_samples;
IC_Type ic_type;
T tolerance;
};
template <typename T>
class BatchedICTest : public ::testing::TestWithParam<BatchedICInputs<T>> {
protected:
void SetUp() override {
using std::vector;
params = ::testing::TestWithParam<BatchedICInputs<T>>::GetParam();
// Create stream and allocator
CUDA_CHECK(cudaStreamCreate(&stream));
allocator = std::make_shared<raft::mr::device::default_allocator>();
// Create arrays
std::vector<T> loglike_h = std::vector<T>(params.batch_size);
res_h.resize(params.batch_size);
T *loglike_d =
(T *)allocator->allocate(sizeof(T) * params.batch_size, stream);
res_d = (T *)allocator->allocate(sizeof(T) * params.batch_size, stream);
// Generate random data
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<T> udis(0.001, 1.0); // 0 has no log
for (int i = 0; i < params.batch_size; i++)
loglike_h[i] = std::log(udis(gen));
// Copy the data to the device
raft::update_device(loglike_d, loglike_h.data(), params.batch_size, stream);
// Compute the tested results
information_criterion(res_d, loglike_d, params.ic_type, params.n_params,
params.batch_size, params.n_samples, stream);
// Compute the expected results
naive_ic(res_h.data(), loglike_h.data(), params.ic_type, params.n_params,
params.batch_size, params.n_samples);
allocator->deallocate(loglike_d, sizeof(T) * params.batch_size, stream);
}
void TearDown() override {
allocator->deallocate(res_d, sizeof(T) * params.batch_size, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
std::shared_ptr<raft::mr::device::default_allocator> allocator;
BatchedICInputs<T> params;
T *res_d;
std::vector<T> res_h;
cudaStream_t stream;
};
// Test parameters (op, n_batches, m, n, p, q, tolerance)
const std::vector<BatchedICInputs<double>> inputsd = {
{1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}};
// Test parameters (op, n_batches, m, n, p, q, tolerance)
const std::vector<BatchedICInputs<float>> inputsf = {
{1, 5, 52, AIC, 1e-3}, {10, 7, 100, AICc, 1e-3}, {67, 2, 350, BIC, 1e-3}};
using BatchedICTestD = BatchedICTest<double>;
using BatchedICTestF = BatchedICTest<float>;
TEST_P(BatchedICTestD, Result) {
ASSERT_TRUE(devArrMatchHost(res_h.data(), res_d, params.batch_size,
raft::CompareApprox<double>(params.tolerance),
stream));
}
TEST_P(BatchedICTestF, Result) {
ASSERT_TRUE(devArrMatchHost(res_h.data(), res_d, params.batch_size,
raft::CompareApprox<float>(params.tolerance),
stream));
}
INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(BatchedICTests, BatchedICTestF,
::testing::ValuesIn(inputsf));
} // namespace Batched
} // namespace Metrics
} // namespace MLCommon
|
2401598927e86d2edc210324a4e8cc203d79571d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "magnetic_orientation.h"
#include <mirheo/core/pvs/rigid_object_vector.h>
#include <mirheo/core/pvs/views/rov.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/quaternion.h>
namespace mirheo
{
namespace magnetic_orientation_plugin_kernels
{
__global__ void applyMagneticField(ROVview view, real3 B, real3 M)
{
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= view.nObjects) return;
const auto q = static_cast<Quaternion<real>>(view.motions[gid].q);
M = q.rotate(M);
const real3 T = cross(M, B);
atomicAdd(&view.motions[gid].torque.x, static_cast<RigidReal>(T.x));
atomicAdd(&view.motions[gid].torque.y, static_cast<RigidReal>(T.y));
atomicAdd(&view.motions[gid].torque.z, static_cast<RigidReal>(T.z));
}
} // namespace magnetic_orientation_plugin_kernels
MagneticOrientationPlugin::MagneticOrientationPlugin(const MirState *state, std::string name, std::string rovName,
real3 moment, UniformMagneticFunc magneticFunction) :
SimulationPlugin(state, name),
rovName_(rovName),
moment_(moment),
magneticFunction_(magneticFunction)
{}
void MagneticOrientationPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
rov_ = dynamic_cast<RigidObjectVector*>( simulation->getOVbyNameOrDie(rovName_) );
if (rov_ == nullptr)
die("Need rigid object vector to interact with magnetic field, plugin '%s', OV name '%s'",
getCName(), rovName_.c_str());
}
void MagneticOrientationPlugin::beforeForces(hipStream_t stream)
{
ROVview view(rov_, rov_->local());
const int nthreads = 128;
const auto t = getState()->currentTime;
const auto B = magneticFunction_(t);
SAFE_KERNEL_LAUNCH(
magnetic_orientation_plugin_kernels::applyMagneticField,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, B, moment_);
}
} // namespace mirheo
| 2401598927e86d2edc210324a4e8cc203d79571d.cu | #include "magnetic_orientation.h"
#include <mirheo/core/pvs/rigid_object_vector.h>
#include <mirheo/core/pvs/views/rov.h>
#include <mirheo/core/simulation.h>
#include <mirheo/core/utils/cuda_common.h>
#include <mirheo/core/utils/kernel_launch.h>
#include <mirheo/core/utils/quaternion.h>
namespace mirheo
{
namespace magnetic_orientation_plugin_kernels
{
__global__ void applyMagneticField(ROVview view, real3 B, real3 M)
{
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid >= view.nObjects) return;
const auto q = static_cast<Quaternion<real>>(view.motions[gid].q);
M = q.rotate(M);
const real3 T = cross(M, B);
atomicAdd(&view.motions[gid].torque.x, static_cast<RigidReal>(T.x));
atomicAdd(&view.motions[gid].torque.y, static_cast<RigidReal>(T.y));
atomicAdd(&view.motions[gid].torque.z, static_cast<RigidReal>(T.z));
}
} // namespace magnetic_orientation_plugin_kernels
MagneticOrientationPlugin::MagneticOrientationPlugin(const MirState *state, std::string name, std::string rovName,
real3 moment, UniformMagneticFunc magneticFunction) :
SimulationPlugin(state, name),
rovName_(rovName),
moment_(moment),
magneticFunction_(magneticFunction)
{}
void MagneticOrientationPlugin::setup(Simulation *simulation, const MPI_Comm& comm, const MPI_Comm& interComm)
{
SimulationPlugin::setup(simulation, comm, interComm);
rov_ = dynamic_cast<RigidObjectVector*>( simulation->getOVbyNameOrDie(rovName_) );
if (rov_ == nullptr)
die("Need rigid object vector to interact with magnetic field, plugin '%s', OV name '%s'",
getCName(), rovName_.c_str());
}
void MagneticOrientationPlugin::beforeForces(cudaStream_t stream)
{
ROVview view(rov_, rov_->local());
const int nthreads = 128;
const auto t = getState()->currentTime;
const auto B = magneticFunction_(t);
SAFE_KERNEL_LAUNCH(
magnetic_orientation_plugin_kernels::applyMagneticField,
getNblocks(view.size, nthreads), nthreads, 0, stream,
view, B, moment_);
}
} // namespace mirheo
|
11cd2fbc0c0334546f69b860ef385b4b4fb1b6cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parse_oo.h"
__global__ void initContext(GraphChiContext* context, int vertices, int edges) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid == 0) {
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
}
__global__ void initObject(ChiVertex<int, int>** vertex,
GraphChiContext* context, int* row, int* col,
int* inrow, int* incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
vertex[tid] = new ChiVertex<int, int>(tid, indegree, outdegree);
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], 0);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void initOutEdge(ChiVertex<int, int>** vertex,
GraphChiContext* context, int* row, int* col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0);
}
}
}
__global__ void ConnectedComponent(ChiVertex<int, int>** vertex,
GraphChiContext* context, int iteration) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int numEdges;
numEdges = vertex[tid]->numEdges();
if (iteration == 0) {
int vid = vertex[tid]->getId();
vertex[tid]->setValue(vid);
}
int curMin;
curMin = vertex[tid]->getValue();
for (int i = 0; i < numEdges; i++) {
ChiEdge<int>* edge;
edge = vertex[tid]->edge(i);
int nbLabel;
nbLabel = edge->getValue();
if (iteration == 0) {
nbLabel = edge->getVertexId(); // Note!
}
if (nbLabel < curMin) {
curMin = nbLabel;
}
}
/**
* Set my new label
*/
vertex[tid]->setValue(curMin);
int label = curMin;
/**
* Broadcast my value to neighbors by writing the value to my edges.
*/
if (iteration > 0) {
for (int i = 0; i < numEdges; i++) {
ChiEdge<int>* edge;
edge = vertex[tid]->edge(i);
int edgeValue;
edgeValue = edge->getValue();
if (edgeValue > label) {
edge->setValue(label);
}
}
} else {
// Special case for first iteration to avoid overwriting
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int>* outEdge;
outEdge = vertex[tid]->getOutEdge(i);
outEdge->setValue(label);
}
}
}
}
__global__ void copyBack(ChiVertex<int, int>** vertex, GraphChiContext* context,
int* cc) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
cc[tid] = vertex[tid]->getValue();
}
}
| 11cd2fbc0c0334546f69b860ef385b4b4fb1b6cf.cu | #include "parse_oo.h"
__global__ void initContext(GraphChiContext* context, int vertices, int edges) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid == 0) {
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
}
__global__ void initObject(ChiVertex<int, int>** vertex,
GraphChiContext* context, int* row, int* col,
int* inrow, int* incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
vertex[tid] = new ChiVertex<int, int>(tid, indegree, outdegree);
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], 0);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void initOutEdge(ChiVertex<int, int>** vertex,
GraphChiContext* context, int* row, int* col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0);
}
}
}
__global__ void ConnectedComponent(ChiVertex<int, int>** vertex,
GraphChiContext* context, int iteration) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int numEdges;
numEdges = vertex[tid]->numEdges();
if (iteration == 0) {
int vid = vertex[tid]->getId();
vertex[tid]->setValue(vid);
}
int curMin;
curMin = vertex[tid]->getValue();
for (int i = 0; i < numEdges; i++) {
ChiEdge<int>* edge;
edge = vertex[tid]->edge(i);
int nbLabel;
nbLabel = edge->getValue();
if (iteration == 0) {
nbLabel = edge->getVertexId(); // Note!
}
if (nbLabel < curMin) {
curMin = nbLabel;
}
}
/**
* Set my new label
*/
vertex[tid]->setValue(curMin);
int label = curMin;
/**
* Broadcast my value to neighbors by writing the value to my edges.
*/
if (iteration > 0) {
for (int i = 0; i < numEdges; i++) {
ChiEdge<int>* edge;
edge = vertex[tid]->edge(i);
int edgeValue;
edgeValue = edge->getValue();
if (edgeValue > label) {
edge->setValue(label);
}
}
} else {
// Special case for first iteration to avoid overwriting
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int>* outEdge;
outEdge = vertex[tid]->getOutEdge(i);
outEdge->setValue(label);
}
}
}
}
__global__ void copyBack(ChiVertex<int, int>** vertex, GraphChiContext* context,
int* cc) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
cc[tid] = vertex[tid]->getValue();
}
}
|
cbab8ffc388ecfbbad1ccbd865b6ea724e565add.hip | // !!! This is a file automatically generated by hipify!!!
#include <complex>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cusp/add.cuh>
#include <cusp/helper_cuda.h>
namespace cusp {
template <typename T>
__global__ void kernel_add(const T **ins, T *out, int ninputs, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
T *in = (T *)(*ins);
out[i] = in[i];
for (int j = 1; j < ninputs; j++) {
in = (T*)(*(ins+j));
out[i] += in[i]; //(*(in + j))[i];
}
}
}
template <>
__global__ void kernel_add<thrust::complex<float>>(const thrust::complex<float> **ins,
thrust::complex<float> *out,
int ninputs, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
thrust::complex<float> *in = (thrust::complex<float> *)(*ins);
out[i] = in[i];
for (int j = 1; j < ninputs; j++) {
in = (thrust::complex<float>*)(*(ins+j));
out[i] += in[i]; //(*(in + j))[i];
}
}
}
template <typename T> add<T>::add(int ninputs) : _ninputs(ninputs) {
checkCudaErrors(hipMalloc(&_dev_ptr_array, sizeof(void *) * _ninputs));
}
template <typename T>
hipError_t add<T>::launch(const std::vector<const void *>& inputs, T *output,
int ninputs, int grid_size, int block_size,
size_t nitems, hipStream_t stream) {
// There is a better way to do this here - just getting the pointers into
// device memory
checkCudaErrors(hipMemcpy(_dev_ptr_array, inputs.data(), sizeof(void *) * ninputs,
hipMemcpyHostToDevice));
if (stream) {
hipLaunchKernelGGL(( kernel_add), dim3(grid_size), dim3(block_size), 0, stream, (const T **)_dev_ptr_array,
output, ninputs, nitems);
} else {
hipLaunchKernelGGL(( kernel_add), dim3(grid_size), dim3(block_size), 0, 0, (const T **)_dev_ptr_array, output,
ninputs, nitems);
}
return hipPeekAtLastError();
}
template <>
hipError_t add<std::complex<float>>::launch(const std::vector<const void *>& inputs,
std::complex<float> *output,
int ninputs, int grid_size, int block_size,
size_t nitems, hipStream_t stream) {
// There is a better way to do this here - just getting the pointers into
// device memory
checkCudaErrors(hipMemcpy(_dev_ptr_array, inputs.data(), sizeof(void *) * ninputs,
hipMemcpyHostToDevice));
if (stream) {
hipLaunchKernelGGL(( kernel_add), dim3(grid_size), dim3(block_size), 0, stream, (const thrust::complex<float> **)_dev_ptr_array,
(thrust::complex<float> *)output, ninputs, nitems);
} else {
hipLaunchKernelGGL(( kernel_add), dim3(grid_size), dim3(block_size), 0, 0, (const thrust::complex<float> **)_dev_ptr_array,
(thrust::complex<float> *) output,
ninputs, nitems);
}
return hipPeekAtLastError();
}
template <typename T>
hipError_t add<T>::launch(const std::vector<const void *>& inputs,
const std::vector<void *>& outputs, size_t nitems) {
return launch(inputs, (T *)outputs[0], _ninputs, _grid_size, _block_size,
nitems, _stream);
}
template <typename T>
hipError_t add<T>::occupancy(int *minBlock, int *minGrid) {
return hipOccupancyMaxPotentialBlockSize(minGrid, minBlock, kernel_add<T>, 0,
0);
}
template <>
hipError_t add<std::complex<float>>::occupancy(int *minBlock, int *minGrid) {
return hipOccupancyMaxPotentialBlockSize(minGrid, minBlock,
kernel_add<thrust::complex<float>>,
0, 0);
}
#define IMPLEMENT_KERNEL(T) template class add<T>;
IMPLEMENT_KERNEL(int8_t)
IMPLEMENT_KERNEL(int16_t)
IMPLEMENT_KERNEL(int32_t)
IMPLEMENT_KERNEL(int64_t)
IMPLEMENT_KERNEL(float)
IMPLEMENT_KERNEL(std::complex<float>)
} // namespace cusp | cbab8ffc388ecfbbad1ccbd865b6ea724e565add.cu | #include <complex>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cusp/add.cuh>
#include <cusp/helper_cuda.h>
namespace cusp {
template <typename T>
__global__ void kernel_add(const T **ins, T *out, int ninputs, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
T *in = (T *)(*ins);
out[i] = in[i];
for (int j = 1; j < ninputs; j++) {
in = (T*)(*(ins+j));
out[i] += in[i]; //(*(in + j))[i];
}
}
}
template <>
__global__ void kernel_add<thrust::complex<float>>(const thrust::complex<float> **ins,
thrust::complex<float> *out,
int ninputs, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
thrust::complex<float> *in = (thrust::complex<float> *)(*ins);
out[i] = in[i];
for (int j = 1; j < ninputs; j++) {
in = (thrust::complex<float>*)(*(ins+j));
out[i] += in[i]; //(*(in + j))[i];
}
}
}
template <typename T> add<T>::add(int ninputs) : _ninputs(ninputs) {
checkCudaErrors(cudaMalloc(&_dev_ptr_array, sizeof(void *) * _ninputs));
}
template <typename T>
cudaError_t add<T>::launch(const std::vector<const void *>& inputs, T *output,
int ninputs, int grid_size, int block_size,
size_t nitems, cudaStream_t stream) {
// There is a better way to do this here - just getting the pointers into
// device memory
checkCudaErrors(cudaMemcpy(_dev_ptr_array, inputs.data(), sizeof(void *) * ninputs,
cudaMemcpyHostToDevice));
if (stream) {
kernel_add<<<grid_size, block_size, 0, stream>>>((const T **)_dev_ptr_array,
output, ninputs, nitems);
} else {
kernel_add<<<grid_size, block_size>>>((const T **)_dev_ptr_array, output,
ninputs, nitems);
}
return cudaPeekAtLastError();
}
template <>
cudaError_t add<std::complex<float>>::launch(const std::vector<const void *>& inputs,
std::complex<float> *output,
int ninputs, int grid_size, int block_size,
size_t nitems, cudaStream_t stream) {
// There is a better way to do this here - just getting the pointers into
// device memory
checkCudaErrors(cudaMemcpy(_dev_ptr_array, inputs.data(), sizeof(void *) * ninputs,
cudaMemcpyHostToDevice));
if (stream) {
kernel_add<<<grid_size, block_size, 0, stream>>>((const thrust::complex<float> **)_dev_ptr_array,
(thrust::complex<float> *)output, ninputs, nitems);
} else {
kernel_add<<<grid_size, block_size>>>((const thrust::complex<float> **)_dev_ptr_array,
(thrust::complex<float> *) output,
ninputs, nitems);
}
return cudaPeekAtLastError();
}
template <typename T>
cudaError_t add<T>::launch(const std::vector<const void *>& inputs,
const std::vector<void *>& outputs, size_t nitems) {
return launch(inputs, (T *)outputs[0], _ninputs, _grid_size, _block_size,
nitems, _stream);
}
template <typename T>
cudaError_t add<T>::occupancy(int *minBlock, int *minGrid) {
return cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock, kernel_add<T>, 0,
0);
}
template <>
cudaError_t add<std::complex<float>>::occupancy(int *minBlock, int *minGrid) {
return cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock,
kernel_add<thrust::complex<float>>,
0, 0);
}
#define IMPLEMENT_KERNEL(T) template class add<T>;
IMPLEMENT_KERNEL(int8_t)
IMPLEMENT_KERNEL(int16_t)
IMPLEMENT_KERNEL(int32_t)
IMPLEMENT_KERNEL(int64_t)
IMPLEMENT_KERNEL(float)
IMPLEMENT_KERNEL(std::complex<float>)
} // namespace cusp |
89f42db09d55f021c9390696cf713aefec2e5932.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
__global__
void lambdaTestKernel(int *num)
{
auto func = [&] () { *num = 5; };
func();
}
void testDevice()
{
int num = 0;
int *d_num;
hipMalloc(&d_num, sizeof(int));
hipMemcpy(d_num, &num, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( lambdaTestKernel) , dim3(1), dim3(1), 0, 0, d_num);
hipMemcpy(&num, d_num, sizeof(int), hipMemcpyDeviceToHost);
cout << num << endl;
}
void testHost()
{
char str[] = "Another Hello World!";
void (*func1)() = [] () { cout << "Hello world" << endl; };
func1();
auto func2 = [] () { cout << "Hello world" << endl; };
func2();
auto func3 = [&] () { cout << str << endl; };
func3();
}
int main()
{
testHost();
testDevice();
return 0;
} | 89f42db09d55f021c9390696cf713aefec2e5932.cu | #include <iostream>
using namespace std;
__global__
void lambdaTestKernel(int *num)
{
auto func = [&] () { *num = 5; };
func();
}
void testDevice()
{
int num = 0;
int *d_num;
cudaMalloc(&d_num, sizeof(int));
cudaMemcpy(d_num, &num, sizeof(int), cudaMemcpyHostToDevice);
lambdaTestKernel <<<1, 1>>> (d_num);
cudaMemcpy(&num, d_num, sizeof(int), cudaMemcpyDeviceToHost);
cout << num << endl;
}
void testHost()
{
char str[] = "Another Hello World!";
void (*func1)() = [] () { cout << "Hello world" << endl; };
func1();
auto func2 = [] () { cout << "Hello world" << endl; };
func2();
auto func3 = [&] () { cout << str << endl; };
func3();
}
int main()
{
testHost();
testDevice();
return 0;
} |
87e311ecedf3b567500d7629ca823d15efa521b4.hip | // !!! This is a file automatically generated by hipify!!!
//
// File: DeepLearningNetwork.cu
//
// GPU Coder version : 2.0
// CUDA/C/C++ source code generated on : 15-Dec-2020 12:44:50
//
// Include Files
#include "DeepLearningNetwork.h"
#include "detect_lane_internal_types.h"
#include "MWElementwiseAffineLayer.hpp"
#include "MWFusedConvReLULayer.hpp"
#include "MWTargetNetworkImpl.hpp"
#include "cnn_api.hpp"
#include <cstdio>
const char *errorString =
"Abnormal termination due to: %s.\nError in %s (line %d).";
// Function Declarations
static void checkCleanupCudaError(hipError_t errCode, const char *file,
unsigned int line);
// Function Definitions
//
// Arguments : void
// Return Type : void
//
void lanenet0_0::allocate() {
this->targetImpl->allocate(290400, 2);
for (int idx = 0; idx < 18; idx++) {
this->layers[idx]->allocate();
}
(static_cast<MWTensor<float> *>(this->inputTensors[0]))->setData(this->layers
[0]->getLayerOutput(0));
}
//
// Arguments : void
// Return Type : void
//
void lanenet0_0::cleanup() {
this->deallocate();
for (int idx = 0; idx < 18; idx++) {
this->layers[idx]->cleanup();
}
if (this->targetImpl) {
this->targetImpl->cleanup();
}
}
//
// Arguments : void
// Return Type : void
//
void lanenet0_0::deallocate() {
this->targetImpl->deallocate();
for (int idx = 0; idx < 18; idx++) {
this->layers[idx]->deallocate();
}
}
//
// Arguments : void
// Return Type : void
//
void lanenet0_0::postsetup() {
this->targetImpl->postSetup(this->layers, this->numLayers);
}
//
// Arguments : void
// Return Type : void
//
void lanenet0_0::setSize() {
for (int idx = 0; idx < 18; idx++) {
this->layers[idx]->propagateSize();
}
this->allocate();
this->postsetup();
}
//
// Arguments : void
// Return Type : void
//
void lanenet0_0::setup() {
this->targetImpl->preSetup();
this->targetImpl->setAutoTune(true);
(static_cast<MWInputLayer *>(this->layers[0]))->createInputLayer
(this->targetImpl, this->inputTensors[0], 227, 227, 3, 0, "", 0);
(static_cast<MWElementwiseAffineLayer *>(this->layers[1]))
->createElementwiseAffineLayer(this->targetImpl, this->layers[0]
->getOutputTensor(0), 227, 227, 3, 227, 227, 3, false, 1, 1,
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_data_scale.bin",
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_data_offset.bin", 0);
(static_cast<MWFusedConvReLULayer *>(this->layers[2]))
->createFusedConvReLULayer(this->targetImpl, 1, this->layers[1]
->getOutputTensor(0), 11, 11, 3, 96, 4, 4, 0, 0, 0, 0, 1, 1, 1,
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv1_w.bin",
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv1_b.bin", 1);
(static_cast<MWNormLayer *>(this->layers[3]))->createNormLayer
(this->targetImpl, this->layers[2]->getOutputTensor(0), 5, 0.0001, 0.75, 1.0,
0);
(static_cast<MWMaxPoolingLayer *>(this->layers[4]))->createMaxPoolingLayer
(this->targetImpl, this->layers[3]->getOutputTensor(0), 3, 3, 2, 2, 0, 0, 0,
0, 0, 1, 1);
(static_cast<MWFusedConvReLULayer *>(this->layers[5]))
->createFusedConvReLULayer(this->targetImpl, 1, this->layers[4]
->getOutputTensor(0), 5, 5, 48, 128, 1, 1, 2, 2, 2, 2, 1, 1, 2,
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv2_w.bin",
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv2_b.bin", 0);
(static_cast<MWNormLayer *>(this->layers[6]))->createNormLayer
(this->targetImpl, this->layers[5]->getOutputTensor(0), 5, 0.0001, 0.75, 1.0,
1);
(static_cast<MWMaxPoolingLayer *>(this->layers[7]))->createMaxPoolingLayer
(this->targetImpl, this->layers[6]->getOutputTensor(0), 3, 3, 2, 2, 0, 0, 0,
0, 0, 1, 0);
(static_cast<MWFusedConvReLULayer *>(this->layers[8]))
->createFusedConvReLULayer(this->targetImpl, 1, this->layers[7]
->getOutputTensor(0), 3, 3, 256, 384, 1, 1, 1, 1, 1, 1, 1, 1, 1,
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv3_w.bin",
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv3_b.bin", 1);
(static_cast<MWFusedConvReLULayer *>(this->layers[9]))
->createFusedConvReLULayer(this->targetImpl, 1, this->layers[8]
->getOutputTensor(0), 3, 3, 192, 192, 1, 1, 1, 1, 1, 1, 1, 1, 2,
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv4_w.bin",
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv4_b.bin", 0);
(static_cast<MWFusedConvReLULayer *>(this->layers[10]))
->createFusedConvReLULayer(this->targetImpl, 1, this->layers[9]
->getOutputTensor(0), 3, 3, 192, 128, 1, 1, 1, 1, 1, 1, 1, 1, 2,
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv5_w.bin",
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv5_b.bin", 1);
(static_cast<MWMaxPoolingLayer *>(this->layers[11]))->createMaxPoolingLayer
(this->targetImpl, this->layers[10]->getOutputTensor(0), 3, 3, 2, 2, 0, 0, 0,
0, 0, 1, 0);
(static_cast<MWFCLayer *>(this->layers[12]))->createFCLayer(this->targetImpl,
this->layers[11]->getOutputTensor(0), 9216, 4096,
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fc6_w.bin",
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fc6_b.bin",
1);
(static_cast<MWReLULayer *>(this->layers[13]))->createReLULayer
(this->targetImpl, this->layers[12]->getOutputTensor(0), 1);
(static_cast<MWFCLayer *>(this->layers[14]))->createFCLayer(this->targetImpl,
this->layers[13]->getOutputTensor(0), 4096, 16,
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fcLane1_w.bin",
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fcLane1_b.bin",
0);
(static_cast<MWReLULayer *>(this->layers[15]))->createReLULayer
(this->targetImpl, this->layers[14]->getOutputTensor(0), 0);
(static_cast<MWFCLayer *>(this->layers[16]))->createFCLayer(this->targetImpl,
this->layers[15]->getOutputTensor(0), 16, 6,
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fcLane2_w.bin",
"/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fcLane2_b.bin",
1);
(static_cast<MWOutputLayer *>(this->layers[17]))->createOutputLayer
(this->targetImpl, this->layers[16]->getOutputTensor(0), 1);
this->outputTensors[0] = this->layers[17]->getOutputTensor(0);
this->setSize();
}
//
// Arguments : hipError_t errCode
// const char *file
// unsigned int line
// Return Type : void
//
static void checkCleanupCudaError(hipError_t errCode, const char *file,
unsigned int line) {
if ((errCode != hipSuccess) && (errCode != hipErrorDeinitialized)) {
printf(errorString, hipGetErrorString(errCode), file, line);
}
}
//
// Arguments : void
// Return Type : int
//
int lanenet0_0::getBatchSize() {
return this->inputTensors[0]->getBatchSize();
}
//
// Arguments : int b_index
// Return Type : float *
//
float *lanenet0_0::getInputDataPointer(int b_index) {
return (static_cast<MWTensor<float> *>(this->inputTensors[b_index]))->getData();
}
//
// Arguments : void
// Return Type : float *
//
float *lanenet0_0::getInputDataPointer() {
return (static_cast<MWTensor<float> *>(this->inputTensors[0]))->getData();
}
//
// Arguments : int layerIndex
// int portIndex
// Return Type : float *
//
float *lanenet0_0::getLayerOutput(int layerIndex, int portIndex) {
return this->layers[layerIndex]->getLayerOutput(portIndex);
}
//
// Arguments : int b_index
// Return Type : float *
//
float *lanenet0_0::getOutputDataPointer(int b_index) {
return (static_cast<MWTensor<float> *>(this->outputTensors[b_index]))->getData
();
}
//
// Arguments : void
// Return Type : float *
//
float *lanenet0_0::getOutputDataPointer() {
return (static_cast<MWTensor<float> *>(this->outputTensors[0]))->getData();
}
//
// Arguments : void
// Return Type : void
//
lanenet0_0::lanenet0_0() {
this->numLayers = 18;
this->targetImpl = 0;
this->layers[0] = new MWInputLayer;
this->layers[0]->setName("data");
this->layers[1] = new MWElementwiseAffineLayer;
this->layers[1]->setName("data_normalization");
this->layers[1]->setInPlaceIndex(0, 0);
this->layers[2] = new MWFusedConvReLULayer;
this->layers[2]->setName("conv1_relu1");
this->layers[3] = new MWNormLayer;
this->layers[3]->setName("norm1");
this->layers[4] = new MWMaxPoolingLayer;
this->layers[4]->setName("pool1");
this->layers[5] = new MWFusedConvReLULayer;
this->layers[5]->setName("conv2_relu2");
this->layers[6] = new MWNormLayer;
this->layers[6]->setName("norm2");
this->layers[7] = new MWMaxPoolingLayer;
this->layers[7]->setName("pool2");
this->layers[8] = new MWFusedConvReLULayer;
this->layers[8]->setName("conv3_relu3");
this->layers[9] = new MWFusedConvReLULayer;
this->layers[9]->setName("conv4_relu4");
this->layers[10] = new MWFusedConvReLULayer;
this->layers[10]->setName("conv5_relu5");
this->layers[11] = new MWMaxPoolingLayer;
this->layers[11]->setName("pool5");
this->layers[12] = new MWFCLayer;
this->layers[12]->setName("fc6");
this->layers[13] = new MWReLULayer;
this->layers[13]->setName("relu6");
this->layers[13]->setInPlaceIndex(0, 0);
this->layers[14] = new MWFCLayer;
this->layers[14]->setName("fcLane1");
this->layers[15] = new MWReLULayer;
this->layers[15]->setName("fcLane1Relu");
this->layers[15]->setInPlaceIndex(0, 0);
this->layers[16] = new MWFCLayer;
this->layers[16]->setName("fcLane2");
this->layers[17] = new MWOutputLayer;
this->layers[17]->setName("output");
this->layers[17]->setInPlaceIndex(0, 0);
this->targetImpl = new MWTargetNetworkImpl;
this->inputTensors[0] = new MWTensor<float>;
this->inputTensors[0]->setHeight(227);
this->inputTensors[0]->setWidth(227);
this->inputTensors[0]->setChannels(3);
this->inputTensors[0]->setBatchSize(1);
this->inputTensors[0]->setSequenceLength(1);
}
//
// Arguments : void
// Return Type : void
//
// Destructor: releases device-side resources via cleanup(), reports (without
// throwing) any pending HIP error, then destroys every layer object, the
// target implementation, and the input tensor descriptor.
lanenet0_0::~lanenet0_0() {
  this->cleanup();
  checkCleanupCudaError(hipGetLastError(), __FILE__, __LINE__);
  for (int k = 0; k < 18; ++k) {
    delete this->layers[k];
  }
  if (this->targetImpl != 0) {
    delete this->targetImpl;
  }
  delete this->inputTensors[0];
}
//
// Arguments : void
// Return Type : void
//
// Run one forward pass by invoking every layer's predict() in graph order.
void lanenet0_0::predict() {
  for (int k = 0; k < 18; ++k) {
    this->layers[k]->predict();
  }
}
//
// Arguments : void
// Return Type : void
//
// Intentionally empty: this network has no recurrent/stateful layers to reset.
void lanenet0_0::resetState() {
}
//
// Arguments : lanenet0_0 *obj
// Return Type : void
//
namespace coder {
// Generated public entry point: initializes the network (layer wiring,
// weight loading, buffer allocation) by delegating to lanenet0_0::setup().
void DeepLearningNetwork_setup(lanenet0_0 *obj) {
  obj->setup();
}
}
//
// File trailer for DeepLearningNetwork.cu
//
// [EOF]
//
| 87e311ecedf3b567500d7629ca823d15efa521b4.cu | //
// File: DeepLearningNetwork.cu
//
// GPU Coder version : 2.0
// CUDA/C/C++ source code generated on : 15-Dec-2020 12:44:50
//
// Include Files
#include "DeepLearningNetwork.h"
#include "detect_lane_internal_types.h"
#include "MWElementwiseAffineLayer.hpp"
#include "MWFusedConvReLULayer.hpp"
#include "MWTargetNetworkImpl.hpp"
#include "cnn_api.hpp"
#include <cstdio>
const char *errorString =
"Abnormal termination due to: %s.\nError in %s (line %d).";
// Function Declarations
static void checkCleanupCudaError(cudaError_t errCode, const char *file,
unsigned int line);
// Function Definitions
//
// Arguments : void
// Return Type : void
//
// Allocate device memory for the network: first the target implementation's
// shared buffer, then each layer's own buffers. Finally point the input
// tensor at layer 0's output buffer so callers write image data directly
// into the input layer's storage.
void lanenet0_0::allocate() {
  this->targetImpl->allocate(290400, 2);
  for (int idx = 0; idx < 18; idx++) {
    this->layers[idx]->allocate();
  }
  (static_cast<MWTensor<float> *>(this->inputTensors[0]))->setData(this->layers
    [0]->getLayerOutput(0));
}
//
// Arguments : void
// Return Type : void
//
// Release all device-side resources: return the memory obtained in
// allocate(), then let each layer and the target implementation clean up.
void lanenet0_0::cleanup() {
  this->deallocate();
  for (int k = 0; k < 18; ++k) {
    this->layers[k]->cleanup();
  }
  if (this->targetImpl != 0) {
    this->targetImpl->cleanup();
  }
}
//
// Arguments : void
// Return Type : void
//
// Free the memory acquired in allocate(): the target implementation's shared
// buffer first, then every layer's private buffers.
void lanenet0_0::deallocate() {
  this->targetImpl->deallocate();
  for (int k = 0; k < 18; ++k) {
    this->layers[k]->deallocate();
  }
}
//
// Arguments : void
// Return Type : void
//
// Post-allocation hook: gives the target implementation a pass over the fully
// wired layer list once sizes and buffers are in place.
void lanenet0_0::postsetup() {
  this->targetImpl->postSetup(this->layers, this->numLayers);
}
//
// Arguments : void
// Return Type : void
//
// Propagate tensor sizes through all layers in order, then allocate device
// buffers and run the target implementation's post-setup pass.
void lanenet0_0::setSize() {
  for (int idx = 0; idx < 18; idx++) {
    this->layers[idx]->propagateSize();
  }
  this->allocate();
  this->postsetup();
}
//
// Arguments : void
// Return Type : void
//
// Wire every layer to its predecessor's output tensor and register the
// trained weight/bias .bin files generated by GPU Coder. The numeric
// arguments are generated layer hyper-parameters (filter sizes, strides,
// padding, grouping, ...) and should not be edited by hand. Ends by
// recording the final output tensor and triggering size propagation and
// allocation via setSize().
void lanenet0_0::setup() {
  this->targetImpl->preSetup();
  this->targetImpl->setAutoTune(true);
  // data: 227x227x3 input image.
  (static_cast<MWInputLayer *>(this->layers[0]))->createInputLayer
    (this->targetImpl, this->inputTensors[0], 227, 227, 3, 0, "", 0);
  // data_normalization: per-element affine (scale/offset) transform.
  (static_cast<MWElementwiseAffineLayer *>(this->layers[1]))
    ->createElementwiseAffineLayer(this->targetImpl, this->layers[0]
    ->getOutputTensor(0), 227, 227, 3, 227, 227, 3, false, 1, 1,
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_data_scale.bin",
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_data_offset.bin", 0);
  // conv1 + relu1 (fused).
  (static_cast<MWFusedConvReLULayer *>(this->layers[2]))
    ->createFusedConvReLULayer(this->targetImpl, 1, this->layers[1]
    ->getOutputTensor(0), 11, 11, 3, 96, 4, 4, 0, 0, 0, 0, 1, 1, 1,
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv1_w.bin",
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv1_b.bin", 1);
  (static_cast<MWNormLayer *>(this->layers[3]))->createNormLayer
    (this->targetImpl, this->layers[2]->getOutputTensor(0), 5, 0.0001, 0.75, 1.0,
     0);
  (static_cast<MWMaxPoolingLayer *>(this->layers[4]))->createMaxPoolingLayer
    (this->targetImpl, this->layers[3]->getOutputTensor(0), 3, 3, 2, 2, 0, 0, 0,
     0, 0, 1, 1);
  // conv2 + relu2 (fused).
  (static_cast<MWFusedConvReLULayer *>(this->layers[5]))
    ->createFusedConvReLULayer(this->targetImpl, 1, this->layers[4]
    ->getOutputTensor(0), 5, 5, 48, 128, 1, 1, 2, 2, 2, 2, 1, 1, 2,
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv2_w.bin",
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv2_b.bin", 0);
  (static_cast<MWNormLayer *>(this->layers[6]))->createNormLayer
    (this->targetImpl, this->layers[5]->getOutputTensor(0), 5, 0.0001, 0.75, 1.0,
     1);
  (static_cast<MWMaxPoolingLayer *>(this->layers[7]))->createMaxPoolingLayer
    (this->targetImpl, this->layers[6]->getOutputTensor(0), 3, 3, 2, 2, 0, 0, 0,
     0, 0, 1, 0);
  // conv3..conv5 + their ReLUs (fused).
  (static_cast<MWFusedConvReLULayer *>(this->layers[8]))
    ->createFusedConvReLULayer(this->targetImpl, 1, this->layers[7]
    ->getOutputTensor(0), 3, 3, 256, 384, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv3_w.bin",
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv3_b.bin", 1);
  (static_cast<MWFusedConvReLULayer *>(this->layers[9]))
    ->createFusedConvReLULayer(this->targetImpl, 1, this->layers[8]
    ->getOutputTensor(0), 3, 3, 192, 192, 1, 1, 1, 1, 1, 1, 1, 1, 2,
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv4_w.bin",
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv4_b.bin", 0);
  (static_cast<MWFusedConvReLULayer *>(this->layers[10]))
    ->createFusedConvReLULayer(this->targetImpl, 1, this->layers[9]
    ->getOutputTensor(0), 3, 3, 192, 128, 1, 1, 1, 1, 1, 1, 1, 1, 2,
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv5_w.bin",
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_conv5_b.bin", 1);
  (static_cast<MWMaxPoolingLayer *>(this->layers[11]))->createMaxPoolingLayer
    (this->targetImpl, this->layers[10]->getOutputTensor(0), 3, 3, 2, 2, 0, 0, 0,
     0, 0, 1, 0);
  // Fully connected head: fc6 (9216 -> 4096), fcLane1 (4096 -> 16),
  // fcLane2 (16 -> 6 lane coefficients).
  (static_cast<MWFCLayer *>(this->layers[12]))->createFCLayer(this->targetImpl,
    this->layers[11]->getOutputTensor(0), 9216, 4096,
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fc6_w.bin",
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fc6_b.bin",
    1);
  (static_cast<MWReLULayer *>(this->layers[13]))->createReLULayer
    (this->targetImpl, this->layers[12]->getOutputTensor(0), 1);
  (static_cast<MWFCLayer *>(this->layers[14]))->createFCLayer(this->targetImpl,
    this->layers[13]->getOutputTensor(0), 4096, 16,
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fcLane1_w.bin",
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fcLane1_b.bin",
    0);
  (static_cast<MWReLULayer *>(this->layers[15]))->createReLULayer
    (this->targetImpl, this->layers[14]->getOutputTensor(0), 0);
  (static_cast<MWFCLayer *>(this->layers[16]))->createFCLayer(this->targetImpl,
    this->layers[15]->getOutputTensor(0), 16, 6,
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fcLane2_w.bin",
    "/home/dinhnambkhn/CLionProjects/GPUDeep_lanenet/detect_lane/cnn_lanenet0_0_fcLane2_b.bin",
    1);
  (static_cast<MWOutputLayer *>(this->layers[17]))->createOutputLayer
    (this->targetImpl, this->layers[16]->getOutputTensor(0), 1);
  this->outputTensors[0] = this->layers[17]->getOutputTensor(0);
  this->setSize();
}
//
// Arguments : cudaError_t errCode
// const char *file
// unsigned int line
// Return Type : void
//
// Report (without aborting) any CUDA error observed during destruction.
// cudaErrorCudartUnloading is expected when the runtime is already shutting
// down at process exit, so it is deliberately treated as benign.
static void checkCleanupCudaError(cudaError_t errCode, const char *file,
  unsigned int line) {
  const bool benign = (errCode == cudaSuccess) ||
                      (errCode == cudaErrorCudartUnloading);
  if (!benign) {
    printf(errorString, cudaGetErrorString(errCode), file, line);
  }
}
//
// Arguments : void
// Return Type : int
//
// Returns the input batch size (fixed to 1 by the constructor).
int lanenet0_0::getBatchSize() {
  return this->inputTensors[0]->getBatchSize();
}
//
// Arguments : int b_index
// Return Type : float *
//
// Returns the raw data pointer of the b_index-th input tensor.
float *lanenet0_0::getInputDataPointer(int b_index) {
  MWTensor<float> *tensor =
      static_cast<MWTensor<float> *>(this->inputTensors[b_index]);
  return tensor->getData();
}
//
// Arguments : void
// Return Type : float *
//
// Convenience overload: raw data pointer of the first input tensor.
float *lanenet0_0::getInputDataPointer() {
  return (static_cast<MWTensor<float> *>(this->inputTensors[0]))->getData();
}
//
// Arguments : int layerIndex
// int portIndex
// Return Type : float *
//
// Returns the output buffer of layer `layerIndex` at port `portIndex`.
float *lanenet0_0::getLayerOutput(int layerIndex, int portIndex) {
  return this->layers[layerIndex]->getLayerOutput(portIndex);
}
//
// Arguments : int b_index
// Return Type : float *
//
// Returns the raw data pointer of the b_index-th output tensor.
float *lanenet0_0::getOutputDataPointer(int b_index) {
  MWTensor<float> *tensor =
      static_cast<MWTensor<float> *>(this->outputTensors[b_index]);
  return tensor->getData();
}
//
// Arguments : void
// Return Type : float *
//
// Convenience overload: raw data pointer of the first output tensor.
float *lanenet0_0::getOutputDataPointer() {
  return (static_cast<MWTensor<float> *>(this->outputTensors[0]))->getData();
}
//
// Arguments : void
// Return Type : void
//
// Constructor: builds the 18-layer network skeleton (AlexNet-style trunk
// followed by the lane-coefficient FC head). Only the layer objects, the
// target implementation and the input tensor descriptor are created here;
// sizes and weights are wired later in setup().
lanenet0_0::lanenet0_0() {
  this->numLayers = 18;
  this->targetImpl = 0;
  this->layers[0] = new MWInputLayer;
  this->layers[0]->setName("data");
  this->layers[1] = new MWElementwiseAffineLayer;
  this->layers[1]->setName("data_normalization");
  // In-place layers alias output port 0 onto input port 0 (no extra buffer).
  this->layers[1]->setInPlaceIndex(0, 0);
  this->layers[2] = new MWFusedConvReLULayer;
  this->layers[2]->setName("conv1_relu1");
  this->layers[3] = new MWNormLayer;
  this->layers[3]->setName("norm1");
  this->layers[4] = new MWMaxPoolingLayer;
  this->layers[4]->setName("pool1");
  this->layers[5] = new MWFusedConvReLULayer;
  this->layers[5]->setName("conv2_relu2");
  this->layers[6] = new MWNormLayer;
  this->layers[6]->setName("norm2");
  this->layers[7] = new MWMaxPoolingLayer;
  this->layers[7]->setName("pool2");
  this->layers[8] = new MWFusedConvReLULayer;
  this->layers[8]->setName("conv3_relu3");
  this->layers[9] = new MWFusedConvReLULayer;
  this->layers[9]->setName("conv4_relu4");
  this->layers[10] = new MWFusedConvReLULayer;
  this->layers[10]->setName("conv5_relu5");
  this->layers[11] = new MWMaxPoolingLayer;
  this->layers[11]->setName("pool5");
  this->layers[12] = new MWFCLayer;
  this->layers[12]->setName("fc6");
  this->layers[13] = new MWReLULayer;
  this->layers[13]->setName("relu6");
  this->layers[13]->setInPlaceIndex(0, 0);
  this->layers[14] = new MWFCLayer;
  this->layers[14]->setName("fcLane1");
  this->layers[15] = new MWReLULayer;
  this->layers[15]->setName("fcLane1Relu");
  this->layers[15]->setInPlaceIndex(0, 0);
  this->layers[16] = new MWFCLayer;
  this->layers[16]->setName("fcLane2");
  this->layers[17] = new MWOutputLayer;
  this->layers[17]->setName("output");
  this->layers[17]->setInPlaceIndex(0, 0);
  this->targetImpl = new MWTargetNetworkImpl;
  // Input descriptor: a single 227x227x3 image, non-sequence network.
  this->inputTensors[0] = new MWTensor<float>;
  this->inputTensors[0]->setHeight(227);
  this->inputTensors[0]->setWidth(227);
  this->inputTensors[0]->setChannels(3);
  this->inputTensors[0]->setBatchSize(1);
  this->inputTensors[0]->setSequenceLength(1);
}
//
// Arguments : void
// Return Type : void
//
// Destructor: releases device-side resources via cleanup(), reports (without
// throwing) any pending CUDA error, then destroys every layer object, the
// target implementation, and the input tensor descriptor.
lanenet0_0::~lanenet0_0() {
  this->cleanup();
  checkCleanupCudaError(cudaGetLastError(), __FILE__, __LINE__);
  for (int idx = 0; idx < 18; idx++) {
    delete this->layers[idx];
  }
  if (this->targetImpl) {
    delete this->targetImpl;
  }
  delete this->inputTensors[0];
}
//
// Arguments : void
// Return Type : void
//
// Run one forward pass by invoking every layer's predict() in graph order.
void lanenet0_0::predict() {
  for (int idx = 0; idx < 18; idx++) {
    this->layers[idx]->predict();
  }
}
//
// Arguments : void
// Return Type : void
//
// Intentionally empty: this network has no recurrent/stateful layers to reset.
void lanenet0_0::resetState() {
}
//
// Arguments : lanenet0_0 *obj
// Return Type : void
//
namespace coder {
// Generated public entry point: initializes the network (layer wiring,
// weight loading, buffer allocation) by delegating to lanenet0_0::setup().
void DeepLearningNetwork_setup(lanenet0_0 *obj) {
  obj->setup();
}
}
//
// File trailer for DeepLearningNetwork.cu
//
// [EOF]
//
|
5adb42540e1c8e39c1af19d249d88afc9fbd6439.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
// Base application describing a 100-step random walk for the sampling
// framework. Concrete walks (DeepWalk, PPR, node2vec) derive from this and
// supply the per-step transition rule in next().
struct RandomWalkApp {
  // Number of sampling steps each walk performs.
  __host__ __device__ int steps() { return 100; }
  // One vertex is sampled per transit at every step.
  __host__ __device__
  int stepSize(int k) { return 1; }
  __host__ __device__ int samplingType() {
    return SamplingType::IndividualNeighborhood;
  }
  __host__ __device__ OutputFormat outputFormat() {
    return SampledVertices;
  }
#define VERTICES_PER_SAMPLE 1
  // Small graphs (< 256K vertices) are oversampled 100x; large graphs get one
  // sample per vertex.
  __host__ __device__ EdgePos_t numSamples(CSR* graph) {
    if (graph->get_n_vertices() < 256*1024) {
      return 100 * graph->get_n_vertices();
    }
    return graph->get_n_vertices();
  }
  // Each sample starts from a single root vertex chosen round-robin over the
  // vertex set.
  template<class SampleType>
  __host__ std::vector<VertexID_t> initialSample(int sampleIdx, CSR* graph, SampleType& sample)
  {
    std::vector<VertexID_t> roots;
    for (int v = 0; v < VERTICES_PER_SAMPLE; v++) {
      roots.push_back(sampleIdx % graph->get_n_vertices());
    }
    return roots;
  }
  __host__ __device__ EdgePos_t initialSampleSize(CSR* graph) {
    return VERTICES_PER_SAMPLE;
  }
  // This app does not provide explicit transits.
  __host__ __device__ bool hasExplicitTransits() {
    return false;
  }
  // Unused because hasExplicitTransits() is false.
  template<class SampleType>
  __host__ __device__ VertexID_t stepTransits(int step, const VertexID_t sampleID, SampleType& sample, int transitIdx, hiprandState_t* randState)
  {
    return -1;
  }
  // Per-sample state needs nothing beyond default construction.
  template<class SampleType>
  __host__ SampleType initializeSample(CSR* graph, const VertexID_t sampleID)
  {
    return SampleType();
  }
};
// DeepWalk transition rule: pick an out-edge of the current transit by
// rejection sampling against the edge weights, so an edge is accepted with
// probability proportional to its weight.
struct DeepWalkApp : public RandomWalkApp {
  template<typename SampleType, typename EdgeArray, typename WeightArray>
  __device__ inline
  VertexID next(int step, CSRPartition* csr, const VertexID* transit, const VertexID sampleIdx,
                SampleType* sample,
                const float max_weight,
                EdgeArray& transitEdges, WeightArray& transitEdgeWeights,
                const EdgePos_t numEdges, const VertexID_t neighbrID, hiprandState_t* state)
  {
    // Dead end: the walk stops here.
    if (numEdges == 0) {
      return -1;
    }
    // Single neighbor: no randomness required.
    if (numEdges == 1) {
      return transitEdges[0];
    }
    // Rejection sampling: draw a uniform candidate edge and a threshold in
    // [0, max_weight); accept once the threshold falls below the edge weight.
    EdgePos_t candidate;
    float threshold;
    do {
      candidate = RandNumGen::rand_int(state, numEdges);
      threshold = hiprand_uniform(state)*max_weight;
    } while (threshold > transitEdgeWeights[candidate]);
    return transitEdges[candidate];
  }
};
// PPR-style walk: at every step the walk terminates with probability 1%
// (walkEndProb); otherwise it moves to a weighted random neighbor chosen by
// the same rejection-sampling scheme as DeepWalkApp.
struct PPRApp : public RandomWalkApp {
  template<typename SampleType, typename EdgeArray, typename WeightArray>
  __device__ inline
  VertexID next(int step, CSRPartition* csr, const VertexID* transit, const VertexID sampleIdx,
                SampleType* sample,
                const float max_weight,
                EdgeArray& transitEdges, WeightArray& transitEdgeWeights,
                const EdgePos_t numEdges, const VertexID_t neighbrID, hiprandState_t* state)
  {
    // Dead end: terminate the walk.
    if (numEdges == 0) {
      return -1;
    }
    // Random termination with probability walkEndProb.
    const float walkEndProb = 0.01f;
    float p = hiprand_uniform(state);
    if (p < walkEndProb) {
      return -1;
    }
    if (numEdges == 1) {
      return transitEdges[0];
    }
    // Weighted rejection sampling over the out-edges of the current transit.
    EdgePos_t x = RandNumGen::rand_int(state, numEdges);
    float y = hiprand_uniform(state)*max_weight;
    while (y > transitEdgeWeights[x]) {
      x = RandNumGen::rand_int(state, numEdges);
      y = hiprand_uniform(state)*max_weight;
    }
    return transitEdges[x];
  }
};
// Empty per-sample state for walks that need no extra bookkeeping.
class DummySample
{
};
// node2vec second-order walk. sample->t remembers the previous vertex so the
// transition bias relative to it can be computed; candidates are drawn
// uniformly and accepted by rejection sampling with a per-candidate weight h
// bounded by max(p, 1/q, 1).
struct Node2VecApp : public RandomWalkApp {
  template<typename SampleType, typename EdgeArray, typename WeightArray>
  __device__ inline
  VertexID next(int step, CSRPartition* csr, const VertexID* transits, const VertexID sampleIdx,
                SampleType* sample,
                const float max_weight,
                EdgeArray& transitEdges, WeightArray& transitEdgeWeights,
                const EdgePos_t numEdges, const VertexID_t neighbrID, hiprandState_t* state)
  {
    // Dead end: terminate the walk.
    if (numEdges == 0) {
      return -1;
    }
    // Single edge, or first hop (no previous vertex yet): take the first
    // edge and remember the current vertex as the new "previous" vertex t.
    if (numEdges == 1 || step == 0) {
      sample->t = *transits;
      return transitEdges[0];
    }
    const float p = 2.0f;
    const float q = 0.5f;
    do {
      EdgePos_t x = RandNumGen::rand_int(state, numEdges);
      VertexID v = transitEdges[x];
      // Rejection threshold in [0, max over the possible weights {p, 1/q, 1}).
      float y = hiprand_uniform(state)*max(max(p, 1/q), 1.0f);
      const CSR::Edge* tEdges = csr->get_edges(sample->t);
      EdgePos_t tNumEdges = csr->get_n_edges_for_vertex(sample->t);
      float h;
      // BUG FIX: compare the candidate *vertex* v with the previous vertex t.
      // The original compared the edge index x with t, conflating an index
      // into the adjacency list with a vertex id.
      if (v == sample->t) {
        h = p;          // candidate returns to the previous vertex
      } else if (utils::binarySearch(tEdges, v, tNumEdges)) {
        h = 1/q;        // candidate is also a neighbor of the previous vertex
      } else {
        h = 1.0f;
      }
      if (y < h) {
        sample->t = *transits;
        return v;
      }
    } while (true);
  }
};
class Node2VecSample {
public:
VertexID t;
}; | 5adb42540e1c8e39c1af19d249d88afc9fbd6439.cu | #include <stdlib.h>
// Base application describing a 100-step random walk for the sampling
// framework. Concrete walks (DeepWalk, PPR, node2vec) derive from this and
// supply the per-step transition rule in next().
struct RandomWalkApp {
  // Number of sampling steps each walk performs.
  __host__ __device__ int steps() {return 100;}
  // One vertex is sampled per transit at every step.
  __host__ __device__
  int stepSize(int k) {
    return 1;
  }
  __host__ __device__ int samplingType()
  {
    return SamplingType::IndividualNeighborhood;
  }
  __host__ __device__ OutputFormat outputFormat()
  {
    return SampledVertices;
  }
#define VERTICES_PER_SAMPLE 1
  // Small graphs (< 256K vertices) are oversampled 100x; large graphs get one
  // sample per vertex.
  __host__ __device__ EdgePos_t numSamples(CSR* graph)
  {
    return graph->get_n_vertices() < 256*1024 ? 100 * graph->get_n_vertices() : graph->get_n_vertices();
  }
  // Each sample starts from one root vertex chosen round-robin over the
  // vertex set.
  template<class SampleType>
  __host__ std::vector<VertexID_t> initialSample(int sampleIdx, CSR* graph, SampleType& sample)
  {
    std::vector<VertexID_t> initialValue;
    for (int i = 0; i < VERTICES_PER_SAMPLE; i++) {
      initialValue.push_back(sampleIdx%graph->get_n_vertices());
    }
    return initialValue;
  }
  __host__ __device__ EdgePos_t initialSampleSize(CSR* graph)
  {
    return VERTICES_PER_SAMPLE;
  }
  // This app does not provide explicit transits.
  __host__ __device__ bool hasExplicitTransits()
  {
    return false;
  }
  // Unused because hasExplicitTransits() is false.
  template<class SampleType>
  __host__ __device__ VertexID_t stepTransits(int step, const VertexID_t sampleID, SampleType& sample, int transitIdx, curandState* randState)
  {
    return -1;
  }
  // Per-sample state needs nothing beyond default construction.
  template<class SampleType>
  __host__ SampleType initializeSample(CSR* graph, const VertexID_t sampleID)
  {
    SampleType sample = SampleType ();
    return sample;
  }
};
// DeepWalk transition rule: pick an out-edge of the current transit by
// rejection sampling against the edge weights, so an edge is accepted with
// probability proportional to its weight.
struct DeepWalkApp : public RandomWalkApp {
  template<typename SampleType, typename EdgeArray, typename WeightArray>
  __device__ inline
  VertexID next(int step, CSRPartition* csr, const VertexID* transit, const VertexID sampleIdx,
                SampleType* sample,
                const float max_weight,
                EdgeArray& transitEdges, WeightArray& transitEdgeWeights,
                const EdgePos_t numEdges, const VertexID_t neighbrID, curandState* state)
  {
    // Dead end: the walk stops here.
    if (numEdges == 0) {
      return -1;
    }
    // Single neighbor: no randomness required.
    if (numEdges == 1) {
      return transitEdges[0];
    }
    // Rejection sampling: draw a uniform candidate edge x and a threshold y
    // in [0, max_weight); accept once y falls below the edge's weight.
    EdgePos_t x = RandNumGen::rand_int(state, numEdges);
    float y = curand_uniform(state)*max_weight;
    while (y > transitEdgeWeights[x]) {
      x = RandNumGen::rand_int(state, numEdges);
      y = curand_uniform(state)*max_weight;
    }
    return transitEdges[x];
  }
};
// PPR-style walk: at every step the walk terminates with probability 1%
// (walkEndProb); otherwise it moves to a weighted random neighbor chosen by
// the same rejection-sampling scheme as DeepWalkApp.
struct PPRApp : public RandomWalkApp {
  template<typename SampleType, typename EdgeArray, typename WeightArray>
  __device__ inline
  VertexID next(int step, CSRPartition* csr, const VertexID* transit, const VertexID sampleIdx,
                SampleType* sample,
                const float max_weight,
                EdgeArray& transitEdges, WeightArray& transitEdgeWeights,
                const EdgePos_t numEdges, const VertexID_t neighbrID, curandState* state)
  {
    // Dead end: terminate the walk.
    if (numEdges == 0) {
      return -1;
    }
    // Random termination with probability walkEndProb.
    const float walkEndProb = 0.01f;
    float stopDraw = curand_uniform(state);
    if (stopDraw < walkEndProb) {
      return -1;
    }
    // Single neighbor: no further randomness required.
    if (numEdges == 1) {
      return transitEdges[0];
    }
    // Weighted rejection sampling over the out-edges of the current transit.
    EdgePos_t candidate;
    float threshold;
    do {
      candidate = RandNumGen::rand_int(state, numEdges);
      threshold = curand_uniform(state)*max_weight;
    } while (threshold > transitEdgeWeights[candidate]);
    return transitEdges[candidate];
  }
};
// Empty per-sample state for walks that need no extra bookkeeping.
class DummySample
{
};
// node2vec second-order walk. sample->t remembers the previous vertex so the
// transition bias relative to it can be computed; candidates are drawn
// uniformly and accepted by rejection sampling with a per-candidate weight h
// bounded by max(p, 1/q, 1).
struct Node2VecApp : public RandomWalkApp {
  template<typename SampleType, typename EdgeArray, typename WeightArray>
  __device__ inline
  VertexID next(int step, CSRPartition* csr, const VertexID* transits, const VertexID sampleIdx,
                SampleType* sample,
                const float max_weight,
                EdgeArray& transitEdges, WeightArray& transitEdgeWeights,
                const EdgePos_t numEdges, const VertexID_t neighbrID, curandState* state)
  {
    // Dead end: terminate the walk.
    if (numEdges == 0) {
      return -1;
    }
    // Single edge, or first hop (no previous vertex yet): take the first
    // edge and remember the current vertex as the new "previous" vertex t.
    if (numEdges == 1 || step == 0) {
      sample->t = *transits;
      return transitEdges[0];
    }
    const float p = 2.0f;
    const float q = 0.5f;
    do {
      EdgePos_t x = RandNumGen::rand_int(state, numEdges);
      VertexID v = transitEdges[x];
      // Rejection threshold in [0, max over the possible weights {p, 1/q, 1}).
      float y = curand_uniform(state)*max(max(p, 1/q), 1.0f);
      const CSR::Edge* tEdges = csr->get_edges(sample->t);
      EdgePos_t tNumEdges = csr->get_n_edges_for_vertex(sample->t);
      float h;
      // BUG FIX: compare the candidate *vertex* v with the previous vertex t.
      // The original compared the edge index x with t, conflating an index
      // into the adjacency list with a vertex id.
      if (v == sample->t) {
        h = p;          // candidate returns to the previous vertex
      } else if (utils::binarySearch(tEdges, v, tNumEdges)) {
        h = 1/q;        // candidate is also a neighbor of the previous vertex
      } else {
        h = 1.0f;
      }
      if (y < h) {
        sample->t = *transits;
        return v;
      }
    } while (true);
  }
};
class Node2VecSample {
public:
VertexID t;
}; |
ad052ca10cf28cb0f8fd2936978edacd1090dd6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.hpp"
#include "fft_product2.hpp"
#include <iostream>
#include "arithmetic.hpp"
//#include "fft.hpp"
using namespace std;
// Compile-time store policy for kernel results:
//   accumulate == true  -> add the new value into the existing output,
//   accumulate == false -> overwrite the output.
template<bool accumulate> __device__
static inline void assignAcc(hipComplex & out, const hipComplex toAcc);
template<> __device__
inline void assignAcc<true>(hipComplex & out, const hipComplex toAcc) {
  out = cuCaddf(out, toAcc);
}
template<> __device__
inline void assignAcc<false>(hipComplex & out, const hipComplex toAcc) {
  out = toAcc;
}
// Compile-time load policy for kernel coefficients:
//   conjugateKernel == true  -> store the complex conjugate,
//   conjugateKernel == false -> copy the value unchanged.
template<bool conjugateKernel> __device__
static inline void assignKernelCache(hipComplex & out, const hipComplex in);
template<> __device__
inline void assignKernelCache<true>(hipComplex & out, const hipComplex in) {
  out = cuConjf(in);
}
template<> __device__
inline void assignKernelCache<false>(hipComplex & out, const hipComplex in) {
  out = in;
}
//TODO: this should be a matrix product using cublas
// o(m, g, y, x) = sum(f=0..F)sum(i=0..kH)sum(j=0..kW)
// i(m, f, y+i, x+j) k(g, f, i, j)
// outputF(m, g, y, x) = sum(f=0..F) inputF(m, f, y, x) kernelF(g, f, y, x)
// m \in 0..(M-1), stride in input : ism, stride in output : osm
// f \in 0..(F-1), ...
// g \in 0..(G-1), ...
// Frequency-domain product kernel. Each thread owns one (row y, column x)
// frequency bin and computes an nCacheIn x nCacheKer tile of (m, g) outputs
// for that bin, accumulating over the F input feature maps in registers.
// Launch layout (see fft_product_nCaches): threads = (nCols, rows-per-block),
// grid = (row blocks, M/nCacheIn, G/nCacheKer).
template<int nCacheIn, int nCacheKer, bool accumulate, bool conjugateKernel> __global__
void fft_product_cudakernel(const hipComplex* inputF,
                            const hipComplex* kernelF,
                            hipComplex* outputF,
                            const int nRows, const int nCols,
                            const int M, const int ism, const int osm,
                            const int F, const int isf, const int ksf,
                            const int G, const int ksg, const int osg) {
  const int y = blockIdx.x * blockDim.y + threadIdx.y;
  //if (y >= N/2+1)
  //  return;
  // Guard: the last row block may extend past nRows.
  if (y >= nRows)
    return;
  const int x = threadIdx.x;
  // First batch index (m0) and first kernel index (g0) of this block's tile.
  const int m0 = blockIdx.y * nCacheIn;
  const int g0 = blockIdx.z * nCacheKer;
  // Advance the base pointers to this thread's frequency bin.
  inputF += m0 * ism + y*nCols + x;
  kernelF += g0*ksg + y*nCols + x;
  outputF += m0 * osm + g0*osg + y*nCols + x;
  /*
  inputF += m0 * ism + y*N + x;
  kernelF += g0*ksg + y*N + x;
  outputF += m0 * osm + g0*osg + y*N + x;
  */
  // Register tiles: nCacheIn inputs x nCacheKer kernels -> their products.
  hipComplex inputCache [nCacheIn];
  hipComplex kernelCache[nCacheKer];
  hipComplex outputCache[nCacheIn*nCacheKer];
  for (int i = 0; i < nCacheIn*nCacheKer; ++i)
    outputCache[i] = make_cuComplex(0.f, 0.f);
  // Accumulate over the F feature maps (complex fused multiply-add).
  for (int f = 0; f < F; ++f, inputF += isf, kernelF += ksf) {
    for (int a = 0; a < nCacheIn; ++a)
      inputCache [a] = inputF [a*ism];
    for (int a = 0; a < nCacheKer; ++a){
      assignKernelCache<conjugateKernel>(kernelCache[a], kernelF[a*ksg]);
    }
    /*
      if(conjugateKernel)
	kernelCache[a] = cuConjf(kernelF[a*ksg]);
      else
	kernelCache[a] = kernelF[a*ksg];
    */
    for (int m = 0; m < nCacheIn; ++m)
      for (int g = 0; g < nCacheKer; ++g)
	outputCache[m*nCacheKer + g] =
	  cuCfmaf(inputCache[m], kernelCache[g], outputCache[m*nCacheKer + g]);
  }
  // Write (or accumulate into) the output tile.
  for (int m = 0; m < nCacheIn; ++m)
    for (int g = 0; g < nCacheKer; ++g)
      assignAcc<accumulate>(outputF[m*osm + g*osg], outputCache[m*nCacheKer + g]);
}
// Configures and launches fft_product_cudakernel for fixed cache factors.
// Preconditions: nCacheIn divides M and nCacheKer divides G (asserted).
// Block shape: nCols threads in x (one per column) by nLinesPerBlock rows in
// y; grid covers all rows, all M/nCacheIn input groups and all G/nCacheKer
// kernel groups. The accumulate/conjugateKernel flags select one of the four
// kernel template instantiations.
template<int nCacheIn, int nCacheKer>
void fft_product_nCaches(const hipComplex* inputF,
			 const hipComplex* kernelF,
			 hipComplex* outputF,
			 const int nRows, const int nCols,
			 const int M, const int ism, const int osm,
			 const int F, const int isf, const int ksf,
			 const int G, const int ksg, const int osg,
			 const bool accumulate, const bool conjugateKernel) {
  fft_assert(M % nCacheIn == 0);
  fft_assert(G % nCacheKer == 0);
  // Target roughly 128 threads per block, but never more rows than exist.
  const int nLinesPerBlock = min(nRows, max(128/nCols,1));
  //const int nLinesPerBlock = min(N/2+1, max(128/N, 1)); // TODO: is 128 optimal ?
  //TODO: we could reuse the unused last y's in the next block
  //printf("grid size=%d x %d x %d\n",DIVUP(nRows,nLinesPerBlock), M/nCacheIn, G/nCacheKer);
  //printf("block size=%d x %d\n",nCols,nLinesPerBlock);
  dim3 blocks(DIVUP(nRows,nLinesPerBlock), M/nCacheIn, G/nCacheKer);
  dim3 threads(nCols, nLinesPerBlock);
  //dim3 blocks(DIVUP(N/2+1,nLinesPerBlock), M/nCacheIn, G/nCacheKer);
  //dim3 threads(N, nLinesPerBlock);
  if (accumulate){
    if (conjugateKernel)
      hipLaunchKernelGGL(( fft_product_cudakernel<nCacheIn, nCacheKer, true, true>), dim3(blocks), dim3(threads), 0, 0,
        inputF, kernelF, outputF, nRows, nCols, M, ism, osm, F, isf, ksf, G, ksg, osg);
    else
      hipLaunchKernelGGL(( fft_product_cudakernel<nCacheIn, nCacheKer, true, false>), dim3(blocks), dim3(threads), 0, 0,
        inputF, kernelF, outputF, nRows, nCols, M, ism, osm, F, isf, ksf, G, ksg, osg);
  }
  else {
    if (conjugateKernel)
      hipLaunchKernelGGL(( fft_product_cudakernel<nCacheIn, nCacheKer, false, true>), dim3(blocks), dim3(threads), 0, 0,
        inputF, kernelF, outputF, nRows, nCols, M, ism, osm, F, isf, ksf, G, ksg, osg);
    else
      hipLaunchKernelGGL(( fft_product_cudakernel<nCacheIn, nCacheKer, false, false>), dim3(blocks), dim3(threads), 0, 0,
        inputF, kernelF, outputF, nRows, nCols, M, ism, osm, F, isf, ksf, G, ksg, osg);
  }
  CUDA_LOOK_FOR_ERROR();
}
// Helper: picks the largest kernel-count cache factor (4, then 3, else 1)
// that divides G, for an already-chosen compile-time input-cache factor
// nCacheIn, and forwards to the matching fft_product_nCaches instantiation.
template<int nCacheIn>
static void fft_product_dispatchKer(const hipComplex* inputF,
                                    const hipComplex* kernelF,
                                    hipComplex* outputF,
                                    const int nRows, const int nCols,
                                    const int M, const int ism, const int osm,
                                    const int F, const int isf, const int ksf,
                                    const int G, const int ksg, const int osg,
                                    const bool accumulate, const bool conjugateKernel) {
  if (G % 4 == 0) {
    fft_product_nCaches<nCacheIn, 4>(inputF, kernelF, outputF, nRows, nCols,
                                     M, ism, osm, F, isf, ksf,
                                     G, ksg, osg, accumulate, conjugateKernel);
  } else if (G % 3 == 0) {
    fft_product_nCaches<nCacheIn, 3>(inputF, kernelF, outputF, nRows, nCols,
                                     M, ism, osm, F, isf, ksf,
                                     G, ksg, osg, accumulate, conjugateKernel);
  } else {
    fft_product_nCaches<nCacheIn, 1>(inputF, kernelF, outputF, nRows, nCols,
                                     M, ism, osm, F, isf, ksf,
                                     G, ksg, osg, accumulate, conjugateKernel);
  }
}
// Entry point for the batched frequency-domain product. Selects the largest
// compile-time cache factors (4, then 3, else 1) that evenly divide the
// input count M and the kernel count G, then dispatches to the matching
// fft_product_nCaches instantiation. This replaces the original fully
// unrolled 3x3 if-ladder; the chosen instantiations and argument order are
// unchanged.
void fft_product_call(const hipComplex* inputF,
		      const hipComplex* kernelF,
		      hipComplex* outputF,
		      const int nRows, const int nCols,
		      const int M, const int ism, const int osm,
		      const int F, const int isf, const int ksf,
		      const int G, const int ksg, const int osg,
		      const bool accumulate, const bool conjugateKernel) {
  if (M % 4 == 0) {
    fft_product_dispatchKer<4>(inputF, kernelF, outputF, nRows, nCols,
                               M, ism, osm, F, isf, ksf,
                               G, ksg, osg, accumulate, conjugateKernel);
  } else if (M % 3 == 0) {
    fft_product_dispatchKer<3>(inputF, kernelF, outputF, nRows, nCols,
                               M, ism, osm, F, isf, ksf,
                               G, ksg, osg, accumulate, conjugateKernel);
  } else {
    fft_product_dispatchKer<1>(inputF, kernelF, outputF, nRows, nCols,
                               M, ism, osm, F, isf, ksf,
                               G, ksg, osg, accumulate, conjugateKernel);
  }
}
| ad052ca10cf28cb0f8fd2936978edacd1090dd6f.cu | #include "common.hpp"
#include "fft_product2.hpp"
#include <iostream>
#include "arithmetic.hpp"
//#include "fft.hpp"
using namespace std;
// Compile-time store policy for kernel results:
//   accumulate == true  -> add the new value into the existing output,
//   accumulate == false -> overwrite the output.
template<bool accumulate> __device__
static inline void assignAcc(cuComplex & out, const cuComplex toAcc);
template<> __device__
inline void assignAcc<true>(cuComplex & out, const cuComplex toAcc) {
  out = cuCaddf(out, toAcc);
}
template<> __device__
inline void assignAcc<false>(cuComplex & out, const cuComplex toAcc) {
  out = toAcc;
}
// Compile-time load policy for kernel coefficients:
//   conjugateKernel == true  -> store the complex conjugate,
//   conjugateKernel == false -> copy the value unchanged.
template<bool conjugateKernel> __device__
static inline void assignKernelCache(cuComplex & out, const cuComplex in);
template<> __device__
inline void assignKernelCache<true>(cuComplex & out, const cuComplex in) {
  out = cuConjf(in);
}
template<> __device__
inline void assignKernelCache<false>(cuComplex & out, const cuComplex in) {
  out = in;
}
//TODO: this should be a matrix product using cublas
// o(m, g, y, x) = sum(f=0..F)sum(i=0..kH)sum(j=0..kW)
// i(m, f, y+i, x+j) k(g, f, i, j)
// outputF(m, g, y, x) = sum(f=0..F) inputF(m, f, y, x) kernelF(g, f, y, x)
// m \in 0..(M-1), stride in input : ism, stride in output : osm
// f \in 0..(F-1), ...
// g \in 0..(G-1), ...
// Frequency-domain product kernel. Each thread owns one (row y, column x)
// frequency bin and computes an nCacheIn x nCacheKer tile of (m, g) outputs
// for that bin, accumulating over the F input feature maps in registers.
// Launch layout (see fft_product_nCaches): threads = (nCols, rows-per-block),
// grid = (row blocks, M/nCacheIn, G/nCacheKer).
template<int nCacheIn, int nCacheKer, bool accumulate, bool conjugateKernel> __global__
void fft_product_cudakernel(const cuComplex* inputF,
                            const cuComplex* kernelF,
                            cuComplex* outputF,
                            const int nRows, const int nCols,
                            const int M, const int ism, const int osm,
                            const int F, const int isf, const int ksf,
                            const int G, const int ksg, const int osg) {
  const int y = blockIdx.x * blockDim.y + threadIdx.y;
  //if (y >= N/2+1)
  //  return;
  // Guard: the last row block may extend past nRows.
  if (y >= nRows)
    return;
  const int x = threadIdx.x;
  // First batch index (m0) and first kernel index (g0) of this block's tile.
  const int m0 = blockIdx.y * nCacheIn;
  const int g0 = blockIdx.z * nCacheKer;
  // Advance the base pointers to this thread's frequency bin.
  inputF += m0 * ism + y*nCols + x;
  kernelF += g0*ksg + y*nCols + x;
  outputF += m0 * osm + g0*osg + y*nCols + x;
  /*
  inputF += m0 * ism + y*N + x;
  kernelF += g0*ksg + y*N + x;
  outputF += m0 * osm + g0*osg + y*N + x;
  */
  // Register tiles: nCacheIn inputs x nCacheKer kernels -> their products.
  cuComplex inputCache [nCacheIn];
  cuComplex kernelCache[nCacheKer];
  cuComplex outputCache[nCacheIn*nCacheKer];
  for (int i = 0; i < nCacheIn*nCacheKer; ++i)
    outputCache[i] = make_cuComplex(0.f, 0.f);
  // Accumulate over the F feature maps (complex fused multiply-add).
  for (int f = 0; f < F; ++f, inputF += isf, kernelF += ksf) {
    for (int a = 0; a < nCacheIn; ++a)
      inputCache [a] = inputF [a*ism];
    for (int a = 0; a < nCacheKer; ++a){
      assignKernelCache<conjugateKernel>(kernelCache[a], kernelF[a*ksg]);
    }
    /*
      if(conjugateKernel)
	kernelCache[a] = cuConjf(kernelF[a*ksg]);
      else
	kernelCache[a] = kernelF[a*ksg];
    */
    for (int m = 0; m < nCacheIn; ++m)
      for (int g = 0; g < nCacheKer; ++g)
	outputCache[m*nCacheKer + g] =
	  cuCfmaf(inputCache[m], kernelCache[g], outputCache[m*nCacheKer + g]);
  }
  // Write (or accumulate into) the output tile.
  for (int m = 0; m < nCacheIn; ++m)
    for (int g = 0; g < nCacheKer; ++g)
      assignAcc<accumulate>(outputF[m*osm + g*osg], outputCache[m*nCacheKer + g]);
}
// Configures and launches fft_product_cudakernel for fixed cache factors.
// Preconditions: nCacheIn divides M and nCacheKer divides G (asserted).
// Block shape: nCols threads in x (one per column) by nLinesPerBlock rows in
// y; grid covers all rows, all M/nCacheIn input groups and all G/nCacheKer
// kernel groups. The accumulate/conjugateKernel flags select one of the four
// kernel template instantiations.
template<int nCacheIn, int nCacheKer>
void fft_product_nCaches(const cuComplex* inputF,
			 const cuComplex* kernelF,
			 cuComplex* outputF,
			 const int nRows, const int nCols,
			 const int M, const int ism, const int osm,
			 const int F, const int isf, const int ksf,
			 const int G, const int ksg, const int osg,
			 const bool accumulate, const bool conjugateKernel) {
  fft_assert(M % nCacheIn == 0);
  fft_assert(G % nCacheKer == 0);
  // Target roughly 128 threads per block, but never more rows than exist.
  const int nLinesPerBlock = min(nRows, max(128/nCols,1));
  //const int nLinesPerBlock = min(N/2+1, max(128/N, 1)); // TODO: is 128 optimal ?
  //TODO: we could reuse the unused last y's in the next block
  //printf("grid size=%d x %d x %d\n",DIVUP(nRows,nLinesPerBlock), M/nCacheIn, G/nCacheKer);
  //printf("block size=%d x %d\n",nCols,nLinesPerBlock);
  dim3 blocks(DIVUP(nRows,nLinesPerBlock), M/nCacheIn, G/nCacheKer);
  dim3 threads(nCols, nLinesPerBlock);
  //dim3 blocks(DIVUP(N/2+1,nLinesPerBlock), M/nCacheIn, G/nCacheKer);
  //dim3 threads(N, nLinesPerBlock);
  if (accumulate){
    if (conjugateKernel)
      fft_product_cudakernel<nCacheIn, nCacheKer, true, true><<<blocks, threads>>>
	(inputF, kernelF, outputF, nRows, nCols, M, ism, osm, F, isf, ksf, G, ksg, osg);
    else
      fft_product_cudakernel<nCacheIn, nCacheKer, true, false><<<blocks, threads>>>
	(inputF, kernelF, outputF, nRows, nCols, M, ism, osm, F, isf, ksf, G, ksg, osg);
  }
  else {
    if (conjugateKernel)
      fft_product_cudakernel<nCacheIn, nCacheKer, false, true><<<blocks, threads>>>
	(inputF, kernelF, outputF, nRows, nCols, M, ism, osm, F, isf, ksf, G, ksg, osg);
    else
      fft_product_cudakernel<nCacheIn, nCacheKer, false, false><<<blocks, threads>>>
	(inputF, kernelF, outputF, nRows, nCols, M, ism, osm, F, isf, ksf, G, ksg, osg);
  }
  CUDA_LOOK_FOR_ERROR();
}
// Helper: picks the largest kernel-count cache factor (4, then 3, else 1)
// that divides G, for an already-chosen compile-time input-cache factor
// nCacheIn, and forwards to the matching fft_product_nCaches instantiation.
template<int nCacheIn>
static void fft_product_dispatchKer(const cuComplex* inputF,
                                    const cuComplex* kernelF,
                                    cuComplex* outputF,
                                    const int nRows, const int nCols,
                                    const int M, const int ism, const int osm,
                                    const int F, const int isf, const int ksf,
                                    const int G, const int ksg, const int osg,
                                    const bool accumulate, const bool conjugateKernel) {
  if (G % 4 == 0) {
    fft_product_nCaches<nCacheIn, 4>(inputF, kernelF, outputF, nRows, nCols,
                                     M, ism, osm, F, isf, ksf,
                                     G, ksg, osg, accumulate, conjugateKernel);
  } else if (G % 3 == 0) {
    fft_product_nCaches<nCacheIn, 3>(inputF, kernelF, outputF, nRows, nCols,
                                     M, ism, osm, F, isf, ksf,
                                     G, ksg, osg, accumulate, conjugateKernel);
  } else {
    fft_product_nCaches<nCacheIn, 1>(inputF, kernelF, outputF, nRows, nCols,
                                     M, ism, osm, F, isf, ksf,
                                     G, ksg, osg, accumulate, conjugateKernel);
  }
}
// Entry point for the batched frequency-domain product. Selects the largest
// compile-time cache factors (4, then 3, else 1) that evenly divide the
// input count M and the kernel count G, then dispatches to the matching
// fft_product_nCaches instantiation. This replaces the original fully
// unrolled 3x3 if-ladder; the chosen instantiations and argument order are
// unchanged.
void fft_product_call(const cuComplex* inputF,
		      const cuComplex* kernelF,
		      cuComplex* outputF,
		      const int nRows, const int nCols,
		      const int M, const int ism, const int osm,
		      const int F, const int isf, const int ksf,
		      const int G, const int ksg, const int osg,
		      const bool accumulate, const bool conjugateKernel) {
  if (M % 4 == 0) {
    fft_product_dispatchKer<4>(inputF, kernelF, outputF, nRows, nCols,
                               M, ism, osm, F, isf, ksf,
                               G, ksg, osg, accumulate, conjugateKernel);
  } else if (M % 3 == 0) {
    fft_product_dispatchKer<3>(inputF, kernelF, outputF, nRows, nCols,
                               M, ism, osm, F, isf, ksf,
                               G, ksg, osg, accumulate, conjugateKernel);
  } else {
    fft_product_dispatchKer<1>(inputF, kernelF, outputF, nRows, nCols,
                               M, ism, osm, F, isf, ksf,
                               G, ksg, osg, accumulate, conjugateKernel);
  }
}
|
c0bf05de2d2e7b45eab838aef09a0ee0dd7c01b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<algorithm>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/sort.h>
//#define PSIZE 10
//#define NGEN 500000
#define MUT_PROB 0.05
#define TESTE 256
struct Individual
{
float fitness;
unsigned int chromossomes;
};
__device__ bool comparator (Individual i, Individual j)
{
return (i.fitness > j.fitness);
}
void printPop(Individual *population, int popSize, int print)
{
if(print != 0)
{
for(int i = 0; i < popSize; i++)
{
printf("%f - ", population[i].fitness);
}
}
}
__global__ void persistentThreads(int popSize, int NGEN, float *maxFitness, unsigned int seed)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
Individual child;
hiprandState_t state;
hiprand_init(seed, id, 0, &state);
__shared__ float totalFitness;
extern __shared__ Individual population[];
for(int i = id; i < popSize; i+=blockDim.x)
{
//Create population
population[i].fitness = 0;
population[i].chromossomes = hiprand(&state);
}
__syncthreads();
for(int g = 0; g < NGEN; g++)
{
if(id == 0)
{
totalFitness = 0;
}
__syncthreads();
for(int i = id; i < popSize; i+=blockDim.x)
{
//Calculate fitness
unsigned int mask = 0x3FF;
float a = 0, b = 0, c = 0;
a = population[i].chromossomes & mask;
b = (population[i].chromossomes & (mask << 10)) >> 10;
c = (population[i].chromossomes & (mask << 20)) >> 20;
a = (a - 512)/100.0;
b = (b - 512)/100.0;
c = (c - 512)/100.0;
population[i].fitness = 1.0 / (1 + a*a + b*b + c*c);
atomicAdd(&totalFitness, population[i].fitness);
}
__syncthreads();
if(id == 0)
{
thrust::sort(population, population + popSize, comparator);
maxFitness[g] = population[0].fitness;
}
__syncthreads();
float localTotalFitness = totalFitness;
for(int i = id; i < popSize; i+=blockDim.x)
{
Individual parents[2];
int temp = -1;
//Selection
for(int j = 0; j < 2; j++)
{
float p = hiprand_uniform(&state) * localTotalFitness;
float score = 0;
for(int k = 0; k < popSize; k++)
{
if(k == temp)
{
continue;
}
score += population[k].fitness;
if(p < score)
{
parents[j] = population[k];
localTotalFitness -= population[k].fitness;
temp = k;
break;
}
}
}
//Crossover
unsigned char cutPoint = hiprand(&state) % 31;
unsigned int mask1 = 0xffffffff << cutPoint;
unsigned int mask2 = 0xffffffff >> (32 - cutPoint);
child.fitness = 0;
child.chromossomes = (parents[0].chromossomes & mask1) + (parents[1].chromossomes & mask2);
//Mutation
float mutation = hiprand_uniform(&state);
if(mutation < MUT_PROB)
{
unsigned char mutPoint = hiprand(&state) % 30;
child.chromossomes ^= 1 << mutPoint;
}
}
__syncthreads();
if(id == 0)
{
child = population[0];
}
for(int i = id; i < popSize; i+=blockDim.x)
{
population[i] = child;
}
__syncthreads();
}
}
int main(int argc, char *argv[ ])
{
int PSIZE, NGEN, NIT, PRINT;
double Ttotal = 0;
if(argc < 5)
{
printf("Uso %s <POP_SIZE> <N_GEN> <N_ITERACOES> <PRINT>\n", argv[0]);
return 1;
}
else
{
PSIZE = atoi(argv[1]);
NGEN = atoi(argv[2]);
NIT = atoi(argv[3]);
PRINT = atoi(argv[4]);
}
for(int it = 0; it < NIT; it++)
{
clock_t start, end;
float *maxFitness, *cpu_maxFitness;
hipMalloc((void**) &maxFitness, NGEN * sizeof(float));
cpu_maxFitness = (float *) malloc(NGEN * sizeof(float));
start = clock();
hipLaunchKernelGGL(( persistentThreads), dim3(1), dim3(min(PSIZE, 1024)), PSIZE * sizeof(Individual), 0, PSIZE, NGEN, maxFitness, time(NULL));
hipDeviceSynchronize();
end = clock();
hipMemcpy(cpu_maxFitness, maxFitness, NGEN * sizeof(float), hipMemcpyDeviceToHost);
hipFree(maxFitness);
if(PRINT != 0)
{
printf("Gen\tFitness\n");
for(int i = 0; i < NGEN; i++)
{
printf("%d\t%f\n", i, cpu_maxFitness[i]);
}
}
free(cpu_maxFitness);
printf("\nT total(us)\t\tT gerao(us)\n");
double cpu_time_used = 1000000 * ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f\t\t%f\n\n", cpu_time_used, cpu_time_used/NGEN);
Ttotal += cpu_time_used;
}
printf("\nAvg T total(us)\t\tAvg T gerao(us)\n");
printf("%f\t\t%f\n", Ttotal/NIT, Ttotal/(NIT*NGEN));
return 0;
}
| c0bf05de2d2e7b45eab838aef09a0ee0dd7c01b0.cu | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<algorithm>
#include <curand.h>
#include <curand_kernel.h>
#include <thrust/sort.h>
//#define PSIZE 10
//#define NGEN 500000
#define MUT_PROB 0.05
#define TESTE 256
struct Individual
{
float fitness;
unsigned int chromossomes;
};
__device__ bool comparator (Individual i, Individual j)
{
return (i.fitness > j.fitness);
}
void printPop(Individual *population, int popSize, int print)
{
if(print != 0)
{
for(int i = 0; i < popSize; i++)
{
printf("%f - ", population[i].fitness);
}
}
}
__global__ void persistentThreads(int popSize, int NGEN, float *maxFitness, unsigned int seed)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
Individual child;
curandState_t state;
curand_init(seed, id, 0, &state);
__shared__ float totalFitness;
extern __shared__ Individual population[];
for(int i = id; i < popSize; i+=blockDim.x)
{
//Create population
population[i].fitness = 0;
population[i].chromossomes = curand(&state);
}
__syncthreads();
for(int g = 0; g < NGEN; g++)
{
if(id == 0)
{
totalFitness = 0;
}
__syncthreads();
for(int i = id; i < popSize; i+=blockDim.x)
{
//Calculate fitness
unsigned int mask = 0x3FF;
float a = 0, b = 0, c = 0;
a = population[i].chromossomes & mask;
b = (population[i].chromossomes & (mask << 10)) >> 10;
c = (population[i].chromossomes & (mask << 20)) >> 20;
a = (a - 512)/100.0;
b = (b - 512)/100.0;
c = (c - 512)/100.0;
population[i].fitness = 1.0 / (1 + a*a + b*b + c*c);
atomicAdd(&totalFitness, population[i].fitness);
}
__syncthreads();
if(id == 0)
{
thrust::sort(population, population + popSize, comparator);
maxFitness[g] = population[0].fitness;
}
__syncthreads();
float localTotalFitness = totalFitness;
for(int i = id; i < popSize; i+=blockDim.x)
{
Individual parents[2];
int temp = -1;
//Selection
for(int j = 0; j < 2; j++)
{
float p = curand_uniform(&state) * localTotalFitness;
float score = 0;
for(int k = 0; k < popSize; k++)
{
if(k == temp)
{
continue;
}
score += population[k].fitness;
if(p < score)
{
parents[j] = population[k];
localTotalFitness -= population[k].fitness;
temp = k;
break;
}
}
}
//Crossover
unsigned char cutPoint = curand(&state) % 31;
unsigned int mask1 = 0xffffffff << cutPoint;
unsigned int mask2 = 0xffffffff >> (32 - cutPoint);
child.fitness = 0;
child.chromossomes = (parents[0].chromossomes & mask1) + (parents[1].chromossomes & mask2);
//Mutation
float mutation = curand_uniform(&state);
if(mutation < MUT_PROB)
{
unsigned char mutPoint = curand(&state) % 30;
child.chromossomes ^= 1 << mutPoint;
}
}
__syncthreads();
if(id == 0)
{
child = population[0];
}
for(int i = id; i < popSize; i+=blockDim.x)
{
population[i] = child;
}
__syncthreads();
}
}
int main(int argc, char *argv[ ])
{
int PSIZE, NGEN, NIT, PRINT;
double Ttotal = 0;
if(argc < 5)
{
printf("Uso %s <POP_SIZE> <N_GEN> <N_ITERACOES> <PRINT>\n", argv[0]);
return 1;
}
else
{
PSIZE = atoi(argv[1]);
NGEN = atoi(argv[2]);
NIT = atoi(argv[3]);
PRINT = atoi(argv[4]);
}
for(int it = 0; it < NIT; it++)
{
clock_t start, end;
float *maxFitness, *cpu_maxFitness;
cudaMalloc((void**) &maxFitness, NGEN * sizeof(float));
cpu_maxFitness = (float *) malloc(NGEN * sizeof(float));
start = clock();
persistentThreads<<<1, min(PSIZE, 1024), PSIZE * sizeof(Individual)>>>(PSIZE, NGEN, maxFitness, time(NULL));
cudaDeviceSynchronize();
end = clock();
cudaMemcpy(cpu_maxFitness, maxFitness, NGEN * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(maxFitness);
if(PRINT != 0)
{
printf("Gen\tFitness\n");
for(int i = 0; i < NGEN; i++)
{
printf("%d\t%f\n", i, cpu_maxFitness[i]);
}
}
free(cpu_maxFitness);
printf("\nT total(us)\t\tT geração(us)\n");
double cpu_time_used = 1000000 * ((double) (end - start)) / CLOCKS_PER_SEC;
printf("%f\t\t%f\n\n", cpu_time_used, cpu_time_used/NGEN);
Ttotal += cpu_time_used;
}
printf("\nAvg T total(us)\t\tAvg T geração(us)\n");
printf("%f\t\t%f\n", Ttotal/NIT, Ttotal/(NIT*NGEN));
return 0;
}
|
35acc51c0f6ff5ee059f29db663a95f06c2f54ad.hip | // !!! This is a file automatically generated by hipify!!!
//********************************************************//
// CUDA SIFT extractor by Marten Bjorkman aka Celebrandil //
//********************************************************//
#include <cstdio>
#include "cudautils.h"
#include "cudaImage.h"
int iDivUp(int a, int b) { return (a%b != 0) ? (a/b + 1) : (a/b); }
int iDivDown(int a, int b) { return a/b; }
int iAlignUp(int a, int b) { return (a%b != 0) ? (a - a%b + b) : a; }
int iAlignDown(int a, int b) { return a - a%b; }
void CudaImage::Allocate(int w, int h, int p, bool host, float *devmem, float *hostmem)
{
width = w;
height = h;
pitch = p;
d_data = devmem;
h_data = hostmem;
t_data = NULL;
if (devmem==NULL) {
safeCall(hipMallocPitch((void **)&d_data, (size_t*)&pitch, (size_t)(sizeof(float)*width), (size_t)height));
//fprintf(stderr, "PITCH_RAW: %d\n", pitch);
pitch /= sizeof(float);
if (d_data==NULL)
printf("Failed to allocate device data\n");
d_internalAlloc = true;
}
if (host && hostmem==NULL) {
h_data = (float *)malloc(sizeof(float)*pitch*height);
h_internalAlloc = true;
}
//fprintf(stderr, "Alloc: H, D: %p %p\n", h_data, d_data);
}
CudaImage::CudaImage() :
d_data(NULL), h_data(NULL), t_data(NULL), d_internalAlloc(false), h_internalAlloc(false)
{
}
CudaImage::~CudaImage()
{
if (d_internalAlloc && d_data!=NULL)
safeCall(hipFree(d_data));
d_data = NULL;
if (h_internalAlloc && h_data!=NULL)
free(h_data);
h_data = NULL;
if (t_data!=NULL)
safeCall(hipFreeArray((hipArray *)t_data));
t_data = NULL;
}
double CudaImage::Download()
{
//fprintf(stderr, "DL: H, D: %p %p\n", h_data, d_data);
//fprintf(stderr, "size: pitch: %d; height: %d; width %d\n", pitch, height, width);
TimerGPU timer(0);
if (d_data!=NULL && h_data!=NULL) {
safeCall(hipMemcpy(d_data, h_data, sizeof(float)*pitch*height, hipMemcpyHostToDevice));
}
double gpuTime = timer.read();
#ifdef VERBOSE
printf("Download time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::Readback()
{
TimerGPU timer(0);
int p = sizeof(float)*pitch;
safeCall(hipMemcpy2D(h_data, p, d_data, p, sizeof(float)*width, height, hipMemcpyDeviceToHost));
double gpuTime = timer.read();
#ifdef VERBOSE
printf("Readback time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::InitTexture()
{
TimerGPU timer(0);
hipChannelFormatDesc t_desc = hipCreateChannelDesc<float>();
safeCall(hipMallocArray((hipArray **)&t_data, &t_desc, pitch, height));
if (t_data==NULL)
printf("Failed to allocated texture data\n");
double gpuTime = timer.read();
#ifdef VERBOSE
printf("InitTexture time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::CopyToTexture(CudaImage &dst, bool host)
{
if (dst.t_data==NULL) {
printf("Error CopyToTexture: No texture data\n");
return 0.0;
}
if ((!host || h_data==NULL) && (host || d_data==NULL)) {
printf("Error CopyToTexture: No source data\n");
return 0.0;
}
TimerGPU timer(0);
if (host)
safeCall(hipMemcpyToArray((hipArray *)dst.t_data, 0, 0, h_data, sizeof(float)*pitch*dst.height, hipMemcpyHostToDevice));
else
safeCall(hipMemcpyToArray((hipArray *)dst.t_data, 0, 0, d_data, sizeof(float)*pitch*dst.height, hipMemcpyDeviceToDevice));
safeCall(hipDeviceSynchronize());
double gpuTime = timer.read();
#ifdef VERBOSE
printf("CopyToTexture time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
| 35acc51c0f6ff5ee059f29db663a95f06c2f54ad.cu | //********************************************************//
// CUDA SIFT extractor by Marten Bjorkman aka Celebrandil //
//********************************************************//
#include <cstdio>
#include "cudautils.h"
#include "cudaImage.h"
int iDivUp(int a, int b) { return (a%b != 0) ? (a/b + 1) : (a/b); }
int iDivDown(int a, int b) { return a/b; }
int iAlignUp(int a, int b) { return (a%b != 0) ? (a - a%b + b) : a; }
int iAlignDown(int a, int b) { return a - a%b; }
void CudaImage::Allocate(int w, int h, int p, bool host, float *devmem, float *hostmem)
{
width = w;
height = h;
pitch = p;
d_data = devmem;
h_data = hostmem;
t_data = NULL;
if (devmem==NULL) {
safeCall(cudaMallocPitch((void **)&d_data, (size_t*)&pitch, (size_t)(sizeof(float)*width), (size_t)height));
//fprintf(stderr, "PITCH_RAW: %d\n", pitch);
pitch /= sizeof(float);
if (d_data==NULL)
printf("Failed to allocate device data\n");
d_internalAlloc = true;
}
if (host && hostmem==NULL) {
h_data = (float *)malloc(sizeof(float)*pitch*height);
h_internalAlloc = true;
}
//fprintf(stderr, "Alloc: H, D: %p %p\n", h_data, d_data);
}
CudaImage::CudaImage() :
d_data(NULL), h_data(NULL), t_data(NULL), d_internalAlloc(false), h_internalAlloc(false)
{
}
CudaImage::~CudaImage()
{
if (d_internalAlloc && d_data!=NULL)
safeCall(cudaFree(d_data));
d_data = NULL;
if (h_internalAlloc && h_data!=NULL)
free(h_data);
h_data = NULL;
if (t_data!=NULL)
safeCall(cudaFreeArray((cudaArray *)t_data));
t_data = NULL;
}
double CudaImage::Download()
{
//fprintf(stderr, "DL: H, D: %p %p\n", h_data, d_data);
//fprintf(stderr, "size: pitch: %d; height: %d; width %d\n", pitch, height, width);
TimerGPU timer(0);
if (d_data!=NULL && h_data!=NULL) {
safeCall(cudaMemcpy(d_data, h_data, sizeof(float)*pitch*height, cudaMemcpyHostToDevice));
}
double gpuTime = timer.read();
#ifdef VERBOSE
printf("Download time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::Readback()
{
TimerGPU timer(0);
int p = sizeof(float)*pitch;
safeCall(cudaMemcpy2D(h_data, p, d_data, p, sizeof(float)*width, height, cudaMemcpyDeviceToHost));
double gpuTime = timer.read();
#ifdef VERBOSE
printf("Readback time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::InitTexture()
{
TimerGPU timer(0);
cudaChannelFormatDesc t_desc = cudaCreateChannelDesc<float>();
safeCall(cudaMallocArray((cudaArray **)&t_data, &t_desc, pitch, height));
if (t_data==NULL)
printf("Failed to allocated texture data\n");
double gpuTime = timer.read();
#ifdef VERBOSE
printf("InitTexture time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::CopyToTexture(CudaImage &dst, bool host)
{
if (dst.t_data==NULL) {
printf("Error CopyToTexture: No texture data\n");
return 0.0;
}
if ((!host || h_data==NULL) && (host || d_data==NULL)) {
printf("Error CopyToTexture: No source data\n");
return 0.0;
}
TimerGPU timer(0);
if (host)
safeCall(cudaMemcpyToArray((cudaArray *)dst.t_data, 0, 0, h_data, sizeof(float)*pitch*dst.height, cudaMemcpyHostToDevice));
else
safeCall(cudaMemcpyToArray((cudaArray *)dst.t_data, 0, 0, d_data, sizeof(float)*pitch*dst.height, cudaMemcpyDeviceToDevice));
safeCall(cudaThreadSynchronize());
double gpuTime = timer.read();
#ifdef VERBOSE
printf("CopyToTexture time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
|
0fa4d910930386c46a7fbf4ef57fc68a1dab66fe.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N, int iterations)
{
int tid = blockIdx.x*blockIdx.x + threadIdx.x;
float Value1=0;
float Value2=0;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = A[tid];
I2[tid%THREADS_PER_BLOCK] = B[tid];
__syncthreads();
float sum = 0.0;
if(tid < N){
for(unsigned i=0; i<iterations; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
for(unsigned j=1; j<=2; ++j){
sum+=tex1Dfetch(texmem1,tid*j);
Value1 +=tex1Dfetch(texmem2,tid*j)+I1[(i+j)%THREADS_PER_BLOCK];
Value2 += I2[(i+j)%THREADS_PER_BLOCK]* I1[(i+j)%THREADS_PER_BLOCK]+Value1;
sum+=tex1Dfetch(texmem2,tid*j)+B[tid]+Value2;
Value1 += sqrt(abs(sum))+A[tid];
Value2 += tex1Dfetch(texmem3,tid*j)* I2[(i+j)%THREADS_PER_BLOCK];
sum/=tex1Dfetch(texmem4,tid*j)+A[tid];
}
A[tid*2] = sum+Value1;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int iterations)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
float array1[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array1[i] = rand() / RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array2[i] = rand() / RAND_MAX;
}
hipMemcpyToSymbol(ConstArray1, array1, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol(ConstArray2, array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
hipMalloc((void**) &device_texture1, size1);
hipMalloc((void**) &device_texture2, size1);
hipMalloc((void**) &device_texture3, size1);
hipMalloc((void**) &device_texture4, size1);
hipMemcpy(device_texture1, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture3, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture4, host_texture1, size1, hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, size1);
hipBindTexture(0, texmem2, device_texture2, size1);
hipBindTexture(0, texmem3, device_texture3, size1);
hipBindTexture(0, texmem4, device_texture4, size1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
checkCudaErrors( hipMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
hipDeviceSynchronize();
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
hipFree(d_A1);
if (d_A2)
hipFree(d_A2);
if (d_A3)
hipFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries.
void RandomInit_int(float* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / RAND_MAX;
}
} | 0fa4d910930386c46a7fbf4ef57fc68a1dab66fe.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(unsigned*, int);
void RandomInit_fp(float*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N, int iterations)
{
int tid = blockIdx.x*blockIdx.x + threadIdx.x;
float Value1=0;
float Value2=0;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = A[tid];
I2[tid%THREADS_PER_BLOCK] = B[tid];
__syncthreads();
float sum = 0.0;
if(tid < N){
for(unsigned i=0; i<iterations; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
for(unsigned j=1; j<=2; ++j){
sum+=tex1Dfetch(texmem1,tid*j);
Value1 +=tex1Dfetch(texmem2,tid*j)+I1[(i+j)%THREADS_PER_BLOCK];
Value2 += I2[(i+j)%THREADS_PER_BLOCK]* I1[(i+j)%THREADS_PER_BLOCK]+Value1;
sum+=tex1Dfetch(texmem2,tid*j)+B[tid]+Value2;
Value1 += sqrt(abs(sum))+A[tid];
Value2 += tex1Dfetch(texmem3,tid*j)* I2[(i+j)%THREADS_PER_BLOCK];
sum/=tex1Dfetch(texmem4,tid*j)+A[tid];
}
A[tid*2] = sum+Value1;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int iterations)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<iterations*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
float array1[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array1[i] = rand() / RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array2[i] = rand() / RAND_MAX;
}
cudaMemcpyToSymbol(ConstArray1, array1, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol(ConstArray2, array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
cudaMalloc((void**) &device_texture1, size1);
cudaMalloc((void**) &device_texture2, size1);
cudaMalloc((void**) &device_texture3, size1);
cudaMalloc((void**) &device_texture4, size1);
cudaMemcpy(device_texture1, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture3, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture4, host_texture1, size1, cudaMemcpyHostToDevice);
cudaBindTexture(0, texmem1, device_texture1, size1);
cudaBindTexture(0, texmem2, device_texture2, size1);
cudaBindTexture(0, texmem3, device_texture3, size1);
cudaBindTexture(0, texmem4, device_texture4, size1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A1, size1) );
checkCudaErrors( cudaMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
cudaThreadSynchronize();
checkCudaErrors(cudaEventRecord(start));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
cudaFree(d_A1);
if (d_A2)
cudaFree(d_A2);
if (d_A3)
cudaFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries.
void RandomInit_int(float* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / RAND_MAX;
}
} |
e83aa1c68e3be8f919ad78a1d25a7cae044dff4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "Grid_hip.cuh"
int Grid::getGridType() const {
return type_;
}
void Grid::setGridType(int type) {
type_ = type;
}
void Grid::setPosition(float3 pos) {
pos_ = pos;
}
float3 Grid::getSize() const {
return size_;
}
void Grid::setSize(float3 size) {
size_ = size;
}
void Grid::setInterval(float3 interval) {
interval_ = interval;
}
int Grid::getHeliostatType() const {
return helio_type_;
}
void Grid::setHeliostatType(int helio_type) {
helio_type_ = helio_type;
}
int Grid::getStartHeliostatPosition() const {
return start_helio_pos_;
}
void Grid::setStartHeliostatPosition(int start_helio_pos) {
start_helio_pos_ = start_helio_pos;
}
int Grid::getNumberOfHeliostats() const {
return num_helios_;
}
void Grid::setNumberOfHeliostats(int num_helios) {
num_helios_ = num_helios;
}
int Grid::getBelongingReceiverIndex() const {
return belonging_receiver_index_;
}
void Grid::setBelongingReceiverIndex(int belonging_receiver_index) {
belonging_receiver_index_ = belonging_receiver_index;
}
| e83aa1c68e3be8f919ad78a1d25a7cae044dff4e.cu | #include "Grid.cuh"
int Grid::getGridType() const {
return type_;
}
void Grid::setGridType(int type) {
type_ = type;
}
void Grid::setPosition(float3 pos) {
pos_ = pos;
}
float3 Grid::getSize() const {
return size_;
}
void Grid::setSize(float3 size) {
size_ = size;
}
void Grid::setInterval(float3 interval) {
interval_ = interval;
}
int Grid::getHeliostatType() const {
return helio_type_;
}
void Grid::setHeliostatType(int helio_type) {
helio_type_ = helio_type;
}
int Grid::getStartHeliostatPosition() const {
return start_helio_pos_;
}
void Grid::setStartHeliostatPosition(int start_helio_pos) {
start_helio_pos_ = start_helio_pos;
}
int Grid::getNumberOfHeliostats() const {
return num_helios_;
}
void Grid::setNumberOfHeliostats(int num_helios) {
num_helios_ = num_helios;
}
int Grid::getBelongingReceiverIndex() const {
return belonging_receiver_index_;
}
void Grid::setBelongingReceiverIndex(int belonging_receiver_index) {
belonging_receiver_index_ = belonging_receiver_index;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.