hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
08ac209d52260683af8cbcbe8d8fa5dd502fe3ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "device_launch_parameters.h"
#define N (33 * 1024)
// Element-wise vector add: c[i] = a[i] + b[i] for every i in [0, N).
// Uses a grid-stride loop, so any <<<blocks, threads>>> launch
// configuration covers the full N-element range.
__global__ void addKernel(int *a, int *b, int *c) {
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N; idx += stride) {
        c[idx] = a[idx] + b[idx];
    }
}
// Host driver: fills two N-element vectors, adds them on the GPU with a
// grid-stride kernel, copies the result back, and prints every element.
int main(void) {
    // NOTE(review): a, b, c are ~132 KB each on the stack; fine for the
    // default stack limit, but heap allocation would be safer for larger N.
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // Device buffers for the two inputs and the output.
    HANDLE_ERROR(hipMalloc((void **)&dev_a, N * sizeof(int)));
    HANDLE_ERROR(hipMalloc((void **)&dev_b, N * sizeof(int)));
    HANDLE_ERROR(hipMalloc((void **)&dev_c, N * sizeof(int)));
    // Host-side test pattern: a[i] = -1, b[i] = i^2.
    for (int i = 0; i < N; i++) {
        a[i] = -1;
        b[i] = i * i;
    }
    HANDLE_ERROR(hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice));
    HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice));
    // ch04 block N thread 1
    // addKernel<<<N, 1>>>(dev_a, dev_b, dev_c);
    // ch05 block 1 thread N
    // addKernel<<<1, N>>>(dev_a, dev_b, dev_c);
    // ch05 block (N+127)/128 thread 128
    // addKernel<<<(N + 127) / 128, 128>>>(dev_a, dev_b, dev_c);
    // ch05 block 128 thread 128: the grid-stride loop in the kernel covers N.
    addKernel<<<128, 128>>>(dev_a, dev_b, dev_c);
    // Kernel launches return no status directly; surface launch errors here.
    HANDLE_ERROR(hipGetLastError());
    // The blocking device-to-host copy also synchronizes with the kernel.
    HANDLE_ERROR(hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    // Check free status instead of discarding it.
    HANDLE_ERROR(hipFree(dev_a));
    HANDLE_ERROR(hipFree(dev_b));
    HANDLE_ERROR(hipFree(dev_c));
    return 0;
}
| 08ac209d52260683af8cbcbe8d8fa5dd502fe3ce.cu |
#include "common.h"
#include "device_launch_parameters.h"
#define N (33 * 1024)
// Element-wise vector add: c[i] = a[i] + b[i] for every i in [0, N).
// Uses a grid-stride loop, so any <<<blocks, threads>>> launch
// configuration covers the full N-element range.
__global__ void addKernel(int *a, int *b, int *c) {
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N; idx += stride) {
        c[idx] = a[idx] + b[idx];
    }
}
// Host driver: fills two N-element vectors, adds them on the GPU with a
// grid-stride kernel, copies the result back, and prints every element.
int main(void) {
    // NOTE(review): a, b, c are ~132 KB each on the stack; fine for the
    // default stack limit, but heap allocation would be safer for larger N.
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // Device buffers for the two inputs and the output.
    HANDLE_ERROR(cudaMalloc((void **)&dev_a, N * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void **)&dev_b, N * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void **)&dev_c, N * sizeof(int)));
    // Host-side test pattern: a[i] = -1, b[i] = i^2.
    for (int i = 0; i < N; i++) {
        a[i] = -1;
        b[i] = i * i;
    }
    HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));
    // ch04 block N thread 1
    // addKernel<<<N, 1>>>(dev_a, dev_b, dev_c);
    // ch05 block 1 thread N
    // addKernel<<<1, N>>>(dev_a, dev_b, dev_c);
    // ch05 block (N+127)/128 thread 128
    // addKernel<<<(N + 127) / 128, 128>>>(dev_a, dev_b, dev_c);
    // ch05 block 128 thread 128: the grid-stride loop in the kernel covers N.
    addKernel<<<128, 128>>>(dev_a, dev_b, dev_c);
    // Kernel launches return no status directly; surface launch errors here.
    HANDLE_ERROR(cudaGetLastError());
    // The blocking device-to-host copy also synchronizes with the kernel.
    HANDLE_ERROR(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    // Check free status instead of discarding it.
    HANDLE_ERROR(cudaFree(dev_a));
    HANDLE_ERROR(cudaFree(dev_b));
    HANDLE_ERROR(cudaFree(dev_c));
    return 0;
}
|
9e61ab8bc0930b3babc6ff47840c4a4474f310c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2000-2021, Heiko Bauke
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdlib>
#include <iostream>
#include <vector>
#include <trng/yarn5s.hpp>
#include <trng/uniform01_dist.hpp>
// Monte Carlo pi kernel: each thread draws its share of (x, y) points in the
// unit square and counts how many fall inside the unit circle, writing the
// count to in[rank]. Launched as <<<1, size>>>, so threadIdx.x alone
// identifies the thread.
__global__ void parallel_pi(long samples, long *in, trng::yarn5s r) {
  long rank = threadIdx.x;  // thread id within the single block
  long size = blockDim.x;   // total number of threads
  // Each loop iteration below consumes two random numbers (x and y), hence
  // the factor 2 when skipping past lower-ranked threads' samples.
  r.jump(2 * (rank * samples / size));  // jump ahead
  trng::uniform01_dist<float> u;        // random number distribution
  in[rank] = 0;                         // local number of points in circle
  // This thread's contiguous slice of the global sample range.
  for (long i = rank * samples / size; i < (rank + 1) * samples / size; ++i) {
    const float x = u(r), y = u(r);  // choose random x- and y-coordinates
    if (x * x + y * y <= 1)          // is point in circle?
      ++in[rank];                    // increase thread-local counter
  }
}
// Host driver for the Monte Carlo pi estimate: launches one block of `size`
// threads, gathers the per-thread hit counters, and prints 4 * hits/samples.
int main(int argc, char *argv[]) {
  const long samples{1000000l};  // total number of points in square
  const int size{128};           // number of threads
  long *in_device;
  // Device buffer holding one hit counter per thread. The runtime calls
  // below were previously unchecked, so failures (e.g. no device present)
  // silently produced garbage output.
  if (hipMalloc(&in_device, size * sizeof(*in_device)) != hipSuccess) {
    std::cerr << "hipMalloc failed" << std::endl;
    return EXIT_FAILURE;
  }
  trng::yarn5s r;
  // start parallel Monte Carlo
  hipLaunchKernelGGL((parallel_pi), dim3(1), dim3(size), 0, 0, samples, in_device, r);
  if (hipGetLastError() != hipSuccess) {
    std::cerr << "kernel launch failed" << std::endl;
    return EXIT_FAILURE;
  }
  // gather results; the blocking copy also synchronizes with the kernel
  std::vector<long> in(size);
  if (hipMemcpy(in.data(), in_device, size * sizeof(*in),
                hipMemcpyDeviceToHost) != hipSuccess) {
    std::cerr << "hipMemcpy failed" << std::endl;
    return EXIT_FAILURE;
  }
  hipFree(in_device);
  long sum{0};
  for (int rank{0}; rank < size; ++rank)
    sum += in[rank];
  // print result: area ratio of circle quadrant to unit square is pi/4
  std::cout << "pi = " << 4.0 * sum / samples << std::endl;
  return EXIT_SUCCESS;
}
| 9e61ab8bc0930b3babc6ff47840c4a4474f310c8.cu | // Copyright (c) 2000-2021, Heiko Bauke
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdlib>
#include <iostream>
#include <vector>
#include <trng/yarn5s.hpp>
#include <trng/uniform01_dist.hpp>
// Monte Carlo pi kernel: each thread draws its share of (x, y) points in the
// unit square and counts how many fall inside the unit circle, writing the
// count to in[rank]. Launched as <<<1, size>>>, so threadIdx.x alone
// identifies the thread.
__global__ void parallel_pi(long samples, long *in, trng::yarn5s r) {
  long rank = threadIdx.x;  // thread id within the single block
  long size = blockDim.x;   // total number of threads
  // Each loop iteration below consumes two random numbers (x and y), hence
  // the factor 2 when skipping past lower-ranked threads' samples.
  r.jump(2 * (rank * samples / size));  // jump ahead
  trng::uniform01_dist<float> u;        // random number distribution
  in[rank] = 0;                         // local number of points in circle
  // This thread's contiguous slice of the global sample range.
  for (long i = rank * samples / size; i < (rank + 1) * samples / size; ++i) {
    const float x = u(r), y = u(r);  // choose random x- and y-coordinates
    if (x * x + y * y <= 1)          // is point in circle?
      ++in[rank];                    // increase thread-local counter
  }
}
// Host driver for the Monte Carlo pi estimate: launches one block of `size`
// threads, gathers the per-thread hit counters, and prints 4 * hits/samples.
int main(int argc, char *argv[]) {
  const long samples{1000000l};  // total number of points in square
  const int size{128};           // number of threads
  long *in_device;
  // Device buffer holding one hit counter per thread. The runtime calls
  // below were previously unchecked, so failures (e.g. no device present)
  // silently produced garbage output.
  if (cudaMalloc(&in_device, size * sizeof(*in_device)) != cudaSuccess) {
    std::cerr << "cudaMalloc failed" << std::endl;
    return EXIT_FAILURE;
  }
  trng::yarn5s r;
  // start parallel Monte Carlo
  parallel_pi<<<1, size>>>(samples, in_device, r);
  if (cudaGetLastError() != cudaSuccess) {
    std::cerr << "kernel launch failed" << std::endl;
    return EXIT_FAILURE;
  }
  // gather results; the blocking copy also synchronizes with the kernel
  std::vector<long> in(size);
  if (cudaMemcpy(in.data(), in_device, size * sizeof(*in),
                 cudaMemcpyDeviceToHost) != cudaSuccess) {
    std::cerr << "cudaMemcpy failed" << std::endl;
    return EXIT_FAILURE;
  }
  cudaFree(in_device);
  long sum{0};
  for (int rank{0}; rank < size; ++rank)
    sum += in[rank];
  // print result: area ratio of circle quadrant to unit square is pi/4
  std::cout << "pi = " << 4.0 * sum / samples << std::endl;
  return EXIT_SUCCESS;
}
|
d519445bd0c8a647cce37149cdc18954070df313.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
// Enumerates all visible HIP devices and prints each one's name, memory
// clock, memory bus width, and a derived peak-memory-bandwidth figure.
int main() {
  int nDevices;
  // NOTE(review): the runtime calls below are unchecked; if they fail,
  // nDevices / prop are read without having been set.
  hipGetDeviceCount(&nDevices);
  for (int i = 0; i < nDevices; i++) {
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, i);
    printf("Device Number: %d\n", i);
    printf(" Device name: %s\n", prop.name);
    printf(" Memory Clock Rate (KHz): %d\n",
    prop.memoryClockRate);
    printf(" Memory Bus Width (bits): %d\n",
    prop.memoryBusWidth);
    // kHz * (bits / 8 = bytes per transfer) / 1e6 -> GB/s; the 2.0 factor
    // presumably accounts for double-data-rate memory -- confirm.
    printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
    2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
  }
} | d519445bd0c8a647cce37149cdc18954070df313.cu | #include <stdio.h>
// Enumerates all visible CUDA devices and prints each one's name, memory
// clock, memory bus width, and a derived peak-memory-bandwidth figure.
int main() {
  int nDevices;
  // NOTE(review): the runtime calls below are unchecked; if they fail,
  // nDevices / prop are read without having been set.
  cudaGetDeviceCount(&nDevices);
  for (int i = 0; i < nDevices; i++) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, i);
    printf("Device Number: %d\n", i);
    printf(" Device name: %s\n", prop.name);
    printf(" Memory Clock Rate (KHz): %d\n",
    prop.memoryClockRate);
    printf(" Memory Bus Width (bits): %d\n",
    prop.memoryBusWidth);
    // kHz * (bits / 8 = bytes per transfer) / 1e6 -> GB/s; the 2.0 factor
    // presumably accounts for double-data-rate memory -- confirm.
    printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
    2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
  }
} |
7154b66397dce7c7db0eb2b3df3f1373de59a27d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <ctype.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "funcs.h"
#include "handleerror.h"
// Using the RED-BLACK alternating update scheme.
#define RED 0
#define BLACK 1
// Using a 2d grid of 16x16 threads. This grid should be safe in all modern gpus.
// Each thread corresponds to a point in the NxM grid.
const int num_threads = 16;
// declare constant memory on the device. these parameters stay constant throughout.
__device__ static int dev_N, dev_M;
__device__ static double dev_omega, dev_lambda, dev_dx, dev_dy;
// iterate is the gpu kernel that handles the gpu calls.
// the "red" parameter tells the function to alternate between red and black points in the grid.
__global__ void iterate(double* gvals, double *u, double* maxresid_per_block, int red);
// update is a gpu-only function that calculates the new "u" value
// and the residue per thread
__device__ void update(double* localu, double* localu_new, double* localg,
double* resid_per_thread);
// initialize is a gpu-only function that copies the u and g values from the global memory
// to the local memory.
__device__ void initialize(double* localu, double* localu_new, double* localg,
double* gvals, double* u);
// takes the residual obtained per thread and calculates the maximum value for each block
__device__ double getMaxResidPerBlock(double* resid_per_thread);
// Red-black SOR solver driver: parses CLI parameters, sets up host/device
// buffers, alternates red/black update kernels until the maximum residual
// drops below tol (or maxiter is reached), and writes the grid to fname.
// Usage: prog [N] [omega] [tol] [maxiter] [fname] [lambda]
int main(int argc, char* argv[]){
    /* Setting up from the input parameters */
    //=====================================================================================
    int N = 128;                       // Number of y points
    double omega = 1.5;                // Value of relaxation parameter.
    double tol = 1e-9;                 // Tolerance. Default value is 1e-9.
    int maxiter = 1000;                // Maximum number of iterations.
    const char* fname = "sources.out"; // Output file name
    double lambda = 100.;              // lambda parameter.
    if (argc > 1)
        N = atoi(argv[1]);
    if (argc > 2)
        omega = atof(argv[2]);
    if (argc > 3){
        tol = atof(argv[3]);
    }
    if (argc > 4){
        maxiter = atoi(argv[4]);
    }
    if (argc > 5){
        fname = argv[5];
    }
    if (argc > 6){
        lambda = atof(argv[6]);
    }
    int M = 2 * N - 1;
#ifdef VERBOSE
    // Using a preprocessor flag to control verbose/debug output
    printf("Parameters are:\n");
    printf("number of y-points (N) : %d\n", N);
    printf("number of x-points (M) : %d\n", M);
    printf("omega : %f\n", omega);
    // %g instead of %f: the default tol of 1e-9 printed as 0.000000 before.
    printf("tolerance: %g\n", tol);
    printf("max iterations: %d\n", maxiter);
    printf("Output fname: %s\n", fname);
    printf("lambda: %f\n", lambda);
#endif
    double *gvals = (double*)malloc(M * N * sizeof(double));
    double* u = (double*)malloc(N * M * sizeof(double));
    double dx, dy, maxresid;
    double x_min = -2., x_max = 2., y_min=-1, y_max=1;
    gvalsCalc(x_min, x_max, y_min, y_max, M, N, lambda, gvals, &dx, &dy);
    init2dArray(M, N, u);
    maxresid = 1 + tol;
    // completed initial parameter setup.
    //========================================================================================
    clock_t begin = clock();
    // Setting the number of blocks. The blocks at the edges can have threads that are not utilized
    int num_blocks_x = M / num_threads + (M % num_threads ? 1 : 0);
    int num_blocks_y = N / num_threads + (N % num_threads ? 1 : 0);
#ifdef VERBOSE
    printf("threads %d\n", num_threads);
    printf("blocks %d %d\n", num_blocks_x, num_blocks_y);
#endif
    double* resid_per_block_red = (double*)malloc(num_blocks_x * num_blocks_y * sizeof(double));
    double* resid_per_block_black = (double*)malloc(num_blocks_x * num_blocks_y * sizeof(double));
    // Copy the constants to the constant memory on the gpu
    HANDLE_ERROR(hipMemcpyToSymbol(dev_N, &N, sizeof(int)));
    HANDLE_ERROR(hipMemcpyToSymbol(dev_M, &M, sizeof(int)));
    HANDLE_ERROR(hipMemcpyToSymbol(dev_omega, &omega, sizeof(double)));
    HANDLE_ERROR(hipMemcpyToSymbol(dev_lambda, &lambda, sizeof(double)));
    HANDLE_ERROR(hipMemcpyToSymbol(dev_dx, &dx, sizeof(double)));
    HANDLE_ERROR(hipMemcpyToSymbol(dev_dy, &dy, sizeof(double)));
    // initialize the global memory on the gpu.
    double* dev_u;
    double* dev_gvals;
    double* dev_resid_per_block; // the max residual is calculated up to per block within the gpu
    HANDLE_ERROR(hipMalloc((void**)&dev_u, N * M * sizeof(double)));
    HANDLE_ERROR(hipMalloc((void**)&dev_gvals, N * M * sizeof(double)));
    // One residual slot per block is all the kernel ever writes; the former
    // M * N allocation here was a large over-allocation.
    HANDLE_ERROR(hipMalloc((void**)&dev_resid_per_block, num_blocks_x * num_blocks_y * sizeof(double)));
    HANDLE_ERROR(hipMemcpy(dev_u, u, M * N * sizeof(double), hipMemcpyHostToDevice));
    HANDLE_ERROR(hipMemcpy(dev_gvals, gvals, M * N * sizeof(double),
                           hipMemcpyHostToDevice));
    // Setting up the blocks/threads grid.
    dim3 meshBlocks(num_blocks_x, num_blocks_y);
    dim3 meshThreads(num_threads, num_threads);
    int iter;
    for (iter=0; (iter < maxiter && maxresid > tol); iter++){
        // The actual update iterations. The RED and BLACK points are updated with sequential kernel calls.
        // Separately storing the residual per block from the red and black points.
        hipLaunchKernelGGL(( iterate), dim3(meshBlocks), dim3(meshThreads), 0, 0, dev_gvals, dev_u, dev_resid_per_block, RED);
        HANDLE_ERROR(hipMemcpy(resid_per_block_red, dev_resid_per_block, num_blocks_x * num_blocks_y * sizeof(double),
                               hipMemcpyDeviceToHost));
        hipLaunchKernelGGL(( iterate), dim3(meshBlocks), dim3(meshThreads), 0, 0, dev_gvals, dev_u, dev_resid_per_block, BLACK);
        HANDLE_ERROR(hipMemcpy(resid_per_block_black, dev_resid_per_block, num_blocks_x * num_blocks_y * sizeof(double),
                               hipMemcpyDeviceToHost));
        // Comparing all residuals from the red and black points to calculate the maximum residual.
        maxresid = 0;
        for(int by=0; by<num_blocks_y; ++by){
            for(int bx=0; bx<num_blocks_x; ++bx){
                maxresid = fmax(resid_per_block_red[by * num_blocks_x + bx], maxresid);
                maxresid = fmax(resid_per_block_black[by * num_blocks_x + bx], maxresid);
            }
        }
#ifdef VERBOSE
        printf("iter %d maxresid %f\n", iter, maxresid);
#endif
    }
    // Copying back the final grid points
    HANDLE_ERROR(hipMemcpy(u, dev_u, M * N * sizeof(double),
                           hipMemcpyDeviceToHost));
    printf("runtime %g\n", (double)(clock()-begin)/CLOCKS_PER_SEC);
    // Guard against an unwritable output path instead of crashing in fwrite.
    FILE* output = fopen(fname, "w");
    if (output != NULL){
        fwrite(u, sizeof(double), N * M, output);
        fclose(output);
    } else {
        fprintf(stderr, "could not open output file %s\n", fname);
    }
    free(u);
    free(gvals);
    free(resid_per_block_red);
    free(resid_per_block_black);
    HANDLE_ERROR(hipFree(dev_u));
    HANDLE_ERROR(hipFree(dev_gvals));
    HANDLE_ERROR(hipFree(dev_resid_per_block));
    return 0;
}
// This is the workhorse function..
// Performs one red (or black) half-sweep of the SOR update for the tile
// owned by this block and publishes the block's maximum residual.
// Expects a 2D launch of num_threads x num_threads threads per block.
__global__ void iterate(double* gvals, double* u, double* resid_per_block, int red){
    // using the shared memory for information needed blockwise.
    __shared__ double localu[(num_threads + 2) * (num_threads + 2)];
    __shared__ double localg[num_threads * num_threads];
    // this is where the updated gridpoints are stored, before copying back to the global memory.
    __shared__ double localu_new[num_threads * num_threads];
    // Edge blocks contain threads that never write their residual slot; zero
    // them all in advance so getMaxResidPerBlock never reads garbage.
    __shared__ double resid_per_thread[num_threads * num_threads];
    resid_per_thread[threadIdx.y * num_threads + threadIdx.x] = 0;
    int g_ix = blockIdx.x * num_threads + threadIdx.x;
    int g_iy = blockIdx.y * num_threads + threadIdx.y;
    int g_i = g_iy * dev_M + g_ix;
    int t_i = threadIdx.y * num_threads + threadIdx.x;
    // Threads outside the actual grid take no part in the computation, but
    // they MUST still reach every __syncthreads(). The previous version
    // placed the barriers inside this divergent branch, which is undefined
    // behavior for the partially-filled edge blocks.
    bool active = (g_ix < dev_M && g_iy < dev_N);
    if (active){
        // copying data from global to shared memory
        initialize(localu, localu_new, localg, gvals, u);
    }
    __syncthreads();
    if (active){
        // This is an XOR trick to select only red or only black values per iteration.
        int cond2 = ((g_ix % 2 == 0) && (g_iy % 2 == 0));
        int cond3 = ((g_ix % 2 != 0) && (g_iy % 2 != 0));
        if (red != (cond2 || cond3) ){
            // the actual updates
            update(localu, localu_new, localg, resid_per_thread);
        }
        // copy back to global memory (non-updated colors rewrite their old value).
        u[g_i] = localu_new[t_i];
    }
    __syncthreads();
    // calculate the maximum residue per block from the individual threads.
    // Fix: row stride in blocks is gridDim.x, matching the host's
    // resid_per_block[by * num_blocks_x + bx] layout; the previous
    // gridDim.y stride was wrong whenever the grid is not square
    // (here M = 2N - 1, so it never is).
    if (active && threadIdx.x == 0 && threadIdx.y == 0){
        resid_per_block[blockIdx.y * gridDim.x + blockIdx.x] = getMaxResidPerBlock(resid_per_thread);
    }
}
// Computes the SOR update for this thread's grid point from the shared-
// memory tile and records the (signed) residual in resid_per_thread.
__device__ void update(double* localu, double* localu_new, double* localg, double* resid_per_thread){
    // Tile-local coordinates, offset by 1 to skip the ghost border row/column.
    int l_ix = threadIdx.x + 1;
    int l_iy = threadIdx.y + 1;
    int l_dim = num_threads + 2;
    int l_i = l_iy * l_dim + l_ix;
    int t_i = threadIdx.y * num_threads + threadIdx.x;
    // Global grid coordinates, used only for the boundary tests below.
    int g_ix = blockIdx.x * num_threads + threadIdx.x;
    int g_iy = blockIdx.y * num_threads + threadIdx.y;
    double usum = 0;
    // The conditionals are to make sure that we are only using values from within the grid (not periodic)
    // for the updates.
    if (g_iy > 0){ // for y =0, we update from points indexed as y-1
        usum += localu[(l_iy - 1) * l_dim + l_ix] - localu[l_i];
    }
    if (g_iy < (dev_N - 1)){ // for y = (N -1)
        usum += localu[(l_iy + 1) * l_dim + l_ix] - localu[l_i];
    }
    if (g_ix > 0){ // for x=0
        usum += localu[l_iy * l_dim + l_ix -1] - localu[l_i];
    }
    if (g_ix < (dev_M - 1)){ // for x = (M - 1)
        usum += localu[l_iy * l_dim + l_ix + 1] - localu[l_i];
    }
    // SOR step: new value is the old value plus omega times the residual.
    double resid = 0.25 * (usum - dev_dx * dev_dy * localg[t_i]);
    localu_new[t_i] = localu[l_i] + dev_omega * resid;
    resid_per_thread[t_i] = resid;
}
// Loads this block's tile of u (plus a one-cell ghost border) and its g
// values from global into shared memory. Ghost cells wrap around
// periodically here purely for convenience; update() never reads across the
// true grid boundary (it guards on the global coordinates).
__device__ void initialize(double* localu, double* localu_new, double* localg, double* gvals, double* u){
    int g_ix = blockIdx.x * num_threads + threadIdx.x;
    int g_iy = blockIdx.y * num_threads + threadIdx.y;
    int g_i = g_iy * dev_M + g_ix;
    // each block contains an additional two rows and two columns of ghost points at the edges.
    // the actual update points are indexed by l_ix.
    int l_ix = threadIdx.x + 1;
    int l_iy = threadIdx.y + 1;
    int l_dim = num_threads + 2;
    int l_i = l_iy * l_dim + l_ix;
    // For convenience, we assume that the grid is periodic for the initialization only.
    // the updates don't assume the same.
    int g_imx = (g_ix + dev_M -1) % dev_M;  // x-1 neighbour, wrapped
    int g_imy = (g_iy + dev_N -1) % dev_N;  // y-1 neighbour, wrapped
    int g_ipx = (g_ix + 1) % dev_M;         // x+1 neighbour, wrapped
    int g_ipy = (g_iy + 1) % dev_N;         // y+1 neighbour, wrapped
    int t_i = threadIdx.y * num_threads + threadIdx.x;
    localu[l_i] = u[g_i];
    // Seed the output tile with the current value so non-updated colors
    // simply copy their old value back.
    localu_new[t_i] = u[g_i];
    localg[t_i] = gvals[g_i];
    // The threads at the edges additionally initialize the adjacent ghost point.
    if (threadIdx.x == 0){
        localu[l_iy * l_dim] = u[g_iy * dev_M + g_imx];
    }
    if (threadIdx.x == num_threads - 1){
        localu[l_iy * l_dim + l_ix + 1] = u[g_iy * dev_M + g_ipx];
    }
    if (threadIdx.y == 0){
        localu[l_ix] = u[g_imy * dev_M + g_ix];
    }
    if (threadIdx.y == num_threads - 1){
        localu[(l_iy + 1) * l_dim + l_ix] = u[g_ipy * dev_M + g_ix];
    }
}
// Serial scan over all per-thread residuals of the block (run by thread
// (0,0) only), returning the largest absolute value.
__device__ double getMaxResidPerBlock(double* resid_per_thread){
    double maxresid = 0;
    for (int ry=0; ry < num_threads; ry++){
        for (int rx=0; rx < num_threads; rx++){
            maxresid = fmax(maxresid, fabs(resid_per_thread[ry * num_threads + rx]));
        }
    }
    return maxresid;
} | 7154b66397dce7c7db0eb2b3df3f1373de59a27d.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <ctype.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "funcs.h"
#include "handleerror.h"
// Using the RED-BLACK alternating update scheme.
#define RED 0
#define BLACK 1
// Using a 2d grid of 16x16 threads. This grid should be safe in all modern gpus.
// Each thread corresponds to a point in the NxM grid.
const int num_threads = 16;
// declare constant memory on the device. these parameters stay constant throughout.
__device__ static int dev_N, dev_M;
__device__ static double dev_omega, dev_lambda, dev_dx, dev_dy;
// iterate is the gpu kernel that handles the gpu calls.
// the "red" parameter tells the function to alternate between red and black points in the grid.
__global__ void iterate(double* gvals, double *u, double* maxresid_per_block, int red);
// update is a gpu-only function that calculates the new "u" value
// and the residue per thread
__device__ void update(double* localu, double* localu_new, double* localg,
double* resid_per_thread);
// initialize is a gpu-only function that copies the u and g values from the global memory
// to the local memory.
__device__ void initialize(double* localu, double* localu_new, double* localg,
double* gvals, double* u);
// takes the residual obtained per thread and calculates the maximum value for each block
__device__ double getMaxResidPerBlock(double* resid_per_thread);
// Red-black SOR solver driver: parses CLI parameters, sets up host/device
// buffers, alternates red/black update kernels until the maximum residual
// drops below tol (or maxiter is reached), and writes the grid to fname.
// Usage: prog [N] [omega] [tol] [maxiter] [fname] [lambda]
int main(int argc, char* argv[]){
    /* Setting up from the input parameters */
    //=====================================================================================
    int N = 128;                       // Number of y points
    double omega = 1.5;                // Value of relaxation parameter.
    double tol = 1e-9;                 // Tolerance. Default value is 1e-9.
    int maxiter = 1000;                // Maximum number of iterations.
    const char* fname = "sources.out"; // Output file name
    double lambda = 100.;              // lambda parameter.
    if (argc > 1)
        N = atoi(argv[1]);
    if (argc > 2)
        omega = atof(argv[2]);
    if (argc > 3){
        tol = atof(argv[3]);
    }
    if (argc > 4){
        maxiter = atoi(argv[4]);
    }
    if (argc > 5){
        fname = argv[5];
    }
    if (argc > 6){
        lambda = atof(argv[6]);
    }
    int M = 2 * N - 1;
#ifdef VERBOSE
    // Using a preprocessor flag to control verbose/debug output
    printf("Parameters are:\n");
    printf("number of y-points (N) : %d\n", N);
    printf("number of x-points (M) : %d\n", M);
    printf("omega : %f\n", omega);
    // %g instead of %f: the default tol of 1e-9 printed as 0.000000 before.
    printf("tolerance: %g\n", tol);
    printf("max iterations: %d\n", maxiter);
    printf("Output fname: %s\n", fname);
    printf("lambda: %f\n", lambda);
#endif
    double *gvals = (double*)malloc(M * N * sizeof(double));
    double* u = (double*)malloc(N * M * sizeof(double));
    double dx, dy, maxresid;
    double x_min = -2., x_max = 2., y_min=-1, y_max=1;
    gvalsCalc(x_min, x_max, y_min, y_max, M, N, lambda, gvals, &dx, &dy);
    init2dArray(M, N, u);
    maxresid = 1 + tol;
    // completed initial parameter setup.
    //========================================================================================
    clock_t begin = clock();
    // Setting the number of blocks. The blocks at the edges can have threads that are not utilized
    int num_blocks_x = M / num_threads + (M % num_threads ? 1 : 0);
    int num_blocks_y = N / num_threads + (N % num_threads ? 1 : 0);
#ifdef VERBOSE
    printf("threads %d\n", num_threads);
    printf("blocks %d %d\n", num_blocks_x, num_blocks_y);
#endif
    double* resid_per_block_red = (double*)malloc(num_blocks_x * num_blocks_y * sizeof(double));
    double* resid_per_block_black = (double*)malloc(num_blocks_x * num_blocks_y * sizeof(double));
    // Copy the constants to the constant memory on the gpu
    HANDLE_ERROR(cudaMemcpyToSymbol(dev_N, &N, sizeof(int)));
    HANDLE_ERROR(cudaMemcpyToSymbol(dev_M, &M, sizeof(int)));
    HANDLE_ERROR(cudaMemcpyToSymbol(dev_omega, &omega, sizeof(double)));
    HANDLE_ERROR(cudaMemcpyToSymbol(dev_lambda, &lambda, sizeof(double)));
    HANDLE_ERROR(cudaMemcpyToSymbol(dev_dx, &dx, sizeof(double)));
    HANDLE_ERROR(cudaMemcpyToSymbol(dev_dy, &dy, sizeof(double)));
    // initialize the global memory on the gpu.
    double* dev_u;
    double* dev_gvals;
    double* dev_resid_per_block; // the max residual is calculated up to per block within the gpu
    HANDLE_ERROR(cudaMalloc((void**)&dev_u, N * M * sizeof(double)));
    HANDLE_ERROR(cudaMalloc((void**)&dev_gvals, N * M * sizeof(double)));
    // One residual slot per block is all the kernel ever writes; the former
    // M * N allocation here was a large over-allocation.
    HANDLE_ERROR(cudaMalloc((void**)&dev_resid_per_block, num_blocks_x * num_blocks_y * sizeof(double)));
    HANDLE_ERROR(cudaMemcpy(dev_u, u, M * N * sizeof(double), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(dev_gvals, gvals, M * N * sizeof(double),
                            cudaMemcpyHostToDevice));
    // Setting up the blocks/threads grid.
    dim3 meshBlocks(num_blocks_x, num_blocks_y);
    dim3 meshThreads(num_threads, num_threads);
    int iter;
    for (iter=0; (iter < maxiter && maxresid > tol); iter++){
        // The actual update iterations. The RED and BLACK points are updated with sequential kernel calls.
        // Separately storing the residual per block from the red and black points.
        iterate<<<meshBlocks, meshThreads>>>(dev_gvals, dev_u, dev_resid_per_block, RED);
        HANDLE_ERROR(cudaMemcpy(resid_per_block_red, dev_resid_per_block, num_blocks_x * num_blocks_y * sizeof(double),
                                cudaMemcpyDeviceToHost));
        iterate<<<meshBlocks, meshThreads>>>(dev_gvals, dev_u, dev_resid_per_block, BLACK);
        HANDLE_ERROR(cudaMemcpy(resid_per_block_black, dev_resid_per_block, num_blocks_x * num_blocks_y * sizeof(double),
                                cudaMemcpyDeviceToHost));
        // Comparing all residuals from the red and black points to calculate the maximum residual.
        maxresid = 0;
        for(int by=0; by<num_blocks_y; ++by){
            for(int bx=0; bx<num_blocks_x; ++bx){
                maxresid = fmax(resid_per_block_red[by * num_blocks_x + bx], maxresid);
                maxresid = fmax(resid_per_block_black[by * num_blocks_x + bx], maxresid);
            }
        }
#ifdef VERBOSE
        printf("iter %d maxresid %f\n", iter, maxresid);
#endif
    }
    // Copying back the final grid points
    HANDLE_ERROR(cudaMemcpy(u, dev_u, M * N * sizeof(double),
                            cudaMemcpyDeviceToHost));
    printf("runtime %g\n", (double)(clock()-begin)/CLOCKS_PER_SEC);
    // Guard against an unwritable output path instead of crashing in fwrite.
    FILE* output = fopen(fname, "w");
    if (output != NULL){
        fwrite(u, sizeof(double), N * M, output);
        fclose(output);
    } else {
        fprintf(stderr, "could not open output file %s\n", fname);
    }
    free(u);
    free(gvals);
    free(resid_per_block_red);
    free(resid_per_block_black);
    HANDLE_ERROR(cudaFree(dev_u));
    HANDLE_ERROR(cudaFree(dev_gvals));
    HANDLE_ERROR(cudaFree(dev_resid_per_block));
    return 0;
}
// This is the workhorse function..
// Performs one red (or black) half-sweep of the SOR update for the tile
// owned by this block and publishes the block's maximum residual.
// Expects a 2D launch of num_threads x num_threads threads per block.
__global__ void iterate(double* gvals, double* u, double* resid_per_block, int red){
    // using the shared memory for information needed blockwise.
    __shared__ double localu[(num_threads + 2) * (num_threads + 2)];
    __shared__ double localg[num_threads * num_threads];
    // this is where the updated gridpoints are stored, before copying back to the global memory.
    __shared__ double localu_new[num_threads * num_threads];
    // Edge blocks contain threads that never write their residual slot; zero
    // them all in advance so getMaxResidPerBlock never reads garbage.
    __shared__ double resid_per_thread[num_threads * num_threads];
    resid_per_thread[threadIdx.y * num_threads + threadIdx.x] = 0;
    int g_ix = blockIdx.x * num_threads + threadIdx.x;
    int g_iy = blockIdx.y * num_threads + threadIdx.y;
    int g_i = g_iy * dev_M + g_ix;
    int t_i = threadIdx.y * num_threads + threadIdx.x;
    // Threads outside the actual grid take no part in the computation, but
    // they MUST still reach every __syncthreads(). The previous version
    // placed the barriers inside this divergent branch, which is undefined
    // behavior for the partially-filled edge blocks.
    bool active = (g_ix < dev_M && g_iy < dev_N);
    if (active){
        // copying data from global to shared memory
        initialize(localu, localu_new, localg, gvals, u);
    }
    __syncthreads();
    if (active){
        // This is an XOR trick to select only red or only black values per iteration.
        int cond2 = ((g_ix % 2 == 0) && (g_iy % 2 == 0));
        int cond3 = ((g_ix % 2 != 0) && (g_iy % 2 != 0));
        if (red != (cond2 || cond3) ){
            // the actual updates
            update(localu, localu_new, localg, resid_per_thread);
        }
        // copy back to global memory (non-updated colors rewrite their old value).
        u[g_i] = localu_new[t_i];
    }
    __syncthreads();
    // calculate the maximum residue per block from the individual threads.
    // Fix: row stride in blocks is gridDim.x, matching the host's
    // resid_per_block[by * num_blocks_x + bx] layout; the previous
    // gridDim.y stride was wrong whenever the grid is not square
    // (here M = 2N - 1, so it never is).
    if (active && threadIdx.x == 0 && threadIdx.y == 0){
        resid_per_block[blockIdx.y * gridDim.x + blockIdx.x] = getMaxResidPerBlock(resid_per_thread);
    }
}
// Computes the SOR update for this thread's grid point from the shared-
// memory tile and records the (signed) residual in resid_per_thread.
__device__ void update(double* localu, double* localu_new, double* localg, double* resid_per_thread){
    // Tile-local coordinates, offset by 1 to skip the ghost border row/column.
    int l_ix = threadIdx.x + 1;
    int l_iy = threadIdx.y + 1;
    int l_dim = num_threads + 2;
    int l_i = l_iy * l_dim + l_ix;
    int t_i = threadIdx.y * num_threads + threadIdx.x;
    // Global grid coordinates, used only for the boundary tests below.
    int g_ix = blockIdx.x * num_threads + threadIdx.x;
    int g_iy = blockIdx.y * num_threads + threadIdx.y;
    double usum = 0;
    // The conditionals are to make sure that we are only using values from within the grid (not periodic)
    // for the updates.
    if (g_iy > 0){ // for y =0, we update from points indexed as y-1
        usum += localu[(l_iy - 1) * l_dim + l_ix] - localu[l_i];
    }
    if (g_iy < (dev_N - 1)){ // for y = (N -1)
        usum += localu[(l_iy + 1) * l_dim + l_ix] - localu[l_i];
    }
    if (g_ix > 0){ // for x=0
        usum += localu[l_iy * l_dim + l_ix -1] - localu[l_i];
    }
    if (g_ix < (dev_M - 1)){ // for x = (M - 1)
        usum += localu[l_iy * l_dim + l_ix + 1] - localu[l_i];
    }
    // SOR step: new value is the old value plus omega times the residual.
    double resid = 0.25 * (usum - dev_dx * dev_dy * localg[t_i]);
    localu_new[t_i] = localu[l_i] + dev_omega * resid;
    resid_per_thread[t_i] = resid;
}
// Loads this block's tile of u (plus a one-cell ghost border) and its g
// values from global into shared memory. Ghost cells wrap around
// periodically here purely for convenience; update() never reads across the
// true grid boundary (it guards on the global coordinates).
__device__ void initialize(double* localu, double* localu_new, double* localg, double* gvals, double* u){
    int g_ix = blockIdx.x * num_threads + threadIdx.x;
    int g_iy = blockIdx.y * num_threads + threadIdx.y;
    int g_i = g_iy * dev_M + g_ix;
    // each block contains an additional two rows and two columns of ghost points at the edges.
    // the actual update points are indexed by l_ix.
    int l_ix = threadIdx.x + 1;
    int l_iy = threadIdx.y + 1;
    int l_dim = num_threads + 2;
    int l_i = l_iy * l_dim + l_ix;
    // For convenience, we assume that the grid is periodic for the initialization only.
    // the updates don't assume the same.
    int g_imx = (g_ix + dev_M -1) % dev_M;  // x-1 neighbour, wrapped
    int g_imy = (g_iy + dev_N -1) % dev_N;  // y-1 neighbour, wrapped
    int g_ipx = (g_ix + 1) % dev_M;         // x+1 neighbour, wrapped
    int g_ipy = (g_iy + 1) % dev_N;         // y+1 neighbour, wrapped
    int t_i = threadIdx.y * num_threads + threadIdx.x;
    localu[l_i] = u[g_i];
    // Seed the output tile with the current value so non-updated colors
    // simply copy their old value back.
    localu_new[t_i] = u[g_i];
    localg[t_i] = gvals[g_i];
    // The threads at the edges additionally initialize the adjacent ghost point.
    if (threadIdx.x == 0){
        localu[l_iy * l_dim] = u[g_iy * dev_M + g_imx];
    }
    if (threadIdx.x == num_threads - 1){
        localu[l_iy * l_dim + l_ix + 1] = u[g_iy * dev_M + g_ipx];
    }
    if (threadIdx.y == 0){
        localu[l_ix] = u[g_imy * dev_M + g_ix];
    }
    if (threadIdx.y == num_threads - 1){
        localu[(l_iy + 1) * l_dim + l_ix] = u[g_ipy * dev_M + g_ix];
    }
}
// Serial scan over all per-thread residuals of the block (run by thread
// (0,0) only), returning the largest absolute value.
__device__ double getMaxResidPerBlock(double* resid_per_thread){
    double maxresid = 0;
    for (int ry=0; ry < num_threads; ry++){
        for (int rx=0; rx < num_threads; rx++){
            maxresid = fmax(maxresid, fabs(resid_per_thread[ry * num_threads + rx]));
        }
    }
    return maxresid;
} |
51b5dae5b4bd6f734d6807759a68bd75af184bd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
// Element-wise (leaky) ReLU forward:
//   out[i] = in[i]                   if in[i] > 0
//   out[i] = in[i] * negative_slope  otherwise
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
  }
}
// Same forward computation, but additionally writes a 0/1 activation mask
// (1 where the input was positive) for consumers of a second top blob.
template <typename Dtype>
__global__ void ReLUForward_With_Mask(const int n, const Dtype* in, Dtype* out, Dtype* out_mask,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
    out_mask[index] = in[index] > 0 ? 1 : 0;
  }
}
// GPU forward pass: applies (leaky) ReLU to bottom[0] into top[0].  When a
// second top blob is provided it also receives the activation mask.
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // The presence of a second top blob selects the mask-producing kernel.
  const bool use_top_mask = top.size() > 1;
  Dtype* top_mask = NULL;
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  if (use_top_mask) {
    top_mask = top[1]->mutable_gpu_data();
    hipLaunchKernelGGL(( ReLUForward_With_Mask<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, top_data, top_mask, negative_slope);
    CUDA_POST_KERNEL_CHECK;
  }
  else {
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, top_data, negative_slope);
    CUDA_POST_KERNEL_CHECK;
    // << " count: " << count << " bottom_data: "
    // << (unsigned long)bottom_data
    // << " top_data: " << (unsigned long)top_data
    // << " blocks: " << CAFFE_GET_BLOCKS(count)
    // << " threads: " << CAFFE_CUDA_NUM_THREADS;
  }
}
// Backward kernel: gradient is passed through where the forward input was
// positive, and scaled by negative_slope elsewhere.
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_diff[index] * ((in_data[index] > 0)
        + (in_data[index] <= 0) * negative_slope);
  }
}
// GPU backward pass: propagates top[0]'s gradient through the nonlinearity
// into bottom[0], but only when the caller requested propagation.
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, top_diff, bottom_data, bottom_diff, negative_slope);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
| 51b5dae5b4bd6f734d6807759a68bd75af184bd3.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe {
// Element-wise (leaky) ReLU forward:
//   out[i] = in[i]                   if in[i] > 0
//   out[i] = in[i] * negative_slope  otherwise
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
  }
}
// Same forward computation, but additionally writes a 0/1 activation mask
// (1 where the input was positive) for consumers of a second top blob.
template <typename Dtype>
__global__ void ReLUForward_With_Mask(const int n, const Dtype* in, Dtype* out, Dtype* out_mask,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
    out_mask[index] = in[index] > 0 ? 1 : 0;
  }
}
// GPU forward pass: applies (leaky) ReLU to bottom[0] into top[0].  When a
// second top blob is provided it also receives the activation mask.
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  // The presence of a second top blob selects the mask-producing kernel.
  const bool use_top_mask = top.size() > 1;
  Dtype* top_mask = NULL;
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  if (use_top_mask) {
    top_mask = top[1]->mutable_gpu_data();
    ReLUForward_With_Mask<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, top_data, top_mask, negative_slope);
    CUDA_POST_KERNEL_CHECK;
  }
  else {
    // NOLINT_NEXT_LINE(whitespace/operators)
    ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, top_data, negative_slope);
    CUDA_POST_KERNEL_CHECK;
    // << " count: " << count << " bottom_data: "
    // << (unsigned long)bottom_data
    // << " top_data: " << (unsigned long)top_data
    // << " blocks: " << CAFFE_GET_BLOCKS(count)
    // << " threads: " << CAFFE_CUDA_NUM_THREADS;
  }
}
// Backward kernel: gradient is passed through where the forward input was
// positive, and scaled by negative_slope elsewhere.
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_diff[index] * ((in_data[index] > 0)
        + (in_data[index] <= 0) * negative_slope);
  }
}
// GPU backward pass: propagates top[0]'s gradient through the nonlinearity
// into bottom[0], but only when the caller requested propagation.
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    // NOLINT_NEXT_LINE(whitespace/operators)
    ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, bottom_data, bottom_diff, negative_slope);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
faa7dc3afdbe6e8b0c24be33ae4c7040355d5414.hip | // !!! This is a file automatically generated by hipify!!!
//
// CUDA DCA Driver
//
//This file invokes all of the necessary function calls to prepare
//and simulate a compound pendulum system through the use of the
//recursive DCA algorithm. The majority of this algorithm is run
//on the gpu. Output is created in a format that is
//readable in python for answer checking and graphing purposes.
//Included Files
#include <malloc.h>
#include <iostream>
#include <math.h>
#include "funct_bin/classes.h"
#include "d_code/deviceDisassemble.h"
#include "d_code/deviceAssemble.h"
#include "d_code/deviceInitialize.h"
#include "d_code/deviceFuncts.h"
#include "funct_bin/npy.h"
#include <math.h>
#include <fstream>
#include <limits>
//Function Prototypes
// Function found in RK45.cu
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[],int gpu, int data);
//Main function
// Benchmark driver: sweeps the system size n and, for each size, times one
// recursive-DCA assembly on the GPU and one on the CPU (RecDCA's 7th
// argument selects the device).  Elapsed milliseconds are appended to
// gpuassembletime2.mtx and cpuassembletime2.mtx respectively.
int main()
{
    int data = 1;             // data-collection flag forwarded to the GPU run
    int n = 0;                // current system size
    std::ofstream timedata;   // GPU timings
    std::ofstream timedata2;  // CPU timings
    timedata2.open("cpuassembletime2.mtx");
    timedata.open("gpuassembletime2.mtx");
    // Print doubles at full precision.
    typedef std::numeric_limits< double > dbl;
    std::cout.precision(dbl::digits10);
    // HIP events used as the timing clock for both runs.
    hipEvent_t beginEvent;
    hipEvent_t endEvent;
    hipEventCreate( &beginEvent );
    hipEventCreate( &endEvent );
    float timeValue;
    while (n < 80000)
    {
        // Step n finely at first, then progressively more coarsely.
        if (n < 500)
        {
            n += 10;
        }
        else if (n < 2000)
        {
            n += 100;
        }
        else if (n < 10000)
        {
            n += 1000;
        }
        else
        {
            n += 10000;
        }
        // Work buffers sized for the current n (layouts defined by RecDCA).
        double *Zs = (double*)malloc(sizeof(double)*n*26*6);
        double *m = (double*)malloc(sizeof(double)*n);
        double *Xs = (double*)malloc(sizeof(double)*n*5*5);
        // --- GPU pass (7th argument 1 selects the GPU path) ---
        hipEventRecord( beginEvent, 0 );
        RecDCA(Zs, n, 0, m, 0, Xs, 1, data);
        hipEventRecord( endEvent, 0 );
        hipEventSynchronize( endEvent );
        hipEventElapsedTime( &timeValue, beginEvent, endEvent );
        timedata << timeValue << " ";
        // --- CPU pass ---
        hipEventRecord(beginEvent, 0);
        RecDCA(Zs, n, 0, m, 0, Xs, 0, 0);
        hipEventRecord(endEvent, 0);
        hipEventSynchronize(endEvent);
        hipEventElapsedTime( &timeValue, beginEvent, endEvent );
        timedata2 << timeValue << " ";
        // BUG FIX: read the sticky error state exactly once.  The original
        // called hipGetLastError() twice; the first call clears the status,
        // so the second always printed success even after a failure.
        hipError_t lastErr = hipGetLastError();
        if ( 0 != lastErr )
        {
            printf( "Error!\n" );
        }
        std::cout<<"xxx"<<lastErr<<"xxx"<<std::endl;
        std::cout << n << std::endl;
        free(Zs);
        free(m);
        free(Xs);
    }
    // BUG FIX: release the timing events (the original leaked them).
    hipEventDestroy( beginEvent );
    hipEventDestroy( endEvent );
    timedata2.close();
    timedata.close();
    return EXIT_SUCCESS; //Program completed successfully
}
| faa7dc3afdbe6e8b0c24be33ae4c7040355d5414.cu | //
// CUDA DCA Driver
//
//This file invokes all of the necessary function calls to prepare
//and simulate a compound pendulum system through the use of the
//recursive DCA algorithm. The majority of this algorithm is run
//on the gpu. Output is created in a format that is
//readable in python for answer checking and graphing purposes.
//Included Files
#include <malloc.h>
#include <iostream>
#include <math.h>
#include "funct_bin/classes.h"
#include "d_code/deviceDisassemble.h"
#include "d_code/deviceAssemble.h"
#include "d_code/deviceInitialize.h"
#include "d_code/deviceFuncts.h"
#include "funct_bin/npy.h"
#include <math.h>
#include <fstream>
#include <limits>
//Function Prototypes
// Function found in RK45.cu
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[],int gpu, int data);
//Main function
// Benchmark driver: sweeps the system size n and, for each size, times one
// recursive-DCA assembly on the GPU and one on the CPU (RecDCA's 7th
// argument selects the device).  Elapsed milliseconds are appended to
// gpuassembletime2.mtx and cpuassembletime2.mtx respectively.
int main()
{
    int data = 1;             // data-collection flag forwarded to the GPU run
    int n = 0;                // current system size
    std::ofstream timedata;   // GPU timings
    std::ofstream timedata2;  // CPU timings
    timedata2.open("cpuassembletime2.mtx");
    timedata.open("gpuassembletime2.mtx");
    // Print doubles at full precision.
    typedef std::numeric_limits< double > dbl;
    std::cout.precision(dbl::digits10);
    // CUDA events used as the timing clock for both runs.
    cudaEvent_t beginEvent;
    cudaEvent_t endEvent;
    cudaEventCreate( &beginEvent );
    cudaEventCreate( &endEvent );
    float timeValue;
    while (n < 80000)
    {
        // Step n finely at first, then progressively more coarsely.
        if (n < 500)
        {
            n += 10;
        }
        else if (n < 2000)
        {
            n += 100;
        }
        else if (n < 10000)
        {
            n += 1000;
        }
        else
        {
            n += 10000;
        }
        // Work buffers sized for the current n (layouts defined by RecDCA).
        double *Zs = (double*)malloc(sizeof(double)*n*26*6);
        double *m = (double*)malloc(sizeof(double)*n);
        double *Xs = (double*)malloc(sizeof(double)*n*5*5);
        // --- GPU pass (7th argument 1 selects the GPU path) ---
        cudaEventRecord( beginEvent, 0 );
        RecDCA(Zs, n, 0, m, 0, Xs, 1, data);
        cudaEventRecord( endEvent, 0 );
        cudaEventSynchronize( endEvent );
        cudaEventElapsedTime( &timeValue, beginEvent, endEvent );
        timedata << timeValue << " ";
        // --- CPU pass ---
        cudaEventRecord(beginEvent, 0);
        RecDCA(Zs, n, 0, m, 0, Xs, 0, 0);
        cudaEventRecord(endEvent, 0);
        cudaEventSynchronize(endEvent);
        cudaEventElapsedTime( &timeValue, beginEvent, endEvent );
        timedata2 << timeValue << " ";
        // BUG FIX: read the sticky error state exactly once.  The original
        // called cudaGetLastError() twice; the first call clears the status,
        // so the second always printed success even after a failure.
        cudaError_t lastErr = cudaGetLastError();
        if ( 0 != lastErr )
        {
            printf( "Error!\n" );
        }
        std::cout<<"xxx"<<lastErr<<"xxx"<<std::endl;
        std::cout << n << std::endl;
        free(Zs);
        free(m);
        free(Xs);
    }
    // BUG FIX: release the timing events (the original leaked them).
    cudaEventDestroy( beginEvent );
    cudaEventDestroy( endEvent );
    timedata2.close();
    timedata.close();
    return EXIT_SUCCESS; //Program completed successfully
}
|
973608841a2daf9ff2a63fb19fadad998ca7549a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void | 973608841a2daf9ff2a63fb19fadad998ca7549a.cu |
__global__ void |
34cce6143310216fd77770c2603c3a50c2418719.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "kernel_gpu_utils.h"
namespace SCAMP {
// Returns the CUDA block size to use for a tile, selected by the tile's
// floating-point precision mode; 0 if the mode is unrecognized.
int get_blocksz(Tile *t) {
  switch (t->info()->fp_type) {
    case PRECISION_DOUBLE:
      return BLOCKSZ_DP;
    case PRECISION_MIXED:
    case PRECISION_SINGLE:
      return BLOCKSZ_SP;
  }
  return 0;
}
// Size in bytes of one intermediate value for the given precision mode
// (mixed precision stores intermediates as float); -1 for invalid input.
int FPTypeSize(SCAMPPrecisionType dtype) {
  switch (dtype) {
    case PRECISION_DOUBLE:
      return sizeof(double);
    case PRECISION_MIXED:
    case PRECISION_SINGLE:
      return sizeof(float);
    case PRECISION_INVALID:
      return -1;
  }
  return -1;
}
// Tile height (rows processed per kernel tile) for the given precision
// mode; -1 for invalid input.
int GetTileHeight(SCAMPPrecisionType dtype) {
  switch (dtype) {
    case PRECISION_DOUBLE:
      return TILE_HEIGHT_DP;
    case PRECISION_MIXED:
    case PRECISION_SINGLE:
      return TILE_HEIGHT_SP;
    case PRECISION_INVALID:
      return -1;
  }
  return -1;
}
// Size in bytes of one profile element for the given profile type; throws
// SCAMPException for profile types it cannot size.
size_t GetProfileTypeSizeInternalGPU(SCAMPProfileType type) {
  switch (type) {
    case PROFILE_TYPE_SUM_THRESH:
      return sizeof(double);
    case PROFILE_TYPE_1NN_INDEX:
      return sizeof(uint64_t);
    case PROFILE_TYPE_1NN:
      return sizeof(float);
    case PROFILE_TYPE_APPROX_ALL_NEIGHBORS:
    case PROFILE_TYPE_MATRIX_SUMMARY:
    case PROFILE_TYPE_KNN:
      return sizeof(uint64_t);
    default:
      throw SCAMPException(
          "Error: Could not determine size of profile elements");
  }
}
// Computes the dynamic shared-memory requirement (bytes) for the SCAMP
// kernel: intermediate arrays over the tile's columns and rows (three base
// variables plus any extra operands), optional per-column / per-row profile
// buffers, and two extra uint64 slots for profile types that must check
// whether they are done.
int get_smem(const OpInfo *info, uint64_t blocksz) {
  // Base intermediate variables kept per tile column/row.
  constexpr int num_shared_variables = 3;
  int intermediate_data_size = FPTypeSize(info->fp_type);
  int tile_height = GetTileHeight(info->fp_type);
  // Each thread covers DIAGS_PER_THREAD diagonals; the extra tile_height
  // columns cover the diagonal offset across the tile.
  int tile_width = blocksz * DIAGS_PER_THREAD + tile_height;
  int smem = (tile_width + tile_height) *
             (num_shared_variables + info->opt_args.num_extra_operands) *
             intermediate_data_size;
  int profile_data_size = GetProfileTypeSizeInternalGPU(info->profile_type);
  if (info->computing_cols) {
    smem += tile_width * profile_data_size;
  }
  if (info->computing_rows) {
    smem += tile_height * profile_data_size;
  }
  if (NeedsCheckIfDone(info->profile_type)) {
    smem += 2 * sizeof(uint64_t);
  }
  return smem;
}
} // namespace SCAMP
| 34cce6143310216fd77770c2603c3a50c2418719.cu | #include <cuda_runtime.h>
#include "kernel_gpu_utils.h"
namespace SCAMP {
// Returns the CUDA block size to use for a tile, selected by the tile's
// floating-point precision mode; 0 if the mode is unrecognized.
int get_blocksz(Tile *t) {
  switch (t->info()->fp_type) {
    case PRECISION_DOUBLE:
      return BLOCKSZ_DP;
    case PRECISION_MIXED:
    case PRECISION_SINGLE:
      return BLOCKSZ_SP;
  }
  return 0;
}
// Size in bytes of one intermediate value for the given precision mode
// (mixed precision stores intermediates as float); -1 for invalid input.
int FPTypeSize(SCAMPPrecisionType dtype) {
  switch (dtype) {
    case PRECISION_DOUBLE:
      return sizeof(double);
    case PRECISION_MIXED:
    case PRECISION_SINGLE:
      return sizeof(float);
    case PRECISION_INVALID:
      return -1;
  }
  return -1;
}
// Tile height (rows processed per kernel tile) for the given precision
// mode; -1 for invalid input.
int GetTileHeight(SCAMPPrecisionType dtype) {
  switch (dtype) {
    case PRECISION_DOUBLE:
      return TILE_HEIGHT_DP;
    case PRECISION_MIXED:
    case PRECISION_SINGLE:
      return TILE_HEIGHT_SP;
    case PRECISION_INVALID:
      return -1;
  }
  return -1;
}
// Size in bytes of one profile element for the given profile type; throws
// SCAMPException for profile types it cannot size.
size_t GetProfileTypeSizeInternalGPU(SCAMPProfileType type) {
  switch (type) {
    case PROFILE_TYPE_SUM_THRESH:
      return sizeof(double);
    case PROFILE_TYPE_1NN_INDEX:
      return sizeof(uint64_t);
    case PROFILE_TYPE_1NN:
      return sizeof(float);
    case PROFILE_TYPE_APPROX_ALL_NEIGHBORS:
    case PROFILE_TYPE_MATRIX_SUMMARY:
    case PROFILE_TYPE_KNN:
      return sizeof(uint64_t);
    default:
      throw SCAMPException(
          "Error: Could not determine size of profile elements");
  }
}
// Computes the dynamic shared-memory requirement (bytes) for the SCAMP
// kernel: intermediate arrays over the tile's columns and rows (three base
// variables plus any extra operands), optional per-column / per-row profile
// buffers, and two extra uint64 slots for profile types that must check
// whether they are done.
int get_smem(const OpInfo *info, uint64_t blocksz) {
  // Base intermediate variables kept per tile column/row.
  constexpr int num_shared_variables = 3;
  int intermediate_data_size = FPTypeSize(info->fp_type);
  int tile_height = GetTileHeight(info->fp_type);
  // Each thread covers DIAGS_PER_THREAD diagonals; the extra tile_height
  // columns cover the diagonal offset across the tile.
  int tile_width = blocksz * DIAGS_PER_THREAD + tile_height;
  int smem = (tile_width + tile_height) *
             (num_shared_variables + info->opt_args.num_extra_operands) *
             intermediate_data_size;
  int profile_data_size = GetProfileTypeSizeInternalGPU(info->profile_type);
  if (info->computing_cols) {
    smem += tile_width * profile_data_size;
  }
  if (info->computing_rows) {
    smem += tile_height * profile_data_size;
  }
  if (NeedsCheckIfDone(info->profile_type)) {
    smem += 2 * sizeof(uint64_t);
  }
  return smem;
}
} // namespace SCAMP
|
919018f99fa7fc44adaa6b8b7a9f6980e3a27d8e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void add(int *a, int *b, int *c, int n) {
int index = threadIdx.x+blockIdx.x*blockDim.x;
if(index < n)
c[index] = a[index] + b[index];
}
#define N 9
#define M 5
/* Fill x[0..size-1] with pseudo-random digits in [0, 9]. No-op for size <= 0. */
void random_ints(int* x, int size)
{
    for (int k = 0; k < size; ++k)
        x[k] = rand() % 10;
}
// Two-stream demo: each stream independently performs an H2D copy of its
// input vectors, runs the element-wise add kernel, and copies the result
// back; the host synchronizes once and prints both result vectors.
int main(void) {
    int *a1, *b1, *c1, *a2, *b2, *c2;             // host copies of a, b, c
    int *a1_d, *b1_d, *c1_d, *a2_d, *b2_d, *c2_d; // device copies of a, b, c
    int size = N * sizeof(int);
    hipStream_t stream1, stream2;
    hipError_t error;
    error = hipStreamCreate(&stream1);
    error = hipStreamCreate(&stream2);
    // Alloc space for device copies of a, b, c
    hipMalloc((void **)&a1_d, size);
    hipMalloc((void **)&b1_d, size);
    hipMalloc((void **)&c1_d, size);
    hipMalloc((void **)&a2_d, size);
    hipMalloc((void **)&b2_d, size);
    hipMalloc((void **)&c2_d, size);
    // Alloc space for host copies of a, b, c and setup input values.
    // NOTE(review): these are pageable (malloc) buffers, so the Async copies
    // below cannot truly overlap with execution; pinned memory
    // (hipHostMalloc) would be required for genuinely asynchronous copies.
    a1 = (int *)malloc(size); random_ints(a1, N);
    b1 = (int *)malloc(size); random_ints(b1, N);
    c1 = (int *)malloc(size);
    a2 = (int *)malloc(size); random_ints(a2, N);
    b2 = (int *)malloc(size); random_ints(b2, N);
    c2 = (int *)malloc(size);
    // Copy inputs to device, one pipeline per stream
    hipMemcpyAsync(a1_d, a1, size, hipMemcpyHostToDevice, stream1);
    hipMemcpyAsync(b1_d, b1, size, hipMemcpyHostToDevice, stream1);
    hipMemcpyAsync(a2_d, a2, size, hipMemcpyHostToDevice, stream2);
    hipMemcpyAsync(b2_d, b2, size, hipMemcpyHostToDevice, stream2);
    // Launch add() with a ceil(N/M)-block grid of M threads in each stream
    hipLaunchKernelGGL(( add), dim3((N+M-1)/M),dim3(M),0,stream1, a1_d, b1_d, c1_d,N);
    hipLaunchKernelGGL(( add), dim3((N+M-1)/M),dim3(M),0,stream2, a2_d, b2_d, c2_d,N);
    // Copy result back to host
    hipMemcpyAsync(c1, c1_d, size, hipMemcpyDeviceToHost, stream1);
    hipMemcpyAsync(c2, c2_d, size, hipMemcpyDeviceToHost, stream2);
    // BUG FIX: capture the synchronization status so the check below
    // reflects kernel/copy failures; the original discarded this return
    // value, leaving `error` holding only the stream-creation status.
    error = hipDeviceSynchronize();
    for(int i=0; i<N; i++)printf("stream1 %d + %d = %d\t stream2 %d + %d = %d\n",a1[i],b1[i],c1[i],a2[i],b2[i],c2[i]);
    if (error != 0) {
        printf ("%s\n", hipGetErrorString (error));
        exit (1);
    }
    // Cleanup
    free(a1); free(b1); free(c1);
    free(a2); free(b2); free(c2);
    hipFree(a1_d); hipFree(b1_d); hipFree(c1_d);
    hipFree(a2_d); hipFree(b2_d); hipFree(c2_d);
    // BUG FIX: destroy the streams (the original leaked them).
    hipStreamDestroy(stream1);
    hipStreamDestroy(stream2);
    return 0;
}
| 919018f99fa7fc44adaa6b8b7a9f6980e3a27d8e.cu | #include <cuda.h>
#include <stdio.h>
__global__ void add(int *a, int *b, int *c, int n) {
int index = threadIdx.x+blockIdx.x*blockDim.x;
if(index < n)
c[index] = a[index] + b[index];
}
#define N 9
#define M 5
/* Fill x[0..size-1] with pseudo-random digits in [0, 9]. No-op for size <= 0. */
void random_ints(int* x, int size)
{
    for (int k = 0; k < size; ++k)
        x[k] = rand() % 10;
}
// Two-stream demo: each stream independently performs an H2D copy of its
// input vectors, runs the element-wise add kernel, and copies the result
// back; the host synchronizes once and prints both result vectors.
int main(void) {
    int *a1, *b1, *c1, *a2, *b2, *c2;             // host copies of a, b, c
    int *a1_d, *b1_d, *c1_d, *a2_d, *b2_d, *c2_d; // device copies of a, b, c
    int size = N * sizeof(int);
    cudaStream_t stream1, stream2;
    cudaError_t error;
    error = cudaStreamCreate(&stream1);
    error = cudaStreamCreate(&stream2);
    // Alloc space for device copies of a, b, c
    cudaMalloc((void **)&a1_d, size);
    cudaMalloc((void **)&b1_d, size);
    cudaMalloc((void **)&c1_d, size);
    cudaMalloc((void **)&a2_d, size);
    cudaMalloc((void **)&b2_d, size);
    cudaMalloc((void **)&c2_d, size);
    // Alloc space for host copies of a, b, c and setup input values.
    // NOTE(review): these are pageable (malloc) buffers, so the Async copies
    // below cannot truly overlap with execution; pinned memory
    // (cudaMallocHost) would be required for genuinely asynchronous copies.
    a1 = (int *)malloc(size); random_ints(a1, N);
    b1 = (int *)malloc(size); random_ints(b1, N);
    c1 = (int *)malloc(size);
    a2 = (int *)malloc(size); random_ints(a2, N);
    b2 = (int *)malloc(size); random_ints(b2, N);
    c2 = (int *)malloc(size);
    // Copy inputs to device, one pipeline per stream
    cudaMemcpyAsync(a1_d, a1, size, cudaMemcpyHostToDevice, stream1);
    cudaMemcpyAsync(b1_d, b1, size, cudaMemcpyHostToDevice, stream1);
    cudaMemcpyAsync(a2_d, a2, size, cudaMemcpyHostToDevice, stream2);
    cudaMemcpyAsync(b2_d, b2, size, cudaMemcpyHostToDevice, stream2);
    // Launch add() with a ceil(N/M)-block grid of M threads in each stream
    add<<<(N+M-1)/M,M,0,stream1>>>(a1_d, b1_d, c1_d,N);
    add<<<(N+M-1)/M,M,0,stream2>>>(a2_d, b2_d, c2_d,N);
    // Copy result back to host
    cudaMemcpyAsync(c1, c1_d, size, cudaMemcpyDeviceToHost, stream1);
    cudaMemcpyAsync(c2, c2_d, size, cudaMemcpyDeviceToHost, stream2);
    // BUG FIX: capture the synchronization status so the check below
    // reflects kernel/copy failures; the original discarded this return
    // value, leaving `error` holding only the stream-creation status.
    error = cudaDeviceSynchronize();
    for(int i=0; i<N; i++)printf("stream1 %d + %d = %d\t stream2 %d + %d = %d\n",a1[i],b1[i],c1[i],a2[i],b2[i],c2[i]);
    if (error != 0) {
        printf ("%s\n", cudaGetErrorString (error));
        exit (1);
    }
    // Cleanup
    free(a1); free(b1); free(c1);
    free(a2); free(b2); free(c2);
    cudaFree(a1_d); cudaFree(b1_d); cudaFree(c1_d);
    cudaFree(a2_d); cudaFree(b2_d); cudaFree(c2_d);
    // BUG FIX: destroy the streams (the original leaked them).
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    return 0;
}
|
9c20210503562edcd140054380285ec2b9701b77.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
    // Launch-configuration traits consumed by cudev::transform for the
    // element-wise arithmetic kernels, keyed on the byte sizes of the source
    // and destination element types:
    //   simple_block_dim_x/y -- block shape for the simple transform
    //   smart_block_dim_x/y  -- block shape for the "smart" transform
    //   smart_shift          -- elements handled per thread by the smart path
    // The generic template (smart_shift = 1) is the fallback for any
    // unspecialized size pair; the specializations below enable
    // 4-elements-per-thread processing for every combination of 1-, 2- and
    // 4-byte source/destination types.
    template <size_t src_size, size_t dst_size> struct ArithmFuncTraits
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };
        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 1 };
    };
    template <> struct ArithmFuncTraits<1, 1>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };
        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<1, 2>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };
        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<1, 4>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };
        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<2, 1>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };
        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<2, 2>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };
        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<2, 4>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };
        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<4, 1>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };
        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<4, 2>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };
        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
    template <> struct ArithmFuncTraits<4, 4>
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };
        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };
        enum { smart_shift = 4 };
    };
}
//////////////////////////////////////////////////////////////////////////
// addMat
namespace arithm
{
    // Adds four packed 8-bit lanes per 32-bit word via vadd4 (word-level
    // SIMD), letting one thread process four pixels at a time.
    struct VAdd4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vadd4(a, b);
        }
        __device__ __forceinline__ VAdd4() {}
        __device__ __forceinline__ VAdd4(const VAdd4& other) {}
    };
    ////////////////////////////////////
    // Adds two packed 16-bit lanes per 32-bit word via vadd2.
    struct VAdd2 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vadd2(a, b);
        }
        __device__ __forceinline__ VAdd2() {}
        __device__ __forceinline__ VAdd2(const VAdd2& other) {}
    };
    ////////////////////////////////////
    // Per-element addition of two T values, saturate_cast into the
    // destination type D.
    template <typename T, typename D> struct AddMat : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(a + b);
        }
        __device__ __forceinline__ AddMat() {}
        __device__ __forceinline__ AddMat(const AddMat& other) {}
    };
}
// Bind each addition functor to its launch-configuration traits, keyed on
// the byte sizes of its operand/result types (see ArithmFuncTraits above).
namespace cv { namespace gpu { namespace cudev
{
    template <> struct TransformFunctorTraits< arithm::VAdd4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    ////////////////////////////////////
    template <> struct TransformFunctorTraits< arithm::VAdd2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    ////////////////////////////////////
    template <typename T, typename D> struct TransformFunctorTraits< arithm::AddMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
    // Packed add over buffers reinterpreted as 32-bit words holding four
    // 8-bit lanes each (caller guarantees the layout is divisible by 4).
    void addMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        cudev::transform(src1, src2, dst, VAdd4(), WithOutMask(), stream);
    }
    // Packed add over buffers reinterpreted as 32-bit words holding two
    // 16-bit lanes each.
    void addMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        cudev::transform(src1, src2, dst, VAdd2(), WithOutMask(), stream);
    }
    // Generic per-element saturating add; honors an optional mask selecting
    // which destination elements to write.
    template <typename T, typename D>
    void addMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), WithOutMask(), stream);
    }
    // Explicit instantiations for the source/destination type pairs exposed
    // by the public API (commented-out lines are pairs OpenCV never uses).
    template void addMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// addScalar
namespace arithm
{
    // Saturated scalar-addition functor: returns saturate_cast<D>(a + val).
    // T = source element type, S = intermediate scalar type (the host wrapper
    // narrows the incoming double to S), D = destination element type.
    template <typename T, typename S, typename D> struct AddScalar : unary_function<T, D>
    {
        S val;
        explicit AddScalar(S val_) : val(val_) {}
        __device__ __forceinline__ D operator ()(T a) const
        {
            return saturate_cast<D>(a + val);
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Hooks AddScalar into the generic cudev::transform machinery; the traits
    // are derived from the source/destination element sizes via the shared
    // ArithmFuncTraits policy (defined earlier in this file).
    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::AddScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
    // Element-wise dst = saturate_cast<D>(src1 + val).
    // The double scalar is narrowed to the intermediate type S before use.
    // When mask.data is non-null the masked transform overload is used
    // (presumably writing only where the mask is non-zero — standard OpenCV
    // mask semantics; confirm against cudev::transform).
    template <typename T, typename S, typename D>
    void addScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        AddScalar<T, S, D> op(static_cast<S>(val));
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }

    // Explicit instantiations for every supported <src, intermediate, dst>
    // combination; commented-out lines are intentionally disabled combinations.
    template void addScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// subMat
namespace arithm
{
    // Lane-wise subtraction of four packed 8-bit values stored in a uint,
    // via the vsub4 SIMD-in-register helper.
    struct VSub4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vsub4(a, b);
        }

        // Explicit empty __device__ constructors keep this stateless functor
        // constructible/copyable in device code. The copy constructor takes an
        // unnamed parameter — there is no state to copy — which also avoids
        // unused-parameter warnings (the original named it `other`).
        __device__ __forceinline__ VSub4() {}
        __device__ __forceinline__ VSub4(const VSub4&) {}
    };

    ////////////////////////////////////

    // Lane-wise subtraction of two packed 16-bit values stored in a uint,
    // via the vsub2 SIMD-in-register helper.
    struct VSub2 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vsub2(a, b);
        }

        // See VSub4: unnamed copy-constructor parameter, nothing to copy.
        __device__ __forceinline__ VSub2() {}
        __device__ __forceinline__ VSub2(const VSub2&) {}
    };

    ////////////////////////////////////

    // Per-element saturated subtraction: returns saturate_cast<D>(a - b).
    template <typename T, typename D> struct SubMat : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(a - b);
        }

        // See VSub4: unnamed copy-constructor parameter, nothing to copy.
        __device__ __forceinline__ SubMat() {}
        __device__ __forceinline__ SubMat(const SubMat&) {}
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for the subtraction functors: launch/vectorization
    // policy is selected from operand sizes via the shared ArithmFuncTraits.
    template <> struct TransformFunctorTraits< arithm::VSub4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    ////////////////////////////////////

    template <> struct TransformFunctorTraits< arithm::VSub2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    ////////////////////////////////////

    template <typename T, typename D> struct TransformFunctorTraits< arithm::SubMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
    // Packed subtraction of 4x8-bit lanes per 32-bit word (no mask support).
    void subMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        cudev::transform(src1, src2, dst, VSub4(), WithOutMask(), stream);
    }

    // Packed subtraction of 2x16-bit lanes per 32-bit word (no mask support).
    void subMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        cudev::transform(src1, src2, dst, VSub2(), WithOutMask(), stream);
    }

    // Element-wise dst = saturate_cast<D>(src1 - src2), optionally masked
    // (the masked transform overload is used when mask.data is non-null).
    template <typename T, typename D>
    void subMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), WithOutMask(), stream);
    }

    // Explicit instantiations for every supported <src, dst> combination;
    // commented-out lines are intentionally disabled combinations.
    template void subMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// subScalar
namespace arithm
{
    // Element-wise dst = saturate_cast<D>(src1 - val), implemented by reusing
    // AddScalar with the negated scalar: a - val == a + (-val). This is valid
    // here because the intermediate type S is always float or double (see the
    // instantiation list below). Optionally masked, like addScalar.
    template <typename T, typename S, typename D>
    void subScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        AddScalar<T, S, D> op(-static_cast<S>(val));
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }

    // Explicit instantiations for every supported <src, intermediate, dst>
    // combination; commented-out lines are intentionally disabled combinations.
    template void subScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// mulMat
namespace arithm
{
    // Multiplies each 8-bit lane of a packed 4x8-bit value (stored in a uint)
    // by a float factor, saturating every product to uchar range before
    // repacking into the corresponding byte position.
    struct Mul_8uc4_32f : binary_function<uint, float, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, float b) const
        {
            uint res = 0;

            res |= (saturate_cast<uchar>((0xffu & (a      )) * b)      );
            res |= (saturate_cast<uchar>((0xffu & (a >>  8)) * b) <<  8);
            res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
            res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);

            return res;
        }

        // Explicit empty __device__ constructors keep this stateless functor
        // constructible/copyable in device code. The copy constructor takes an
        // unnamed parameter — there is no state to copy — which also avoids
        // unused-parameter warnings (the original named it `other`).
        __device__ __forceinline__ Mul_8uc4_32f() {}
        __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f&) {}
    };

    // Multiplies each component of a short4 by a float factor, saturating
    // every product back to short.
    struct Mul_16sc4_32f : binary_function<short4, float, short4>
    {
        __device__ __forceinline__ short4 operator ()(short4 a, float b) const
        {
            return make_short4(saturate_cast<short>(a.x * b), saturate_cast<short>(a.y * b),
                               saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
        }

        // See Mul_8uc4_32f: unnamed copy-constructor parameter, nothing to copy.
        __device__ __forceinline__ Mul_16sc4_32f() {}
        __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f&) {}
    };

    // Per-element saturated multiplication: returns saturate_cast<D>(a * b).
    template <typename T, typename D> struct Mul : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(a * b);
        }

        // See Mul_8uc4_32f: unnamed copy-constructor parameter, nothing to copy.
        __device__ __forceinline__ Mul() {}
        __device__ __forceinline__ Mul(const Mul&) {}
    };

    // Per-element scaled multiplication: returns saturate_cast<D>(scale * a * b).
    // S is the scale type (float or double in this file's instantiations).
    template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
    {
        S scale;

        explicit MulScale(S scale_) : scale(scale_) {}

        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(scale * a * b);
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for the multiplication functors; policy derived from
    // operand sizes via the shared ArithmFuncTraits.
    template <> struct TransformFunctorTraits<arithm::Mul_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <typename T, typename D> struct TransformFunctorTraits< arithm::Mul<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };

    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
    // Packed 4x8-bit x float multiplication per 32-bit word (uchar4 data
    // viewed as uint), saturating each lane. No mask support.
    void mulMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        cudev::transform(src1, src2, dst, Mul_8uc4_32f(), WithOutMask(), stream);
    }

    // short4 x float multiplication, saturating each component. No mask support.
    void mulMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, hipStream_t stream)
    {
        cudev::transform(src1, src2, dst, Mul_16sc4_32f(), WithOutMask(), stream);
    }

    // Element-wise dst = saturate_cast<D>(scale * src1 * src2).
    // Fast path: when scale == 1 exactly, the unscaled Mul functor is used so
    // no extra multiply is performed per element.
    template <typename T, typename S, typename D>
    void mulMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream)
    {
        if (scale == 1)
        {
            Mul<T, D> op;
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
        }
        else
        {
            MulScale<T, S, D> op(static_cast<S>(scale));
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
        }
    }

    // Explicit instantiations for every supported <src, scale, dst>
    // combination; commented-out lines are intentionally disabled combinations.
    template void mulMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    //template void mulMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
    template void mulMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// mulScalar
namespace arithm
{
    // Per-element multiply-by-constant functor: dst = saturate_cast<D>(src * val).
    // T is the source element type, S the working type the scalar is stored in,
    // and D the destination element type.
    template <typename T, typename S, typename D> struct MulScalar : unary_function<T, D>
    {
        S val;

        explicit MulScalar(S val_) : val(val_)
        {
        }

        __device__ __forceinline__ D operator ()(T src) const
        {
            return saturate_cast<D>(src * val);
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
// Kernel-tuning traits for MulScalar: block/unroll configuration is chosen
// from the sizes of the source (T) and destination (D) element types.
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Computes dst(y, x) = saturate_cast<D>(src1(y, x) * val) for every pixel.
// The scalar is converted to the working type S once on the host; the
// transform runs asynchronously on `stream` (no mask).
template <typename T, typename S, typename D>
void mulScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations for the supported <src, work, dst> type triples.
// NOTE(review): the commented-out combinations appear intentionally disabled
// (presumably unreachable from the public API) - confirm before enabling.
template void mulScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divMat
namespace arithm
{
// Per-pixel division of a packed 8UC4 value (four 8-bit channels in one uint)
// by a single float divisor.  All four channels become 0 when the divisor is
// zero; otherwise the reciprocal is taken once and reused for every lane.
struct Div_8uc4_32f : binary_function<uint, float, uint>
{
__device__ __forceinline__ uint operator ()(uint a, float b) const
{
uint res = 0;
if (b != 0)
{
b = 1.0f / b;
res |= (saturate_cast<uchar>((0xffu & (a )) * b) );
res |= (saturate_cast<uchar>((0xffu & (a >> 8)) * b) << 8);
res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
}
return res;
}
};
// Per-pixel division of a short4 (16SC4) value by a single float divisor;
// all four channels become 0 when the divisor is zero.
struct Div_16sc4_32f : binary_function<short4, float, short4>
{
__device__ __forceinline__ short4 operator ()(short4 a, float b) const
{
return b != 0 ? make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b))
: make_short4(0,0,0,0);
}
};
// Element-wise division: dst = saturate_cast<D>(a / b), with the OpenCV
// convention that division by zero yields 0.
// NOTE(review): the empty user-declared default/copy constructors look like a
// device-compiler workaround - confirm before removing them.
template <typename T, typename D> struct Div : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return b != 0 ? saturate_cast<D>(a / b) : 0;
}
__device__ __forceinline__ Div() {}
__device__ __forceinline__ Div(const Div& other) {}
};
// Specialization for a float destination: the numerator is promoted to float
// so the division happens in the destination precision (no saturate needed).
template <typename T> struct Div<T, float> : binary_function<T, T, float>
{
__device__ __forceinline__ float operator ()(T a, T b) const
{
return b != 0 ? static_cast<float>(a) / b : 0;
}
__device__ __forceinline__ Div() {}
__device__ __forceinline__ Div(const Div& other) {}
};
// Specialization for a double destination: division performed in double.
template <typename T> struct Div<T, double> : binary_function<T, T, double>
{
__device__ __forceinline__ double operator ()(T a, T b) const
{
return b != 0 ? static_cast<double>(a) / b : 0;
}
__device__ __forceinline__ Div() {}
__device__ __forceinline__ Div(const Div& other) {}
};
// Scaled division: dst = saturate_cast<D>(scale * a / b), 0 when b == 0.
// The scale is held in the working type S and applied before the division.
template <typename T, typename S, typename D> struct DivScale : binary_function<T, T, D>
{
S scale;
explicit DivScale(S scale_) : scale(scale_) {}
__device__ __forceinline__ D operator ()(T a, T b) const
{
return b != 0 ? saturate_cast<D>(scale * a / b) : 0;
}
};
}
namespace cv { namespace gpu { namespace cudev
{
// Kernel-tuning traits for the division functors, keyed on the element sizes
// of the source and destination types.
template <> struct TransformFunctorTraits<arithm::Div_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T, typename D> struct TransformFunctorTraits< arithm::Div<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Vectorized divide for 8UC4 images: each uint packs four 8-bit channels
// that share the per-pixel float divisor from src2.
void divMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, Div_8uc4_32f(), WithOutMask(), stream);
}
// Vectorized divide for 16SC4 images by a per-pixel float divisor.
void divMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, Div_16sc4_32f(), WithOutMask(), stream);
}
// dst = saturate_cast<D>(scale * src1 / src2), with 0 written wherever
// src2 is zero.  scale == 1 takes the unscaled Div fast path; otherwise
// the scale is converted to the working type S and applied via DivScale.
template <typename T, typename S, typename D>
void divMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream)
{
if (scale == 1)
{
Div<T, D> op;
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
DivScale<T, S, D> op(static_cast<S>(scale));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
// Explicit instantiations; the commented-out combinations appear
// intentionally disabled (NOTE(review): confirm against callers).
template void divMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divScalar
namespace arithm
{
// Divides every element by a scalar: dst = saturate_cast<D>(src1 / val).
// Implemented as a multiply by the precomputed reciprocal 1.0 / val, reusing
// the MulScalar functor.
// NOTE(review): val == 0 produces an infinite reciprocal here rather than the
// 0-result convention used by divMat - confirm callers guard against it.
template <typename T, typename S, typename D>
void divScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(1.0 / val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations; commented-out combinations appear intentionally
// disabled (NOTE(review): confirm against callers).
template void divScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divInv
namespace arithm
{
    // Scalar-over-element division functor: dst = saturate_cast<D>(val / src),
    // writing 0 wherever the element is zero (same zero convention as Div).
    template <typename T, typename S, typename D> struct DivInv : unary_function<T, D>
    {
        S val;

        explicit DivInv(S numerator) : val(numerator)
        {
        }

        __device__ __forceinline__ D operator ()(T denom) const
        {
            if (denom == 0)
                return 0;

            return saturate_cast<D>(val / denom);
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
// Kernel-tuning traits for DivInv, keyed on source/destination element sizes.
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivInv<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Computes dst(y, x) = saturate_cast<D>(val / src1(y, x)), with 0 written
// wherever the source element is zero.  The scalar numerator is converted
// to the working type S once on the host.
template <typename T, typename S, typename D>
void divInv(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
DivInv<T, S, D> op(static_cast<S>(val));
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations; commented-out combinations appear intentionally
// disabled (NOTE(review): confirm against callers).
template void divInv<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absDiffMat
namespace arithm
{
// Absolute difference of four packed 8-bit lanes using the vabsdiff4 SIMD
// helper (one uint carries four uchar values).
// NOTE(review): the empty user-declared constructors look like a
// device-compiler workaround - confirm before removing them.
struct VAbsDiff4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff4(a, b);
}
__device__ __forceinline__ VAbsDiff4() {}
__device__ __forceinline__ VAbsDiff4(const VAbsDiff4& other) {}
};
////////////////////////////////////
// Absolute difference of two packed 16-bit lanes via the vabsdiff2 SIMD helper.
struct VAbsDiff2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vabsdiff2(a, b);
}
__device__ __forceinline__ VAbsDiff2() {}
__device__ __forceinline__ VAbsDiff2(const VAbsDiff2& other) {}
};
////////////////////////////////////
// Overload set dispatching |x| to the matching device routine per type.
__device__ __forceinline__ int _abs(int a)
{
return ::abs(a);
}
__device__ __forceinline__ float _abs(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ double _abs(double a)
{
return ::fabs(a);
}
// Scalar absolute difference: dst = saturate_cast<T>(|a - b|).  For 8/16-bit
// T the operands are promoted to int before subtracting, so the difference
// cannot wrap.  NOTE(review): for T == int, a - b may overflow - confirm
// callers accept that.
template <typename T> struct AbsDiffMat : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(T a, T b) const
{
return saturate_cast<T>(_abs(a - b));
}
__device__ __forceinline__ AbsDiffMat() {}
__device__ __forceinline__ AbsDiffMat(const AbsDiffMat& other) {}
};
}
namespace cv { namespace gpu { namespace cudev
{
// Kernel-tuning traits for the absolute-difference functors, keyed on
// element sizes.
template <> struct TransformFunctorTraits< arithm::VAbsDiff4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VAbsDiff2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
// Vectorized |src1 - src2| over four packed 8-bit lanes per uint.
void absDiffMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, VAbsDiff4(), WithOutMask(), stream);
}
// Vectorized |src1 - src2| over two packed 16-bit lanes per uint.
void absDiffMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, VAbsDiff2(), WithOutMask(), stream);
}
// Scalar fallback: dst = saturate_cast<T>(|src1 - src2|) per element,
// launched asynchronously on `stream` (no mask).
template <typename T>
void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, AbsDiffMat<T>(), WithOutMask(), stream);
}
template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absDiffScalar
namespace arithm
{
    // |src - val| functor: the difference is taken in the working type S,
    // its absolute value computed with abs_func<S>, and the result saturated
    // back into the element type T.
    template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T>
    {
        S val;

        explicit AbsDiffScalar(S val_) : val(val_)
        {
        }

        __device__ __forceinline__ T operator ()(T x) const
        {
            return saturate_cast<T>(abs_func<S>()(x - val));
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
// Kernel-tuning traits for AbsDiffScalar, keyed on the element size of T.
template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // dst = |src1 - val|, element-wise; val is converted once to the work
    // type S on the host before the kernel launch.
    template <typename T, typename S>
    void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
    {
        const S sval = static_cast<S>(val);
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, AbsDiffScalar<T, S>(sval), WithOutMask(), stream);
    }
    template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absMat
namespace cv { namespace gpu { namespace cudev
{
    // Per-element load/store widths for abs_func<T> (reads T, writes T).
    template <typename T> struct TransformFunctorTraits< abs_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // dst = |src|, element-wise, via abs_func<T>.
    template <typename T>
    void absMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
    {
        const PtrStepSz<T> s = (PtrStepSz<T>) src;
        const PtrStepSz<T> d = (PtrStepSz<T>) dst;
        cudev::transform(s, d, abs_func<T>(), WithOutMask(), stream);
    }
    template void absMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// sqrMat
namespace arithm
{
    // Element-wise square with saturation back to T.
    // NOTE(review): the product x * x is evaluated in the promoted type of T;
    // for T = int this can overflow before saturate_cast is applied — confirm
    // this matches the intended (CPU) semantics.
    template <typename T> struct Sqr : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            return saturate_cast<T>(x * x);
        }
        // Empty ctors declared __device__ so the transform framework can
        // construct/copy the functor in device code.
        __device__ __forceinline__ Sqr() {}
        __device__ __forceinline__ Sqr(const Sqr& other) {}
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Per-element load/store widths for Sqr<T> (reads T, writes T).
    template <typename T> struct TransformFunctorTraits< arithm::Sqr<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // dst = src * src, element-wise (saturated), via the Sqr functor.
    template <typename T>
    void sqrMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
    {
        Sqr<T> op;
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }
    template void sqrMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// sqrtMat
namespace cv { namespace gpu { namespace cudev
{
    // Per-element load/store widths for sqrt_func<T> (reads T, writes T).
    template <typename T> struct TransformFunctorTraits< sqrt_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // dst = sqrt(src), element-wise, via sqrt_func<T>.
    template <typename T>
    void sqrtMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
    {
        const PtrStepSz<T> s = (PtrStepSz<T>) src;
        const PtrStepSz<T> d = (PtrStepSz<T>) dst;
        cudev::transform(s, d, sqrt_func<T>(), WithOutMask(), stream);
    }
    template void sqrtMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// logMat
namespace cv { namespace gpu { namespace cudev
{
    // Per-element load/store widths for log_func<T> (reads T, writes T).
    template <typename T> struct TransformFunctorTraits< log_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // dst = log(src), element-wise, via log_func<T>.
    template <typename T>
    void logMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
    {
        log_func<T> op;
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }
    template void logMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// expMat
namespace arithm
{
    // Element-wise exponential: applies exp_func<T> and saturates the result
    // back to T (relevant for integer element types).
    template <typename T> struct Exp : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            exp_func<T> f;
            return saturate_cast<T>(f(x));
        }
        // Empty ctors declared __device__ so the transform framework can
        // construct/copy the functor in device code.
        __device__ __forceinline__ Exp() {}
        __device__ __forceinline__ Exp(const Exp& other) {}
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Per-element load/store widths for Exp<T> (reads T, writes T).
    template <typename T> struct TransformFunctorTraits< arithm::Exp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // dst = exp(src), element-wise (saturated for integer T), via Exp<T>.
    template <typename T>
    void expMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
    {
        const PtrStepSz<T> s = (PtrStepSz<T>) src;
        const PtrStepSz<T> d = (PtrStepSz<T>) dst;
        cudev::transform(s, d, Exp<T>(), WithOutMask(), stream);
    }
    template void expMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpMat
namespace arithm
{
    // SIMD-in-a-word comparators: each uint holds four 8-bit lanes; the
    // vcmp*4 intrinsics compare lane-wise and produce a per-lane all-ones /
    // all-zeros mask. The empty ctors are declared __device__ so the
    // transform framework can construct/copy the functors in device code.
    struct VCmpEq4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmpeq4(a, b);
        }
        __device__ __forceinline__ VCmpEq4() {}
        __device__ __forceinline__ VCmpEq4(const VCmpEq4& other) {}
    };
    struct VCmpNe4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmpne4(a, b);
        }
        __device__ __forceinline__ VCmpNe4() {}
        __device__ __forceinline__ VCmpNe4(const VCmpNe4& other) {}
    };
    struct VCmpLt4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmplt4(a, b);
        }
        __device__ __forceinline__ VCmpLt4() {}
        __device__ __forceinline__ VCmpLt4(const VCmpLt4& other) {}
    };
    struct VCmpLe4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmple4(a, b);
        }
        __device__ __forceinline__ VCmpLe4() {}
        __device__ __forceinline__ VCmpLe4(const VCmpLe4& other) {}
    };
    ////////////////////////////////////
    // Adapts a boolean comparator Op to a mask writer: negating the bool
    // result maps true -> 0xFF and false -> 0x00 in the uchar return type.
    template <class Op, typename T>
    struct Cmp : binary_function<T, T, uchar>
    {
        __device__ __forceinline__ uchar operator()(T a, T b) const
        {
            Op op;
            return -op(a, b);
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Packed comparators read and write full uint words.
    template <> struct TransformFunctorTraits< arithm::VCmpEq4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <> struct TransformFunctorTraits< arithm::VCmpNe4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <> struct TransformFunctorTraits< arithm::VCmpLt4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <> struct TransformFunctorTraits< arithm::VCmpLe4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    ////////////////////////////////////
    // The generic comparator reads a T element and writes a uchar mask byte.
    template <class Op, typename T> struct TransformFunctorTraits< arithm::Cmp<Op, T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
    {
    };
}}}
namespace arithm
{
    // Packed (4x8-bit lanes per uint) mat-vs-mat comparisons; the greater /
    // greater-equal cases are handled by the host by swapping operands.
    void cmpMatEq_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpEq4(), WithOutMask(), stream);
    }
    void cmpMatNe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpNe4(), WithOutMask(), stream);
    }
    void cmpMatLt_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpLt4(), WithOutMask(), stream);
    }
    void cmpMatLe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpLe4(), WithOutMask(), stream);
    }
    // Generic comparison: writes a uchar mask (255 where Op holds, else 0).
    template <template <typename> class Op, typename T>
    void cmpMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        Cmp<Op<T>, T> op;
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, dst, op, WithOutMask(), stream);
    }
    // Thin per-predicate entry points instantiated below for all element types.
    template <typename T> void cmpMatEq(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        cmpMat<equal_to, T>(src1, src2, dst, stream);
    }
    template <typename T> void cmpMatNe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        cmpMat<not_equal_to, T>(src1, src2, dst, stream);
    }
    template <typename T> void cmpMatLt(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        cmpMat<less, T>(src1, src2, dst, stream);
    }
    template <typename T> void cmpMatLe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        cmpMat<less_equal, T>(src1, src2, dst, stream);
    }
    template void cmpMatEq<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatEq<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatEq<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatEq<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatEq<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatEq<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatEq<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatNe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatNe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatNe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatNe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatNe<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatNe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatNe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLt<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLt<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLt<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLt<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLt<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLt<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLt<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLe<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void cmpMatLe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpScalar
namespace arithm
{
    // Compare each (up to 4-channel) element against a fixed scalar, producing
    // a per-channel uchar mask via the Cmp adapter. One specialization per
    // channel count so the vector element types line up.
    #define TYPE_VEC(type, cn) typename TypeVec<type, cn>::vec_type
    template <class Op, typename T, int cn> struct CmpScalar;
    // 1 channel: plain T in, uchar mask out.
    template <class Op, typename T>
    struct CmpScalar<Op, T, 1> : unary_function<T, uchar>
    {
        const T val;  // scalar operand, converted to T on the host
        __host__ explicit CmpScalar(T val_) : val(val_) {}
        __device__ __forceinline__ uchar operator()(T src) const
        {
            Cmp<Op, T> op;
            return op(src, val);
        }
    };
    // 2 channels: compare .x/.y independently.
    template <class Op, typename T>
    struct CmpScalar<Op, T, 2> : unary_function<TYPE_VEC(T, 2), TYPE_VEC(uchar, 2)>
    {
        const TYPE_VEC(T, 2) val;
        __host__ explicit CmpScalar(TYPE_VEC(T, 2) val_) : val(val_) {}
        __device__ __forceinline__ TYPE_VEC(uchar, 2) operator()(const TYPE_VEC(T, 2) & src) const
        {
            Cmp<Op, T> op;
            return VecTraits<TYPE_VEC(uchar, 2)>::make(op(src.x, val.x), op(src.y, val.y));
        }
    };
    // 3 channels: compare .x/.y/.z independently.
    template <class Op, typename T>
    struct CmpScalar<Op, T, 3> : unary_function<TYPE_VEC(T, 3), TYPE_VEC(uchar, 3)>
    {
        const TYPE_VEC(T, 3) val;
        __host__ explicit CmpScalar(TYPE_VEC(T, 3) val_) : val(val_) {}
        __device__ __forceinline__ TYPE_VEC(uchar, 3) operator()(const TYPE_VEC(T, 3) & src) const
        {
            Cmp<Op, T> op;
            return VecTraits<TYPE_VEC(uchar, 3)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z));
        }
    };
    // 4 channels: compare .x/.y/.z/.w independently.
    template <class Op, typename T>
    struct CmpScalar<Op, T, 4> : unary_function<TYPE_VEC(T, 4), TYPE_VEC(uchar, 4)>
    {
        const TYPE_VEC(T, 4) val;
        __host__ explicit CmpScalar(TYPE_VEC(T, 4) val_) : val(val_) {}
        __device__ __forceinline__ TYPE_VEC(uchar, 4) operator()(const TYPE_VEC(T, 4) & src) const
        {
            Cmp<Op, T> op;
            return VecTraits<TYPE_VEC(uchar, 4)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z), op(src.w, val.w));
        }
    };
    #undef TYPE_VEC
}
namespace cv { namespace gpu { namespace cudev
{
    // Traits only for the single-channel case: reads T, writes a uchar mask.
    template <class Op, typename T> struct TransformFunctorTraits< arithm::CmpScalar<Op, T, 1> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
    {
    };
}}}
namespace arithm
{
    // Compare src against a per-channel scalar; the double values are cast to
    // T on the host and packed into a cn-channel vector before launch.
    template <template <typename> class Op, typename T, int cn>
    void cmpScalar(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream)
    {
        typedef typename TypeVec<T, cn>::vec_type src_t;
        typedef typename TypeVec<uchar, cn>::vec_type dst_t;
        T sval[] = {static_cast<T>(val[0]), static_cast<T>(val[1]), static_cast<T>(val[2]), static_cast<T>(val[3])};
        src_t val1 = VecTraits<src_t>::make(sval);
        CmpScalar<Op<T>, T, cn> op(val1);
        cudev::transform((PtrStepSz<src_t>) src, (PtrStepSz<dst_t>) dst, op, WithOutMask(), stream);
    }
    // Per-predicate dispatchers indexed by channel count. Slot 0 of each
    // table is a null placeholder: callers must pass cn in 1..4.
    template <typename T> void cmpScalarEq(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<equal_to, T, 1>,
            cmpScalar<equal_to, T, 2>,
            cmpScalar<equal_to, T, 3>,
            cmpScalar<equal_to, T, 4>
        };
        funcs[cn](src, val, dst, stream);
    }
    template <typename T> void cmpScalarNe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<not_equal_to, T, 1>,
            cmpScalar<not_equal_to, T, 2>,
            cmpScalar<not_equal_to, T, 3>,
            cmpScalar<not_equal_to, T, 4>
        };
        funcs[cn](src, val, dst, stream);
    }
    template <typename T> void cmpScalarLt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<less, T, 1>,
            cmpScalar<less, T, 2>,
            cmpScalar<less, T, 3>,
            cmpScalar<less, T, 4>
        };
        funcs[cn](src, val, dst, stream);
    }
    template <typename T> void cmpScalarLe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<less_equal, T, 1>,
            cmpScalar<less_equal, T, 2>,
            cmpScalar<less_equal, T, 3>,
            cmpScalar<less_equal, T, 4>
        };
        funcs[cn](src, val, dst, stream);
    }
    template <typename T> void cmpScalarGt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<greater, T, 1>,
            cmpScalar<greater, T, 2>,
            cmpScalar<greater, T, 3>,
            cmpScalar<greater, T, 4>
        };
        funcs[cn](src, val, dst, stream);
    }
    template <typename T> void cmpScalarGe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
    {
        typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
        static const func_t funcs[] =
        {
            0,
            cmpScalar<greater_equal, T, 1>,
            cmpScalar<greater_equal, T, 2>,
            cmpScalar<greater_equal, T, 3>,
            cmpScalar<greater_equal, T, 4>
        };
        funcs[cn](src, val, dst, stream);
    }
    template void cmpScalarEq<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarEq<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarEq<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarEq<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarEq<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarEq<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarEq<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarNe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarNe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarNe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarNe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarNe<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarNe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarNe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLt<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLe<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarLe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGt<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGe<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
    template void cmpScalarGe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// bitMat
namespace cv { namespace gpu { namespace cudev
{
    // Bitwise functors read and write whole T elements.
    template <typename T> struct TransformFunctorTraits< bit_not<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< bit_and<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< bit_or<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< bit_xor<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // Bitwise mat ops. Each wrapper dispatches to the masked transform when a
    // mask is supplied (mask.data non-null), otherwise the unmasked fast path.
    // Instantiated only for unsigned element widths (uchar/ushort/uint);
    // presumably other depths are handled by the caller reinterpreting the
    // data — TODO confirm against the host-side dispatch.
    template <typename T> void bitMatNot(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), WithOutMask(), stream);
    }
    template <typename T> void bitMatAnd(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), WithOutMask(), stream);
    }
    template <typename T> void bitMatOr(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), WithOutMask(), stream);
    }
    template <typename T> void bitMatXor(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), WithOutMask(), stream);
    }
    template void bitMatNot<uchar>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatNot<ushort>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatNot<uint>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatAnd<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatAnd<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatAnd<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatOr<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatOr<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatOr<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatXor<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatXor<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatXor<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// bitScalar
namespace cv { namespace gpu { namespace cudev
{
    // Traits for the bound (scalar-second-operand) bitwise functors.
    template <typename T> struct TransformFunctorTraits< binder2nd< bit_and<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< binder2nd< bit_or<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< binder2nd< bit_xor<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // dst = src1 & src2 (scalar broadcast via bind2nd), element-wise.
    template <typename T> void bitScalarAnd(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream)
    {
        const PtrStepSz<T> s = (PtrStepSz<T>) src1;
        const PtrStepSz<T> d = (PtrStepSz<T>) dst;
        cudev::transform(s, d, cv::gpu::cudev::bind2nd(bit_and<T>(), src2), WithOutMask(), stream);
    }
    // dst = src1 | src2 (scalar broadcast), element-wise.
    template <typename T> void bitScalarOr(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream)
    {
        const PtrStepSz<T> s = (PtrStepSz<T>) src1;
        const PtrStepSz<T> d = (PtrStepSz<T>) dst;
        cudev::transform(s, d, cv::gpu::cudev::bind2nd(bit_or<T>(), src2), WithOutMask(), stream);
    }
    // dst = src1 ^ src2 (scalar broadcast), element-wise.
    template <typename T> void bitScalarXor(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream)
    {
        const PtrStepSz<T> s = (PtrStepSz<T>) src1;
        const PtrStepSz<T> d = (PtrStepSz<T>) dst;
        cudev::transform(s, d, cv::gpu::cudev::bind2nd(bit_xor<T>(), src2), WithOutMask(), stream);
    }
    template void bitScalarAnd<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarAnd<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarAnd<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarAnd<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarOr<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarOr<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarOr<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarOr<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarXor<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarXor<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarXor<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarXor<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// min
namespace arithm
{
    // Packed minimum over four 8-bit lanes of a uint word.
    struct VMin4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vmin4(a, b);
        }
        __device__ __forceinline__ VMin4() {}
        __device__ __forceinline__ VMin4(const VMin4& other) {}
    };
    ////////////////////////////////////
    // Packed minimum over two 16-bit lanes of a uint word.
    struct VMin2 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vmin2(a, b);
        }
        __device__ __forceinline__ VMin2() {}
        __device__ __forceinline__ VMin2(const VMin2& other) {}
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Packed min functors read/write full uint words.
    template <> struct TransformFunctorTraits< arithm::VMin4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    ////////////////////////////////////
    template <> struct TransformFunctorTraits< arithm::VMin2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    ////////////////////////////////////
    // Generic (and scalar-bound) minimum read/write single T elements.
    template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // Packed per-lane minimum (4x8-bit lanes per uint).
    void minMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        VMin4 op;
        cudev::transform(src1, src2, dst, op, WithOutMask(), stream);
    }
    // Packed per-lane minimum (2x16-bit lanes per uint).
    void minMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
    {
        VMin2 op;
        cudev::transform(src1, src2, dst, op, WithOutMask(), stream);
    }
    // dst = min(src1, src2), element-wise.
    template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        const PtrStepSz<T> a = (PtrStepSz<T>) src1;
        const PtrStepSz<T> b = (PtrStepSz<T>) src2;
        const PtrStepSz<T> d = (PtrStepSz<T>) dst;
        cudev::transform(a, b, d, minimum<T>(), WithOutMask(), stream);
    }
    template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    // dst = min(src1, src2) where src2 is a scalar broadcast via bind2nd.
    template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream)
    {
        const PtrStepSz<T> s = (PtrStepSz<T>) src1;
        const PtrStepSz<T> d = (PtrStepSz<T>) dst;
        cudev::transform(s, d, cv::gpu::cudev::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
    }
    template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// max
namespace arithm
{
// Functor: byte-wise maximum of two packed 32-bit words (4 x 8-bit lanes),
// delegating to the vmax4 SIMD helper. Used by maxMat_v4 below.
struct VMax4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax4(a, b);
}
// NOTE(review): the explicit empty device ctors (with the copy ctor ignoring
// `other`) look like a deliberate workaround so the functor can be
// constructed/copied in device code -- confirm before removing them.
__device__ __forceinline__ VMax4() {}
__device__ __forceinline__ VMax4(const VMax4& other) {}
};
////////////////////////////////////
// Functor: halfword-wise maximum (2 x 16-bit lanes) via vmax2. Used by
// maxMat_v2 below. Same ctor workaround as VMax4.
struct VMax2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax2(a, b);
}
__device__ __forceinline__ VMax2() {}
__device__ __forceinline__ VMax2(const VMax2& other) {}
};
}
namespace cv { namespace gpu { namespace cudev
{
// TransformFunctorTraits specializations: advertise operand/result element
// sizes to the generic cudev::transform kernel (ArithmFuncTraits is
// parameterized on sizeof(src), sizeof(dst)) so it can pick its access pattern.
template <> struct TransformFunctorTraits< arithm::VMax4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VMax2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
// Plain per-element max, and max-against-a-bound-scalar (binder2nd wraps
// maximum<T> with its second argument fixed).
template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
// Byte-wise max of images viewed as packed 32-bit words (4 x 8-bit lanes;
// see VMax4 above). Unmasked; enqueued asynchronously on `stream`.
void maxMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, VMax4(), WithOutMask(), stream);
}
// Halfword-wise max (2 x 16-bit lanes per word; see VMax2 above).
void maxMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
cudev::transform(src1, src2, dst, VMax2(), WithOutMask(), stream);
}
// Generic per-element maximum of two same-depth images.
template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, maximum<T>(), WithOutMask(), stream);
}
// Explicit instantiations for every supported pixel depth.
template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
// Per-element maximum of an image and a scalar. NOTE(review): src2 (double)
// is narrowed to T inside bind2nd -- confirm the conversion semantics there.
template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
}
// Explicit instantiations for all supported depths.
template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// threshold
namespace cv { namespace gpu { namespace cudev
{
// Traits for the five thresholding functors (binary, binary-inverted, trunc,
// to-zero, to-zero-inverted): input and output elements are the same size.
template <typename T> struct TransformFunctorTraits< thresh_binary_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_binary_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_trunc_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_to_zero_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_to_zero_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
// Builds the requested threshold functor from (thresh, maxVal) and applies it
// over the whole image, unmasked, asynchronously on `stream`.
template <template <typename> class Op, typename T>
void threshold_caller(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, hipStream_t stream)
{
Op<T> op(thresh, maxVal);
cudev::transform(src, dst, op, WithOutMask(), stream);
}
// Dispatches on `type`: 0 = binary, 1 = binary-inverted, 2 = truncate,
// 3 = to-zero, 4 = to-zero-inverted -- the table order presumably matches
// OpenCV's cv::THRESH_* constants; confirm at the call site. There is no
// bounds check on `type`, so an out-of-range value indexes past the table.
// thresh/maxVal are narrowed from double to T with static_cast (plain
// conversion, not saturation).
template <typename T>
void threshold(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, hipStream_t stream);
static const caller_t callers[] =
{
threshold_caller<thresh_binary_func, T>,
threshold_caller<thresh_binary_inv_func, T>,
threshold_caller<thresh_trunc_func, T>,
threshold_caller<thresh_to_zero_func, T>,
threshold_caller<thresh_to_zero_inv_func, T>
};
callers[type]((PtrStepSz<T>) src, (PtrStepSz<T>) dst, static_cast<T>(thresh), static_cast<T>(maxVal), stream);
}
// Explicit instantiations for all supported depths.
template void threshold<uchar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<schar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<ushort>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<short>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<int>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<float>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<double>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// pow
namespace arithm
{
// Power functor, primary template: selected when T is unsigned (Signed is
// defaulted from numeric_limits<T>::is_signed). Computes in single precision
// via the fast __powf intrinsic and saturates the result back to T.
template<typename T, bool Signed = numeric_limits<T>::is_signed> struct PowOp : unary_function<T, T>
{
float power; // exponent, narrowed from double at construction
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
return saturate_cast<T>(__powf((float)e, power));
}
};
// Signed-integer specialization: same __powf on the raw (possibly negative)
// value, then the sign is flipped when the base is negative and the exponent
// is odd. NOTE(review): the parity test truncates `power` with
// static_cast<int>, so a fractional exponent (e.g. 2.5) is treated as its
// integer part for the sign rule -- presumably fractional powers of negative
// integers are unsupported by the caller; confirm.
template<typename T> struct PowOp<T, true> : unary_function<T, T>
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
T res = saturate_cast<T>(__powf((float)e, power));
if ((e < 0) && (1 & static_cast<int>(power)))
res *= -1;
return res;
}
};
// float specialization: fast __powf of the absolute value (magnitude only --
// negative inputs contribute |e|^power with no sign restoration).
template<> struct PowOp<float> : unary_function<float, float>
{
const float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ float operator()(float e) const
{
return __powf(::fabs(e), power);
}
};
// double specialization: full-precision ::pow of the absolute value; the
// exponent is kept in double (no narrowing).
template<> struct PowOp<double> : unary_function<double, double>
{
double power;
PowOp(double power_) : power(power_) {}
__device__ __forceinline__ double operator()(double e) const
{
return ::pow(::fabs(e), power);
}
};
}
namespace cv { namespace gpu { namespace cudev
{
// Same-size in/out element traits for PowOp, for the generic transform kernel.
template <typename T> struct TransformFunctorTraits< arithm::PowOp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
// Raises every element of src to `power` (see PowOp above for the per-type
// precision/sign rules), writing into dst. Unmasked; async on `stream`.
template<typename T>
void pow(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream)
{
cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, PowOp<T>(power), WithOutMask(), stream);
}
// Explicit instantiations for all supported depths.
template void pow<uchar>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
template void pow<schar>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
template void pow<short>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
template void pow<ushort>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
template void pow<int>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
template void pow<float>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
template void pow<double>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// addWeighted
namespace arithm
{
// Compile-time flag: is T the double type? (0 for everything else).
template <typename T> struct UseDouble_
{
enum {value = 0};
};
template <> struct UseDouble_<double>
{
enum {value = 1};
};
// True iff any of the two source types or the destination type is double,
// in which case the weighted sum must be accumulated in double precision.
template <typename T1, typename T2, typename D> struct UseDouble
{
enum {value = (UseDouble_<T1>::value || UseDouble_<T2>::value || UseDouble_<D>::value)};
};
template <typename T1, typename T2, typename D, bool useDouble> struct AddWeighted_;
// Single-precision path: alpha/beta/gamma are narrowed to float at
// construction (used whenever no double type is involved).
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, false> : binary_function<T1, T2, D>
{
float alpha;
float beta;
float gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}
// dst = saturate_cast<D>(a*alpha + b*beta + gamma)
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
// Double-precision path: coefficients kept in full precision.
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, true> : binary_function<T1, T2, D>
{
double alpha;
double beta;
double gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
// Public functor: selects the float or double implementation at compile time
// based on the involved element types.
template <typename T1, typename T2, typename D> struct AddWeighted : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>
{
AddWeighted(double alpha_, double beta_, double gamma_) : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>(alpha_, beta_, gamma_) {}
};
}
namespace cv { namespace gpu { namespace cudev
{
// Traits for AddWeighted: when the two source element sizes differ, fall back
// to the default transform traits; when they match, the partial
// specialization below delegates to ArithmFuncTraits<src_size, dst_size>.
template <typename T1, typename T2, typename D, size_t src1_size, size_t src2_size, size_t dst_size> struct AddWeightedTraits : DefaultTransformFunctorTraits< arithm::AddWeighted<T1, T2, D> >
{
};
template <typename T1, typename T2, typename D, size_t src_size, size_t dst_size> struct AddWeightedTraits<T1, T2, D, src_size, src_size, dst_size> : arithm::ArithmFuncTraits<src_size, dst_size>
{
};
template <typename T1, typename T2, typename D> struct TransformFunctorTraits< arithm::AddWeighted<T1, T2, D> > : AddWeightedTraits<T1, T2, D, sizeof(T1), sizeof(T2), sizeof(D)>
{
};
}}}
namespace arithm
{
// Weighted sum with mixed depths: dst = saturate(src1*alpha + src2*beta + gamma),
// element-wise. Arithmetic runs in float unless any of T1/T2/D is double (see
// AddWeighted above). Unmasked; enqueued asynchronously on `stream`.
template <typename T1, typename T2, typename D>
void addWeighted(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream)
{
AddWeighted<T1, T2, D> op(alpha, beta, gamma);
cudev::transform((PtrStepSz<T1>) src1, (PtrStepSz<T2>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations: source-type pairs <T1, T2> with T1 <= T2 in depth
// order, each combined with every destination depth D (the list is long and
// continues past this point in the file).
template void addWeighted<uchar, uchar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| 9c20210503562edcd140054380285ec2b9701b77.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
using namespace cv::gpu;
using namespace cv::gpu::cudev;
namespace arithm
{
    // Kernel launch-configuration traits keyed by the byte sizes of the
    // source and destination element types.
    //
    //   simple_block_dim_* : block shape for the plain transform path.
    //   smart_block_dim_*  : block shape for the vectorized transform path.
    //   smart_shift        : elements handled per thread on the smart path.
    //
    // The original enumerated every (src_size, dst_size) pair from
    // {1, 2, 4} x {1, 2, 4} as an explicit specialization with
    // smart_shift = 4, falling back to the primary template
    // (smart_shift = 1) for anything else (e.g. 8-byte doubles).
    // The single compile-time condition below yields exactly the same
    // value for every possible instantiation.
    template <size_t src_size, size_t dst_size> struct ArithmFuncTraits
    {
        enum { simple_block_dim_x = 32 };
        enum { simple_block_dim_y = 8 };

        enum { smart_block_dim_x = 32 };
        enum { smart_block_dim_y = 8 };

        enum { smart_shift = ((src_size == 1 || src_size == 2 || src_size == 4) &&
                              (dst_size == 1 || dst_size == 2 || dst_size == 4)) ? 4 : 1 };
    };
}
//////////////////////////////////////////////////////////////////////////
// addMat
namespace arithm
{
// Packed SIMD add: one 32-bit word holds four 8-bit lanes that are added
// independently by the vadd4() helper (opencv2/core/cuda/simd_functions.hpp).
// Used by the addMat_v4 fast path below; per-lane saturation/wrap semantics
// are defined by vadd4 itself — NOTE(review): confirm in simd_functions.hpp.
struct VAdd4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vadd4(a, b);
}
// Device-callable no-op constructors so the functor can be created and
// copied inside kernel code.
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4& other) {}
};
////////////////////////////////////
// Packed SIMD add: one 32-bit word holds two 16-bit lanes added
// independently by vadd2(). Used by the addMat_v2 fast path below.
struct VAdd2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vadd2(a, b);
}
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2& other) {}
};
////////////////////////////////////
// Scalar per-element add with OpenCV saturation: the sum a + b is clamped
// into the destination type's range by saturate_cast<D>.
template <typename T, typename D> struct AddMat : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a + b);
}
__device__ __forceinline__ AddMat() {}
__device__ __forceinline__ AddMat(const AddMat& other) {}
};
}
namespace cv { namespace gpu { namespace cudev
{
// Map each add functor to its launch configuration: cudev::transform reads
// these traits to choose block dimensions and the per-thread element shift.
template <> struct TransformFunctorTraits< arithm::VAdd4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VAdd2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
// Generic AddMat: configuration is keyed by the source/destination sizes.
template <typename T, typename D> struct TransformFunctorTraits< arithm::AddMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Packed fast path: rows are reinterpreted as uint words holding 4 x u8.
// No mask support on this path; the caller is responsible for ensuring the
// buffers are suitably sized/aligned for the uint view.
void addMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VAdd4(), WithOutMask(), stream);
}
// Packed fast path: uint words holding 2 x u16. No mask support.
void addMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VAdd2(), WithOutMask(), stream);
}
// Generic per-element saturating add, optionally gated by a mask (an
// element is written only where mask is non-zero). The untyped PtrStepSzb
// arguments are cast to the template element types, so the actual buffer
// element types must match T/T/D.
template <typename T, typename D>
void addMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
if (mask.data)
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), mask, stream);
else
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), WithOutMask(), stream);
}
// Explicit instantiations for every (src depth, dst depth) pair the OpenCV
// dispatcher uses; combinations that are never dispatched stay commented
// out (presumably to limit compile time and binary size).
template void addMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// addScalar
namespace arithm
{
    // Adds a fixed scalar (held in the working type S) to every source
    // element of type T and saturates the result into the destination
    // type D via saturate_cast.
    template <typename T, typename S, typename D> struct AddScalar : unary_function<T, D>
    {
        S val;  // scalar addend, converted once on the host side

        explicit AddScalar(S value) : val(value) {}

        __device__ __forceinline__ D operator ()(T src) const
        {
            return saturate_cast<D>(val + src);
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
// Launch configuration for AddScalar, keyed by the source/destination
// element sizes (the scalar working type S does not affect the config).
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::AddScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Per-element "src + scalar" with saturation, optionally gated by a mask
// (written only where mask is non-zero). The double scalar is narrowed to
// the working type S before the kernel runs; the untyped PtrStepSzb
// buffers must actually contain elements of type T (src) and D (dst).
template <typename T, typename S, typename D>
void addScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
AddScalar<T, S, D> op(static_cast<S>(val));
if (mask.data)
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
else
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations; S is float except for double destinations.
// Pairs the OpenCV dispatcher never uses stay commented out (presumably
// to limit compile time and binary size).
template void addScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// subMat
namespace arithm
{
// Packed SIMD subtract: one 32-bit word holds four 8-bit lanes subtracted
// independently by the vsub4() helper (opencv2/core/cuda/simd_functions.hpp).
// Used by the subMat_v4 fast path below; per-lane saturation/wrap semantics
// are defined by vsub4 itself — NOTE(review): confirm in simd_functions.hpp.
struct VSub4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vsub4(a, b);
}
// Device-callable no-op constructors so the functor can be created and
// copied inside kernel code.
__device__ __forceinline__ VSub4() {}
__device__ __forceinline__ VSub4(const VSub4& other) {}
};
////////////////////////////////////
// Packed SIMD subtract: one 32-bit word holds two 16-bit lanes subtracted
// independently by vsub2(). Used by the subMat_v2 fast path below.
struct VSub2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vsub2(a, b);
}
__device__ __forceinline__ VSub2() {}
__device__ __forceinline__ VSub2(const VSub2& other) {}
};
////////////////////////////////////
// Scalar per-element subtract with OpenCV saturation: a - b is clamped
// into the destination type's range by saturate_cast<D>.
template <typename T, typename D> struct SubMat : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a - b);
}
__device__ __forceinline__ SubMat() {}
__device__ __forceinline__ SubMat(const SubMat& other) {}
};
}
namespace cv { namespace gpu { namespace cudev
{
// Map each subtract functor to its launch configuration (ArithmFuncTraits)
// so cudev::transform selects block dimensions and the per-thread shift.
template <> struct TransformFunctorTraits< arithm::VSub4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VSub2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
// Generic SubMat: configuration is keyed by the source/destination sizes.
template <typename T, typename D> struct TransformFunctorTraits< arithm::SubMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Packed fast paths for subtraction: rows reinterpreted as uint words
// holding 4 x u8 (v4) or 2 x u16 (v2). No mask support on these paths.
void subMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VSub4(), WithOutMask(), stream);
}
void subMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VSub2(), WithOutMask(), stream);
}
// Generic per-element saturating subtract, optionally gated by a mask (an
// element is written only where mask is non-zero). The untyped PtrStepSzb
// arguments are cast to the template element types, so the actual buffer
// element types must match T/T/D.
template <typename T, typename D>
void subMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
if (mask.data)
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), mask, stream);
else
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), WithOutMask(), stream);
}
template void subMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// subScalar
namespace arithm
{
    // Subtract a scalar from every element. Implemented by reusing AddScalar
    // with the negated value (src - val == src + (-val)); the negation is
    // done in S (float or double here), so no integer wrap-around occurs.
    // The mask branch mirrors subMat: a non-null mask selects the masked
    // transform overload.
    template <typename T, typename S, typename D>
    void subScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        AddScalar<T, S, D> op(-static_cast<S>(val));
        if (mask.data)
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
        else
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }
    // Explicit instantiations; S is float except for the double-output
    // combinations, which compute in double.
    template void subScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    //template void subScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void subScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// mulMat
namespace arithm
{
    // Multiply each 8-bit channel of a packed uchar4 (carried in a uint) by
    // a float, saturating every channel back into [0, 255] before repacking.
    struct Mul_8uc4_32f : binary_function<uint, float, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, float b) const
        {
            uint res = 0;
            // Extract each byte lane, scale in float, saturate, re-pack.
            res |= (saturate_cast<uchar>((0xffu & (a )) * b) );
            res |= (saturate_cast<uchar>((0xffu & (a >> 8)) * b) << 8);
            res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
            res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
            return res;
        }
        // NOTE(review): explicit trivial ctors mirror the sibling functors in
        // this file (presumably to keep the device compiler happy).
        __device__ __forceinline__ Mul_8uc4_32f() {}
        __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f& other) {}
    };
    // Multiply every channel of a short4 by a float, saturating to short.
    struct Mul_16sc4_32f : binary_function<short4, float, short4>
    {
        __device__ __forceinline__ short4 operator ()(short4 a, float b) const
        {
            return make_short4(saturate_cast<short>(a.x * b), saturate_cast<short>(a.y * b),
                               saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
        }
        __device__ __forceinline__ Mul_16sc4_32f() {}
        __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f& other) {}
    };
    // Plain per-element product: dst = saturate_cast<D>(a * b).
    template <typename T, typename D> struct Mul : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(a * b);
        }
        __device__ __forceinline__ Mul() {}
        __device__ __forceinline__ Mul(const Mul& other) {}
    };
    // Scaled product: dst = saturate_cast<D>(scale * a * b), with the scale
    // captured at construction time on the host.
    template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
    {
        S scale;
        explicit MulScale(S scale_) : scale(scale_) {}
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(scale * a * b);
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Element-size traits so cudev::transform can shape its launch for each
    // multiplication functor.
    template <> struct TransformFunctorTraits<arithm::Mul_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <typename T, typename D> struct TransformFunctorTraits< arithm::Mul<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
    // Packed 4-channel uchar image times a float image (per pixel).
    void mulMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, Mul_8uc4_32f(), WithOutMask(), stream);
    }
    // 4-channel short image times a float image (per pixel).
    void mulMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, Mul_16sc4_32f(), WithOutMask(), stream);
    }
    // Generic per-element multiply. scale == 1 takes the cheaper stateless
    // Mul functor; any other scale uses MulScale computing in S.
    template <typename T, typename S, typename D>
    void mulMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream)
    {
        if (scale == 1)
        {
            Mul<T, D> op;
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
        }
        else
        {
            MulScale<T, S, D> op(static_cast<S>(scale));
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
        }
    }
    // Explicit instantiations for the supported (src, scale, dst) triples.
    template void mulMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void mulMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void mulMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// mulScalar
namespace arithm
{
    // Unary functor: multiply each element by a fixed scalar captured at
    // construction, compute the product in S, and saturate the result to D.
    template <typename T, typename S, typename D> struct MulScalar : unary_function<T, D>
    {
        S val;

        explicit MulScalar(S value) : val(value) {}

        __device__ __forceinline__ D operator ()(T src) const
        {
            return saturate_cast<D>(src * val);
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Element-size traits for the MulScalar functor (transform launch shape).
    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
    // Multiply every element by a scalar: dst = saturate_cast<D>(src * val),
    // with the product computed in S (float, or double for double outputs).
    // No mask variant is provided for this operation.
    template <typename T, typename S, typename D>
    void mulScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
    {
        MulScalar<T, S, D> op(static_cast<S>(val));
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }
    // Explicit instantiations for the supported (src, scale, dst) triples.
    template void mulScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void mulScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void mulScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divMat
namespace arithm
{
    // Divide each 8-bit channel of a packed uchar4 (carried in a uint) by a
    // float divisor. Each channel is scaled independently and saturated back
    // into [0, 255]; a zero divisor yields 0 in every channel.
    struct Div_8uc4_32f : binary_function<uint, float, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, float b) const
        {
            uint res = 0;
            if (b != 0)
            {
                // One reciprocal, then four multiplies (cheaper than four divides).
                b = 1.0f / b;
                res |= (saturate_cast<uchar>((0xffu & (a )) * b) );
                res |= (saturate_cast<uchar>((0xffu & (a >> 8)) * b) << 8);
                res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
                res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
            }
            return res;
        }
        // Explicit trivial ctors added for consistency with every other
        // functor in this file (VSub2, Mul_8uc4_32f, Mul, Div, ...), which
        // declare them explicitly.
        __device__ __forceinline__ Div_8uc4_32f() {}
        __device__ __forceinline__ Div_8uc4_32f(const Div_8uc4_32f& other) {}
    };
    // Divide every channel of a short4 by a float, saturating to short; a
    // zero divisor maps the whole element to (0, 0, 0, 0).
    struct Div_16sc4_32f : binary_function<short4, float, short4>
    {
        __device__ __forceinline__ short4 operator ()(short4 a, float b) const
        {
            return b != 0 ? make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
                                        saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b))
                          : make_short4(0,0,0,0);
        }
        // Explicit trivial ctors for consistency with the sibling functors.
        __device__ __forceinline__ Div_16sc4_32f() {}
        __device__ __forceinline__ Div_16sc4_32f(const Div_16sc4_32f& other) {}
    };
    // Per-element division with a zero-divisor guard: b == 0 yields 0,
    // otherwise dst = saturate_cast<D>(a / b).
    template <typename T, typename D> struct Div : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return b != 0 ? saturate_cast<D>(a / b) : 0;
        }
        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    // Floating-point destination specializations: the quotient is produced
    // directly in the wider type, so no saturate_cast is needed.
    template <typename T> struct Div<T, float> : binary_function<T, T, float>
    {
        __device__ __forceinline__ float operator ()(T a, T b) const
        {
            return b != 0 ? static_cast<float>(a) / b : 0;
        }
        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    template <typename T> struct Div<T, double> : binary_function<T, T, double>
    {
        __device__ __forceinline__ double operator ()(T a, T b) const
        {
            return b != 0 ? static_cast<double>(a) / b : 0;
        }
        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    // Scaled division: dst = saturate_cast<D>(scale * a / b), 0 when b == 0.
    // The scale is captured at construction time on the host.
    template <typename T, typename S, typename D> struct DivScale : binary_function<T, T, D>
    {
        S scale;
        explicit DivScale(S scale_) : scale(scale_) {}
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return b != 0 ? saturate_cast<D>(scale * a / b) : 0;
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Element-size traits so cudev::transform can shape its launch for each
    // division functor.
    template <> struct TransformFunctorTraits<arithm::Div_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <typename T, typename D> struct TransformFunctorTraits< arithm::Div<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
    // 8UC4 / 32F division: each 4-channel uchar pixel is handled as one
    // packed uint (Div_8uc4_32f is defined earlier in this file).
    void divMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, Div_8uc4_32f(), WithOutMask(), stream);
    }

    // 16SC4 / 32F division (Div_16sc4_32f is defined earlier in this file).
    void divMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, Div_16sc4_32f(), WithOutMask(), stream);
    }

    // Per-element matrix division: dst = saturate_cast<D>(scale * src1 / src2).
    // T: source type, S: scale/intermediate type, D: destination type.
    // scale == 1 dispatches to the unscaled Div functor to avoid the multiply.
    template <typename T, typename S, typename D>
    void divMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream)
    {
        if (scale == 1)
        {
            Div<T, D> op;
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
        }
        else
        {
            DivScale<T, S, D> op(static_cast<S>(scale));
            cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
        }
    }

    // Explicit instantiations for the supported (src, intermediate, dst)
    // type combinations; the commented-out lines are combinations left
    // unsupported on purpose.
    template void divMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    template void divMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void divMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void divMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void divMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void divMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);

    //template void divMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    //template void divMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
    template void divMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divScalar
namespace arithm
{
    // Matrix / scalar division, implemented as multiplication by the
    // reciprocal via MulScalar (defined earlier in this file).
    // NOTE(review): val == 0 produces an infinite reciprocal here instead of
    // the "divide by zero yields 0" convention used by divMat — confirm this
    // matches the intended (CPU) behaviour.
    // NOTE(review): for integer T, multiplying by 1/val may round differently
    // from a true division in corner cases — verify against the CPU path.
    template <typename T, typename S, typename D>
    void divScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
    {
        MulScalar<T, S, D> op(static_cast<S>(1.0 / val));
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }

    // Explicit instantiations; commented-out combinations are unsupported.
    template void divScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    template void divScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divInv
namespace arithm
{
    // Functor for "scalar / matrix": dst = saturate_cast<D>(val / a).
    // Division by a zero element follows the OpenCV convention and yields 0.
    template <typename T, typename S, typename D> struct DivInv : unary_function<T, D>
    {
        S val;

        explicit DivInv(S val_) : val(val_) {}

        __device__ __forceinline__ D operator ()(T a) const
        {
            if (a == 0)
                return 0;

            return saturate_cast<D>(val / a);
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for DivInv (same ArithmFuncTraits scheme as the other
    // functors in this file).
    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivInv<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
    // Scalar / matrix division: dst = saturate_cast<D>(val / src1), with
    // zero source elements mapped to 0 by the DivInv functor.
    template <typename T, typename S, typename D>
    void divInv(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
    {
        DivInv<T, S, D> op(static_cast<S>(val));
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }

    // Explicit instantiations; commented-out combinations are unsupported.
    template void divInv<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    template void divInv<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divInv<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divInv<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divInv<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divInv<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);

    //template void divInv<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    //template void divInv<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
    template void divInv<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absDiffMat
namespace arithm
{
    // Packed absolute difference of four 8-bit lanes stored in one uint —
    // vabsdiff4 is presumably OpenCV's SIMD helper (simd_functions); confirm
    // lane semantics there.
    struct VAbsDiff4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vabsdiff4(a, b);
        }

        // Explicit trivial ctors — same pattern as the other stateless
        // functors in this file.
        __device__ __forceinline__ VAbsDiff4() {}
        __device__ __forceinline__ VAbsDiff4(const VAbsDiff4& other) {}
    };

    ////////////////////////////////////

    // Packed absolute difference of two 16-bit lanes stored in one uint.
    struct VAbsDiff2 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vabsdiff2(a, b);
        }

        __device__ __forceinline__ VAbsDiff2() {}
        __device__ __forceinline__ VAbsDiff2(const VAbsDiff2& other) {}
    };

    ////////////////////////////////////

    // Overload set so AbsDiffMat dispatches to the right device abs for
    // int/float/double operands.
    __device__ __forceinline__ int _abs(int a)
    {
        return ::abs(a);
    }
    __device__ __forceinline__ float _abs(float a)
    {
        return ::fabsf(a);
    }
    __device__ __forceinline__ double _abs(double a)
    {
        return ::fabs(a);
    }

    // Per-element absolute difference: dst = saturate_cast<T>(|a - b|).
    template <typename T> struct AbsDiffMat : binary_function<T, T, T>
    {
        __device__ __forceinline__ T operator ()(T a, T b) const
        {
            return saturate_cast<T>(_abs(a - b));
        }

        __device__ __forceinline__ AbsDiffMat() {}
        __device__ __forceinline__ AbsDiffMat(const AbsDiffMat& other) {}
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for the absdiff functors (ArithmFuncTraits scheme).
    template <> struct TransformFunctorTraits< arithm::VAbsDiff4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    ////////////////////////////////////

    template <> struct TransformFunctorTraits< arithm::VAbsDiff2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    ////////////////////////////////////

    template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // Vectorized absdiff over uint-packed data: 4 x 8-bit lanes per element.
    void absDiffMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VAbsDiff4(), WithOutMask(), stream);
    }

    // Vectorized absdiff over uint-packed data: 2 x 16-bit lanes per element.
    void absDiffMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VAbsDiff2(), WithOutMask(), stream);
    }

    // Scalar per-element absdiff: dst = saturate_cast<T>(|src1 - src2|).
    template <typename T>
    void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, AbsDiffMat<T>(), WithOutMask(), stream);
    }

    // Explicit instantiations for all supported element types.
    template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absDiffScalar
namespace arithm
{
    // Matrix-vs-scalar absolute difference: dst = saturate_cast<T>(|a - val|).
    // The subtraction is carried out in S (the scalar's type), then the
    // absolute value is taken in S before saturating back to T.
    template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T>
    {
        S val;

        explicit AbsDiffScalar(S val_) : val(val_) {}

        __device__ __forceinline__ T operator ()(T a) const
        {
            return saturate_cast<T>(abs_func<S>()(a - val));
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for AbsDiffScalar (ArithmFuncTraits scheme).
    template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // Matrix-vs-scalar absdiff launcher: dst = saturate_cast<T>(|src1 - val|),
    // computed in S.
    template <typename T, typename S>
    void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
    {
        AbsDiffScalar<T, S> op(static_cast<S>(val));
        cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }

    // Explicit instantiations (float intermediate for all small types,
    // double for double sources).
    template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absMat
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for the stock abs_func functor.
    template <typename T> struct TransformFunctorTraits< abs_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // Elementwise absolute value: dst = |src|, using the stock abs_func.
    template <typename T>
    void absMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, abs_func<T>(), WithOutMask(), stream);
    }

    // Explicit instantiations for all supported element types.
    template void absMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void absMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// sqrMat
namespace arithm
{
    // Elementwise square: dst = saturate_cast<T>(x * x). The product is
    // passed straight to saturate_cast (no intermediate of type T), so the
    // result is clamped rather than wrapped for small integer types.
    template <typename T> struct Sqr : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            return saturate_cast<T>(x * x);
        }

        // Explicit trivial ctors — same pattern as the other stateless
        // functors in this file.
        __device__ __forceinline__ Sqr() {}
        __device__ __forceinline__ Sqr(const Sqr& other) {}
    };
}
}
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for Sqr (ArithmFuncTraits scheme).
    template <typename T> struct TransformFunctorTraits< arithm::Sqr<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // Elementwise square launcher: dst = saturate_cast<T>(src * src).
    template <typename T>
    void sqrMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, Sqr<T>(), WithOutMask(), stream);
    }

    // Explicit instantiations for all supported element types.
    template void sqrMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// sqrtMat
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for the stock sqrt_func functor.
    template <typename T> struct TransformFunctorTraits< sqrt_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // Elementwise square root launcher, using the stock sqrt_func.
    template <typename T>
    void sqrtMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, sqrt_func<T>(), WithOutMask(), stream);
    }

    // Explicit instantiations for all supported element types.
    template void sqrtMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrtMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrtMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrtMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrtMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrtMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void sqrtMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// logMat
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for the stock log_func functor.
    template <typename T> struct TransformFunctorTraits< log_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // Elementwise natural log launcher, using the stock log_func.
    template <typename T>
    void logMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, log_func<T>(), WithOutMask(), stream);
    }

    // Explicit instantiations for all supported element types.
    template void logMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void logMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void logMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void logMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void logMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void logMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void logMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// expMat
namespace arithm
{
    // Elementwise exponential: dst = saturate_cast<T>(exp_func<T>(x)), so
    // integer destinations are clamped rather than wrapped.
    template <typename T> struct Exp : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            return saturate_cast<T>(exp_func<T>()(x));
        }

        // Explicit trivial ctors — same pattern as the other stateless
        // functors in this file.
        __device__ __forceinline__ Exp() {}
        __device__ __forceinline__ Exp(const Exp& other) {}
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for Exp (ArithmFuncTraits scheme).
    template <typename T> struct TransformFunctorTraits< arithm::Exp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
    // Elementwise exponential launcher: dst = saturate_cast<T>(exp(src)).
    template <typename T>
    void expMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
    {
        cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, Exp<T>(), WithOutMask(), stream);
    }

    // Explicit instantiations for all supported element types.
    template void expMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
    template void expMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpMat
namespace arithm
{
    // Packed 4 x 8-bit lane comparisons on uint-packed data. The vcmp*4
    // helpers are presumably OpenCV's SIMD routines (simd_functions) — they
    // are expected to produce a per-lane mask; confirm lane semantics there.
    struct VCmpEq4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmpeq4(a, b);
        }

        __device__ __forceinline__ VCmpEq4() {}
        __device__ __forceinline__ VCmpEq4(const VCmpEq4& other) {}
    };
    struct VCmpNe4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmpne4(a, b);
        }

        __device__ __forceinline__ VCmpNe4() {}
        __device__ __forceinline__ VCmpNe4(const VCmpNe4& other) {}
    };
    struct VCmpLt4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmplt4(a, b);
        }

        __device__ __forceinline__ VCmpLt4() {}
        __device__ __forceinline__ VCmpLt4(const VCmpLt4& other) {}
    };
    struct VCmpLe4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vcmple4(a, b);
        }

        __device__ __forceinline__ VCmpLe4() {}
        __device__ __forceinline__ VCmpLe4(const VCmpLe4& other) {}
    };

    ////////////////////////////////////

    // Generic scalar comparison adapter. Op (e.g. equal_to<T>) yields a
    // bool; unary minus turns true into -1, which as uchar is 255 — the
    // all-ones "true" value OpenCV compare results use (false stays 0).
    template <class Op, typename T>
    struct Cmp : binary_function<T, T, uchar>
    {
        __device__ __forceinline__ uchar operator()(T a, T b) const
        {
            Op op;
            return -op(a, b);
        }
    };
}
namespace cv { namespace gpu { namespace cudev
{
    // Transform traits for the comparison functors (ArithmFuncTraits scheme).
    template <> struct TransformFunctorTraits< arithm::VCmpEq4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <> struct TransformFunctorTraits< arithm::VCmpNe4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <> struct TransformFunctorTraits< arithm::VCmpLt4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <> struct TransformFunctorTraits< arithm::VCmpLe4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    ////////////////////////////////////

    template <class Op, typename T> struct TransformFunctorTraits< arithm::Cmp<Op, T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
    {
    };
}}}
namespace arithm
{
    // Vectorized comparisons over uint-packed data (4 x 8-bit lanes per
    // element), one launcher per relation.
    void cmpMatEq_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpEq4(), WithOutMask(), stream);
    }
    void cmpMatNe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpNe4(), WithOutMask(), stream);
    }
    void cmpMatLt_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpLt4(), WithOutMask(), stream);
    }
    void cmpMatLe_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
    {
        cudev::transform(src1, src2, dst, VCmpLe4(), WithOutMask(), stream);
    }
template <template <typename> class Op, typename T>
void cmpMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
Cmp<Op<T>, T> op;
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, dst, op, WithOutMask(), stream);
}
    // Thin wrappers binding cmpMat to the four standard relations; the
    // remaining relations (>, >=) are presumably obtained by the caller
    // swapping the operands.
    template <typename T> void cmpMatEq(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cmpMat<equal_to, T>(src1, src2, dst, stream);
    }
    template <typename T> void cmpMatNe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cmpMat<not_equal_to, T>(src1, src2, dst, stream);
    }
    template <typename T> void cmpMatLt(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cmpMat<less, T>(src1, src2, dst, stream);
    }
    template <typename T> void cmpMatLe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        cmpMat<less_equal, T>(src1, src2, dst, stream);
    }
    // Explicit instantiations for all supported element types (list
    // continues below for the remaining relations).
    template void cmpMatEq<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatEq<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);

    template void cmpMatNe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatNe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);

    template void cmpMatLt<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void cmpMatLt<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpScalar
// Matrix-vs-scalar comparison functors: one specialization per channel count
// (1..4). Each compares every channel of a source pixel against the
// corresponding channel of a captured scalar and emits a uchar (mask) vector
// of the same channel count.
namespace arithm
{
// Shorthand for the cn-channel vector type of a scalar type.
#define TYPE_VEC(type, cn) typename TypeVec<type, cn>::vec_type
template <class Op, typename T, int cn> struct CmpScalar;
// 1-channel: plain T in, uchar out.
template <class Op, typename T>
struct CmpScalar<Op, T, 1> : unary_function<T, uchar>
{
const T val;
__host__ explicit CmpScalar(T val_) : val(val_) {}
__device__ __forceinline__ uchar operator()(T src) const
{
Cmp<Op, T> op;
return op(src, val);
}
};
// 2-channel: compare .x/.y independently, pack results with VecTraits::make.
template <class Op, typename T>
struct CmpScalar<Op, T, 2> : unary_function<TYPE_VEC(T, 2), TYPE_VEC(uchar, 2)>
{
const TYPE_VEC(T, 2) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 2) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 2) operator()(const TYPE_VEC(T, 2) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 2)>::make(op(src.x, val.x), op(src.y, val.y));
}
};
// 3-channel variant.
template <class Op, typename T>
struct CmpScalar<Op, T, 3> : unary_function<TYPE_VEC(T, 3), TYPE_VEC(uchar, 3)>
{
const TYPE_VEC(T, 3) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 3) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 3) operator()(const TYPE_VEC(T, 3) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 3)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z));
}
};
// 4-channel variant.
template <class Op, typename T>
struct CmpScalar<Op, T, 4> : unary_function<TYPE_VEC(T, 4), TYPE_VEC(uchar, 4)>
{
const TYPE_VEC(T, 4) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 4) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 4) operator()(const TYPE_VEC(T, 4) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 4)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z), op(src.w, val.w));
}
};
#undef TYPE_VEC
}
// Launch-tuning traits for the 1-channel scalar comparison only; multi-channel
// variants fall back to the default transform traits.
namespace cv { namespace gpu { namespace cudev
{
template <class Op, typename T> struct TransformFunctorTraits< arithm::CmpScalar<Op, T, 1> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
// Matrix-vs-scalar comparison entry points: cn-indexed dispatch onto the
// CmpScalar functors above.
namespace arithm
{
// Builds the per-channel scalar vector and runs the unmasked transform.
// All four entries of val are cast (val is always a double[4], so reading
// unused channels is safe); VecTraits::make picks the first cn of them.
template <template <typename> class Op, typename T, int cn>
void cmpScalar(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type src_t;
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
T sval[] = {static_cast<T>(val[0]), static_cast<T>(val[1]), static_cast<T>(val[2]), static_cast<T>(val[3])};
src_t val1 = VecTraits<src_t>::make(sval);
CmpScalar<Op<T>, T, cn> op(val1);
cudev::transform((PtrStepSz<src_t>) src, (PtrStepSz<dst_t>) dst, op, WithOutMask(), stream);
}
// Per-predicate dispatchers. The function table is indexed directly by the
// channel count; entry 0 is a null placeholder, so callers must pass
// 1 <= cn <= 4 (no bounds check here).
template <typename T> void cmpScalarEq(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<equal_to, T, 1>,
cmpScalar<equal_to, T, 2>,
cmpScalar<equal_to, T, 3>,
cmpScalar<equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarNe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<not_equal_to, T, 1>,
cmpScalar<not_equal_to, T, 2>,
cmpScalar<not_equal_to, T, 3>,
cmpScalar<not_equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less, T, 1>,
cmpScalar<less, T, 2>,
cmpScalar<less, T, 3>,
cmpScalar<less, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less_equal, T, 1>,
cmpScalar<less_equal, T, 2>,
cmpScalar<less_equal, T, 3>,
cmpScalar<less_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
// Gt/Ge exist here (unlike the mat-vs-mat path) because the functor applies
// op(src, val), so no operand swap is possible on the host side.
template <typename T> void cmpScalarGt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater, T, 1>,
cmpScalar<greater, T, 2>,
cmpScalar<greater, T, 3>,
cmpScalar<greater, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater_equal, T, 1>,
cmpScalar<greater_equal, T, 2>,
cmpScalar<greater_equal, T, 3>,
cmpScalar<greater_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
// Explicit instantiations: six predicates x seven depths.
template void cmpScalarEq<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// bitMat
// Launch-tuning traits for the element-wise bitwise functors: input and
// output elements are the same width.
namespace cv { namespace gpu { namespace cudev
{
template <typename T> struct TransformFunctorTraits< bit_not<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_and<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_or<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_xor<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Element-wise bitwise operations between two matrices (or unary NOT).
// Each wrapper picks the masked or unmasked transform overload depending on
// whether a mask plane was supplied.
namespace arithm
{
template <typename T> void bitMatNot(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
if (mask.data)
cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), mask, stream);
else
cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), WithOutMask(), stream);
}
template <typename T> void bitMatAnd(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
if (mask.data)
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), mask, stream);
else
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), WithOutMask(), stream);
}
template <typename T> void bitMatOr(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
if (mask.data)
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), mask, stream);
else
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), WithOutMask(), stream);
}
template <typename T> void bitMatXor(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
if (mask.data)
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), mask, stream);
else
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), WithOutMask(), stream);
}
// Instantiated for unsigned widths only; other depths are presumably
// reinterpreted to one of these by the host wrapper -- confirm there.
template void bitMatNot<uchar>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatNot<ushort>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatNot<uint>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatAnd<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatAnd<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatAnd<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatOr<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatOr<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatOr<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatXor<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatXor<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void bitMatXor<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// bitScalar
// Launch-tuning traits for the bound (scalar) bitwise functors produced by
// bind2nd below.
namespace cv { namespace gpu { namespace cudev
{
template <typename T> struct TransformFunctorTraits< binder2nd< bit_and<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< bit_or<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< bit_xor<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Element-wise bitwise operations against a scalar. The uint operand src2 is
// bound as the second argument of the functor (and implicitly converted to T
// by the binder, truncating for narrower T).
namespace arithm
{
template <typename T> void bitScalarAnd(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(bit_and<T>(), src2), WithOutMask(), stream);
}
template <typename T> void bitScalarOr(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(bit_or<T>(), src2), WithOutMask(), stream);
}
template <typename T> void bitScalarXor(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(bit_xor<T>(), src2), WithOutMask(), stream);
}
// Note: both int and unsigned int are instantiated here (unlike bitMat).
template void bitScalarAnd<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarAnd<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarAnd<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarAnd<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarOr<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarOr<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarOr<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarOr<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarXor<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarXor<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarXor<int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
template void bitScalarXor<unsigned int>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// min
// Packed-minimum device functors over a uint word, delegating to the vmin4 /
// vmin2 helpers (defined elsewhere in the module; presumably per-byte and
// per-halfword minima, matching the 4/2 suffixes -- confirm in the SIMD
// helper header). The explicit empty default/copy constructors mirror the
// other V* functors in this file.
namespace arithm
{
struct VMin4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin4(a, b);
}
__device__ __forceinline__ VMin4() {}
__device__ __forceinline__ VMin4(const VMin4& other) {}
};
////////////////////////////////////
struct VMin2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin2(a, b);
}
__device__ __forceinline__ VMin2() {}
__device__ __forceinline__ VMin2(const VMin2& other) {}
};
}
// Launch-tuning traits for the min functors: packed variants move uints,
// scalar variants move one T in and one T out.
namespace cv { namespace gpu { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VMin4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VMin2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Element-wise minimum: packed uint fast paths plus generic per-type paths
// for mat-vs-mat and mat-vs-scalar.
namespace arithm
{
void minMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VMin4(), WithOutMask(), stream);
}
void minMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VMin2(), WithOutMask(), stream);
}
template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, minimum<T>(), WithOutMask(), stream);
}
template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
// Scalar path: src2 (double) is bound as the second operand of minimum<T>;
// conversion to T happens inside the binder.
template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
}
template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// max
// Packed-maximum device functors over a uint word, mirroring VMin4/VMin2 but
// delegating to the vmax4 / vmax2 helpers.
namespace arithm
{
struct VMax4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax4(a, b);
}
__device__ __forceinline__ VMax4() {}
__device__ __forceinline__ VMax4(const VMax4& other) {}
};
////////////////////////////////////
struct VMax2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax2(a, b);
}
__device__ __forceinline__ VMax2() {}
__device__ __forceinline__ VMax2(const VMax2& other) {}
};
}
// Launch-tuning traits for the max functors, parallel to the min traits above.
namespace cv { namespace gpu { namespace cudev
{
template <> struct TransformFunctorTraits< arithm::VMax4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <> struct TransformFunctorTraits< arithm::VMax2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Element-wise maximum: same structure as the min section above.
namespace arithm
{
void maxMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VMax4(), WithOutMask(), stream);
}
void maxMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
cudev::transform(src1, src2, dst, VMax2(), WithOutMask(), stream);
}
template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, maximum<T>(), WithOutMask(), stream);
}
template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
// Scalar path: src2 (double) is bound as the second operand of maximum<T>.
template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::cudev::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
}
template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// threshold
// Launch-tuning traits for the five thresholding functors (defined elsewhere
// in the module); all map one T to one T.
namespace cv { namespace gpu { namespace cudev
{
template <typename T> struct TransformFunctorTraits< thresh_binary_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_binary_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_trunc_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_to_zero_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_to_zero_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Thresholding: dispatches on the threshold type to one of five functor
// instantiations (table order: binary, binary_inv, trunc, to_zero,
// to_zero_inv -- presumably matching the OpenCV THRESH_* enum values; no
// bounds check, so callers must pass 0 <= type <= 4).
namespace arithm
{
// Instantiates the chosen functor with thresh/maxVal and runs the unmasked
// transform.
template <template <typename> class Op, typename T>
void threshold_caller(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, cudaStream_t stream)
{
Op<T> op(thresh, maxVal);
cudev::transform(src, dst, op, WithOutMask(), stream);
}
template <typename T>
void threshold(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, cudaStream_t stream);
static const caller_t callers[] =
{
threshold_caller<thresh_binary_func, T>,
threshold_caller<thresh_binary_inv_func, T>,
threshold_caller<thresh_trunc_func, T>,
threshold_caller<thresh_to_zero_func, T>,
threshold_caller<thresh_to_zero_inv_func, T>
};
// thresh/maxVal are narrowed from double to T here (static_cast, no rounding).
callers[type]((PtrStepSz<T>) src, (PtrStepSz<T>) dst, static_cast<T>(thresh), static_cast<T>(maxVal), stream);
}
template void threshold<uchar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
template void threshold<schar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
template void threshold<ushort>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
template void threshold<short>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
template void threshold<int>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
template void threshold<float>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
template void threshold<double>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// pow
// Per-element power functors. The Signed template parameter selects the
// integer handling: unsigned integer types use the fast __powf directly;
// signed integer types restore the sign when a negative base is raised to an
// odd (truncated-to-int) power. Both float and double specializations raise
// |e| to the power -- negative float/double bases therefore lose their sign,
// matching the integer saturate_cast pipeline of the other specializations.
namespace arithm
{
// Primary template: unsigned integer T (Signed deduced from numeric_limits).
// Uses single-precision __powf and saturates the result back to T.
template<typename T, bool Signed = numeric_limits<T>::is_signed> struct PowOp : unary_function<T, T>
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
return saturate_cast<T>(__powf((float)e, power));
}
};
// Signed integer specialization: negate the result for negative bases with
// an odd integral exponent (power is truncated to int for the parity test).
template<typename T> struct PowOp<T, true> : unary_function<T, T>
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
T res = saturate_cast<T>(__powf((float)e, power));
if ((e < 0) && (1 & static_cast<int>(power)))
res *= -1;
return res;
}
};
// float: fast intrinsic pow of the absolute value.
template<> struct PowOp<float> : unary_function<float, float>
{
const float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ float operator()(float e) const
{
return __powf(::fabs(e), power);
}
};
// double: full-precision ::pow of the absolute value.
template<> struct PowOp<double> : unary_function<double, double>
{
double power;
PowOp(double power_) : power(power_) {}
__device__ __forceinline__ double operator()(double e) const
{
return ::pow(::fabs(e), power);
}
};
}
// Launch-tuning traits for PowOp: one T in, one T out.
namespace cv { namespace gpu { namespace cudev
{
template <typename T> struct TransformFunctorTraits< arithm::PowOp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Per-element power entry point: wraps PowOp<T> in an unmasked transform.
namespace arithm
{
template<typename T>
void pow(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream)
{
cudev::transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, PowOp<T>(power), WithOutMask(), stream);
}
template void pow<uchar>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
template void pow<schar>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
template void pow<short>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
template void pow<ushort>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
template void pow<int>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
template void pow<float>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
template void pow<double>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// addWeighted
// Weighted-sum functors: dst = saturate_cast<D>(a*alpha + b*beta + gamma).
// The UseDouble machinery picks float coefficients/arithmetic unless any of
// the three involved types is double, in which case full double precision is
// used.
namespace arithm
{
// Compile-time flag: is T the double type?
template <typename T> struct UseDouble_
{
enum {value = 0};
};
template <> struct UseDouble_<double>
{
enum {value = 1};
};
// True when any of src1/src2/dst types is double.
template <typename T1, typename T2, typename D> struct UseDouble
{
enum {value = (UseDouble_<T1>::value || UseDouble_<T2>::value || UseDouble_<D>::value)};
};
template <typename T1, typename T2, typename D, bool useDouble> struct AddWeighted_;
// Single-precision variant: coefficients are narrowed to float at
// construction time (on the host).
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, false> : binary_function<T1, T2, D>
{
float alpha;
float beta;
float gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
// Double-precision variant: coefficients kept as double.
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, true> : binary_function<T1, T2, D>
{
double alpha;
double beta;
double gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
// Public functor: selects the precision variant automatically.
template <typename T1, typename T2, typename D> struct AddWeighted : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>
{
AddWeighted(double alpha_, double beta_, double gamma_) : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>(alpha_, beta_, gamma_) {}
};
}
// Launch-tuning traits for AddWeighted: the tuned ArithmFuncTraits are only
// used when both source element sizes match (partial specialization below);
// mixed-size pairs keep the default transform traits.
namespace cv { namespace gpu { namespace cudev
{
template <typename T1, typename T2, typename D, size_t src1_size, size_t src2_size, size_t dst_size> struct AddWeightedTraits : DefaultTransformFunctorTraits< arithm::AddWeighted<T1, T2, D> >
{
};
template <typename T1, typename T2, typename D, size_t src_size, size_t dst_size> struct AddWeightedTraits<T1, T2, D, src_size, src_size, dst_size> : arithm::ArithmFuncTraits<src_size, dst_size>
{
};
template <typename T1, typename T2, typename D> struct TransformFunctorTraits< arithm::AddWeighted<T1, T2, D> > : AddWeightedTraits<T1, T2, D, sizeof(T1), sizeof(T2), sizeof(D)>
{
};
}}}
namespace arithm
{
    // Per-element weighted sum of two images:
    //   dst(y,x) = saturate_cast<D>(src1(y,x) * alpha + src2(y,x) * beta + gamma)
    // Runs asynchronously on `stream` (synchronous on the default stream
    // semantics of cudev::transform when stream == 0).
    template <typename T1, typename T2, typename D>
    void addWeighted(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream)
    {
        // Functor stores the weights as float or double depending on T1/T2/D
        // (see AddWeighted above).
        AddWeighted<T1, T2, D> op(alpha, beta, gamma);
        // WithOutMask(): every pixel is processed, no mask is applied.
        cudev::transform((PtrStepSz<T1>) src1, (PtrStepSz<T2>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
    }
    // Explicit instantiations. Only the ordered source-type pairs T1 <= T2
    // are emitted (28 pairs over the 7 primitive depths), each with all 7
    // destination types — callers are expected to swap operands/weights so
    // that the smaller depth comes first.
    // T1 = uchar
    template void addWeighted<uchar, uchar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, uchar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<uchar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    // T1 = schar
    template void addWeighted<schar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<schar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    // T1 = ushort
    template void addWeighted<ushort, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<ushort, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    // T1 = short
    template void addWeighted<short, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<short, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    // T1 = int
    template void addWeighted<int, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<int, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    // T1 = float
    template void addWeighted<float, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<float, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    // T1 = double
    template void addWeighted<double, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<double, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<double, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<double, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<double, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<double, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
    template void addWeighted<double, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
44901e7268a4503632d831741e38ebb29f3d85a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <hip/hip_fp16.h>
#include "splitPlugin.h"
using namespace nvinfer1;
using nvinfer1::plugin::SplitPlugin;
template<typename T>
__device__
int upper_bound(T const* vals, int n, T const& key)
{
    // Binary search over the sorted range vals[0..n): returns the index of
    // the first element strictly greater than key (n if no such element).
    int lo = 0;
    int remaining = n;
    while( remaining > 0 )
    {
        int half = remaining / 2;
        int probe = lo + half;
        if( key < vals[probe] )
        {
            // Answer lies in the left half (probe itself is a candidate).
            remaining = half;
        }
        else
        {
            // vals[probe] <= key: answer is strictly to the right of probe.
            lo = probe + 1;
            remaining -= half + 1;
        }
    }
    return lo;
}
template<typename T>
__global__
void split_kernel(int nsegment,
                  int const* __restrict__ segment_offsets,
                  T const* __restrict__ idata,
                  T* const* odatas,
                  int nx,
                  int src_ny,
                  int nz)
{
    // Copies each (x, src_y, z) element of the input into the output tensor
    // that owns source row src_y; segment_offsets is a prefix-sum table of
    // segment start rows.  Grid-stride loops make any launch config valid.
    const int xStep = blockDim.x * gridDim.x;
    const int yStep = blockDim.y * gridDim.y;
    const int zStep = blockDim.z * gridDim.z;
    const int xBegin = threadIdx.x + blockIdx.x * blockDim.x;
    const int yBegin = threadIdx.y + blockIdx.y * blockDim.y;
    const int zBegin = threadIdx.z + blockIdx.z * blockDim.z;
    for( int z = zBegin; z < nz; z += zStep )
    {
        for( int src_y = yBegin; src_y < src_ny; src_y += yStep )
        {
            for( int x = xBegin; x < nx; x += xStep )
            {
                // Which output segment owns this source row?
                const int seg    = upper_bound(segment_offsets, nsegment, src_y) - 1;
                const int dst_y  = src_y - segment_offsets[seg];
                const int dst_ny = segment_offsets[seg + 1] - segment_offsets[seg];
                odatas[seg][x + nx*(dst_y + dst_ny*z)] =
                    idata[x + nx*(src_y + src_ny*z)];
            }
        }
    }
}
// Scatters the single input tensor into the plugin's output tensors along the
// split axis.  Returns 0 on success, non-zero on failure (enqueue contract).
// outputDesc and workspace are unused by this implementation.
int SplitPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc,
    const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) noexcept
{
    int const* d_segment_offsets_ptr =
        thrust::raw_pointer_cast(&_d_segment_offsets[0]);
    float const* idata = reinterpret_cast<float const*>(inputs[0]);
    float* const* h_odatas = reinterpret_cast<float* const*>(outputs);
    float** odatas = thrust::raw_pointer_cast(&_d_output_ptrs[0]);
    // Stage the per-output device pointers into a device-resident array so the
    // kernel can index them by segment.
    hipError_t cuda_status =
        hipMemcpyAsync(odatas, h_odatas,
                       _d_output_ptrs.size() * sizeof(float*),
                       hipMemcpyHostToDevice, stream);
    if( cuda_status != hipSuccess )
    {
        return 1;
    }
    // Fold the outermost dimension (presumably the batch — TODO confirm) into
    // the z extent the kernel iterates over.
    int nz = _nz * inputDesc[0].dims.d[0];
    dim3 block(32, 16);
    // Clamping each grid dimension to 65535 is safe: split_kernel uses
    // grid-stride loops, so any grid size still covers the full index space.
    dim3 grid(::min((_nx - 1) / block.x + 1, 65535u),
              ::min((_ny - 1) / block.y + 1, 65535u),
              ::min((_nz - 1) / block.z + 1, 65535u));
    if (inputDesc[0].type==nvinfer1::DataType::kFLOAT)
    {
        hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream, 
            _d_segment_offsets.size(), d_segment_offsets_ptr, idata, odatas,
            _nx, _ny, nz);
    }
    else
    {
        // Non-float inputs are reinterpreted as __half.  NOTE(review): assumes
        // the plugin only ever sees kFLOAT or kHALF — verify supported formats.
        hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream, 
            _d_segment_offsets.size(), d_segment_offsets_ptr, (__half const*)idata, (__half**)odatas,
            _nx, _ny, nz);
    }
    return hipGetLastError() != hipSuccess;
}
| 44901e7268a4503632d831741e38ebb29f3d85a0.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cuda_fp16.h>
#include "splitPlugin.h"
using namespace nvinfer1;
using nvinfer1::plugin::SplitPlugin;
template<typename T>
__device__
int upper_bound(T const* vals, int n, T const& key)
{
    // Binary search over the sorted range vals[0..n): returns the index of
    // the first element strictly greater than key (n if no such element).
    int lo = 0;
    int remaining = n;
    while( remaining > 0 )
    {
        int half = remaining / 2;
        int probe = lo + half;
        if( key < vals[probe] )
        {
            // Answer lies in the left half (probe itself is a candidate).
            remaining = half;
        }
        else
        {
            // vals[probe] <= key: answer is strictly to the right of probe.
            lo = probe + 1;
            remaining -= half + 1;
        }
    }
    return lo;
}
template<typename T>
__global__
void split_kernel(int nsegment,
                  int const* __restrict__ segment_offsets,
                  T const* __restrict__ idata,
                  T* const* odatas,
                  int nx,
                  int src_ny,
                  int nz)
{
    // Copies each (x, src_y, z) element of the input into the output tensor
    // that owns source row src_y; segment_offsets is a prefix-sum table of
    // segment start rows.  Grid-stride loops make any launch config valid.
    const int xStep = blockDim.x * gridDim.x;
    const int yStep = blockDim.y * gridDim.y;
    const int zStep = blockDim.z * gridDim.z;
    const int xBegin = threadIdx.x + blockIdx.x * blockDim.x;
    const int yBegin = threadIdx.y + blockIdx.y * blockDim.y;
    const int zBegin = threadIdx.z + blockIdx.z * blockDim.z;
    for( int z = zBegin; z < nz; z += zStep )
    {
        for( int src_y = yBegin; src_y < src_ny; src_y += yStep )
        {
            for( int x = xBegin; x < nx; x += xStep )
            {
                // Which output segment owns this source row?
                const int seg    = upper_bound(segment_offsets, nsegment, src_y) - 1;
                const int dst_y  = src_y - segment_offsets[seg];
                const int dst_ny = segment_offsets[seg + 1] - segment_offsets[seg];
                odatas[seg][x + nx*(dst_y + dst_ny*z)] =
                    idata[x + nx*(src_y + src_ny*z)];
            }
        }
    }
}
// Scatters the single input tensor into the plugin's output tensors along the
// split axis.  Returns 0 on success, non-zero on failure (enqueue contract).
// outputDesc and workspace are unused by this implementation.
int SplitPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc,
    const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept
{
    int const* d_segment_offsets_ptr =
        thrust::raw_pointer_cast(&_d_segment_offsets[0]);
    float const* idata = reinterpret_cast<float const*>(inputs[0]);
    float* const* h_odatas = reinterpret_cast<float* const*>(outputs);
    float** odatas = thrust::raw_pointer_cast(&_d_output_ptrs[0]);
    // Stage the per-output device pointers into a device-resident array so the
    // kernel can index them by segment.
    cudaError_t cuda_status =
        cudaMemcpyAsync(odatas, h_odatas,
                        _d_output_ptrs.size() * sizeof(float*),
                        cudaMemcpyHostToDevice, stream);
    if( cuda_status != cudaSuccess )
    {
        return 1;
    }
    // Fold the outermost dimension (presumably the batch — TODO confirm) into
    // the z extent the kernel iterates over.
    int nz = _nz * inputDesc[0].dims.d[0];
    dim3 block(32, 16);
    // Clamping each grid dimension to 65535 is safe: split_kernel uses
    // grid-stride loops, so any grid size still covers the full index space.
    dim3 grid(std::min((_nx - 1) / block.x + 1, 65535u),
              std::min((_ny - 1) / block.y + 1, 65535u),
              std::min((_nz - 1) / block.z + 1, 65535u));
    if (inputDesc[0].type==nvinfer1::DataType::kFLOAT)
    {
        split_kernel<<<grid, block, 0, stream>>>
          (_d_segment_offsets.size(), d_segment_offsets_ptr, idata, odatas,
           _nx, _ny, nz);
    }
    else
    {
        // Non-float inputs are reinterpreted as __half.  NOTE(review): assumes
        // the plugin only ever sees kFLOAT or kHALF — verify supported formats.
        split_kernel<<<grid, block, 0, stream>>>
          (_d_segment_offsets.size(), d_segment_offsets_ptr, (__half const*)idata, (__half**)odatas,
           _nx, _ny, nz);
    }
    return cudaGetLastError() != cudaSuccess;
}
|
551b486f693867301005892deb743a587365dc18.hip | // !!! This is a file automatically generated by hipify!!!
#include "DeviceStorage.hpp"
#include <stdio.h>
#include <stdlib.h>
// #include "complex.h"
#include <complex>
#include "hip/hip_runtime.h"
#include "rocblas.h"
//ywg #include "cblas.h"
#ifdef _OPENMP
#include <omp.h>
#else
inline int omp_get_thread_num() {return 0;}
#endif
#include "cudaCheckError.hpp"
#include "cudaDoubleComplex.hpp"
#include <assert.h>
#include <lapack.h>
extern "C" int zmatinv_prep1_ (void **a, void **b, int *n, int *lda, hipStream_t thisstream);
extern "C" int zmatinv_batch_ (hipDoubleComplex **A, hipDoubleComplex **Ainv, int *n, int *batch, hipStream_t thisstream);
extern "C" int ilaenv_(int*,char*,char*,int*,int*,int*,int*);
// Abort the program with a diagnostic when a HIP runtime call has failed.
//
// cerr   : status returned by the runtime call (hipSuccess == 0 means OK)
// errmsg : caller-supplied context string naming the failing call
//
// Fix: take the message as `const char*` — every call site in this file passes
// a string literal, and binding a string literal to `char*` is ill-formed in
// ISO C++11 and later.  `const char*` still accepts any `char*` argument, so
// existing callers are unaffected.
void handle_cuda_error ( hipError_t cerr, const char *errmsg )
{
  if ( cerr ) {
    printf ("CUDA ERROR %s \n", errmsg);
    abort();
  }
  // On success, stay silent (the old debug printf was already disabled).
}
// Abort the program with a diagnostic when a hipBLAS call has failed.
//
// cs     : status returned by the hipBLAS call (0 means success)
// errmsg : caller-supplied context string naming the failing call
//
// Fix: take the message as `const char*` — every call site in this file passes
// a string literal, and binding a string literal to `char*` is ill-formed in
// ISO C++11 and later.  `const char*` still accepts any `char*` argument, so
// existing callers are unaffected.
void handle_cublas_error ( hipblasStatus_t cs, const char *errmsg )
{
  if ( cs ) {
    printf ("cuBLAS ERROR %s \n", errmsg);
    abort();
  }
  // On success, stay silent (the old debug printf was already disabled).
}
//TODO call directly from calculateTauMatrix (don't route through fortran)
extern "C"
void zblock_lu_cuda_c_ ( std::complex<double> *a, int *lda, int *blk_sz, int *nblk, int *ipvt, int *mp, int *idcol, int *k)
// void zblock_lu_cuda_c_ ( double complex *a, int *lda, int *blk_sz, int *nblk, int *ipvt, int *mp, int *idcol, int *k)
//===================================================================================================================
/*
  Performs a partial inversion of the a matrix to return the inverse of the upper diagonal
  subblock.
  a : input matrix - double complex
  blk_sz : integer array giving the size of each subblock
  nblk : the number of subblocks
  ipvt : integer work array (not tested in c version)
  idcol : integer array specifying symmetry (not tested in c version)
  k : returns the actual number of columns in the calculated inverse
*/
{
  //TODO:
  // adjust allocation sizes
  // dynamically choose hybrid or not
  // validate flop count
  // printf("a(33,32) = %f\n",a[32*32]);
  // NOTE(review): ipvt and mp are accepted for Fortran-interface compatibility
  // but are never read by this implementation.
  unsigned long long flops=0;
  /********************paramters for zgemm rank maximization*******************/
  // Trailing-matrix updates are delayed until the accumulated panel rank
  // reaches zgemm_rank, so each ZGEMM operates on a larger, faster panel.
  int zgemm_rank = 600;
  int gpu_only_blks = 0;
  // printf("nblk = %d\n",*nblk);
  // Re-chunk the last gpu_only_blks blocks into sub-blocks of at most 55
  // columns so they take the GPU-only (batched inverse) path below.
  // With gpu_only_blks == 0 this whole section is a no-op.
  int remaining=0;
  for(int i=0;i<gpu_only_blks;i++) {
    (*nblk)--;
    remaining+=blk_sz[*nblk];
  }
  while(remaining>0) {
    blk_sz[*nblk]=min(55,remaining);
    remaining-=55;
    (*nblk)++;
  }
  int currentRank=0;
  int m, n;
  int ioff, joff;
  int info;
  hipError_t ce;
  hipblasStatus_t cublasStat;
  // set constants
  const hipDoubleComplex cone = make_cuDoubleComplex( 1.0, 0.0);
  const hipDoubleComplex cmone = make_cuDoubleComplex(-1.0, 0.0);
  const hipDoubleComplex czero = make_cuDoubleComplex( 0.0, 0.0);
  // get the thread number
  int threadId = omp_get_thread_num();
  /***************************One time initialization, should be moved outside******************************************/
  int max_blk_sz = 0;
  for(int i=0; i<*nblk; i++) {
    max_blk_sz=max(max_blk_sz,blk_sz[i]);
  }
  const int MAX_THREADS=16;
  //TODO dynamically size
  // Per-OpenMP-thread cached device/host workspaces: allocated lazily on the
  // first call from each thread and reused for the life of the process.
  static bool initialized[MAX_THREADS] = {false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false};
  static hipDoubleComplex *vdevWork[MAX_THREADS];
  static hipDoubleComplex *vdevInv[MAX_THREADS];
  static hipDoubleComplex *vdevA2[MAX_THREADS];
  static std::complex<double> *vdevHostDiag[MAX_THREADS];
  //static double complex *vdevHostDiag[MAX_THREADS];
  static std::complex<double> *vwork[MAX_THREADS];
  //static double complex *vwork[MAX_THREADS];
  static int *vhostIPVT[MAX_THREADS];
  static int lwork;
  if ( ! initialized[threadId] ) {
    //printf("Performing initiliaztion in zblock_lu_cuda_c!\n");
    //calculate optimial work size for zgetri
    int one=1; int mone=-1;
    int NB = ilaenv_(&one,"ZGETRI","",&max_blk_sz,&mone,&mone,&mone);
    lwork= max_blk_sz * NB;
    // allocate space on device
    ce = hipMalloc ( &vdevWork[threadId], max_blk_sz*max_blk_sz*sizeof(hipDoubleComplex));
    handle_cuda_error (ce, "hipMalloc devWork");
    ce = hipMalloc ( &vdevInv[threadId], max_blk_sz*max_blk_sz*sizeof(hipDoubleComplex));
    handle_cuda_error (ce, "hipMalloc devInv");
    int LDA= *lda;
    // Pinned host buffers: required for genuinely asynchronous H<->D copies.
    ce = hipHostMalloc ( &vwork[threadId], lwork *sizeof(std::complex<double>));
    // ce = hipHostMalloc ( &vwork[threadId], lwork *sizeof(double complex));
    handle_cuda_error (ce, "hipHostMalloc vwork");
    ce = hipMalloc ( &vdevA2[threadId], max_blk_sz * LDA *sizeof(hipDoubleComplex));
    handle_cuda_error (ce, "hipMalloc devA2");
    ce = hipHostMalloc ( &vdevHostDiag[threadId], max_blk_sz * max_blk_sz *sizeof(std::complex<double>));
    // ce = hipHostMalloc ( &vdevHostDiag[threadId], max_blk_sz * max_blk_sz *sizeof(double complex));
    handle_cuda_error (ce, "hipHostMalloc vdevHostDiag");
    ce = hipHostMalloc((void**)&vhostIPVT[threadId], max_blk_sz*sizeof(int));
    handle_cuda_error (ce, "hipHostMalloc vhostIPVT");
    //this speeds up the small block inverse
    hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
    initialized[threadId] = true;
  }
  /**********************************************************************************************************************/
  /********************assign thread private variables********************************/
  hipStream_t stream1=get_stream_(0);
  hipStream_t stream2=get_stream_(1);
  hipEvent_t done_event=get_cuda_event_();
  hipDoubleComplex *devWork = vdevWork[threadId];
  hipDoubleComplex *devInv = vdevInv[threadId];
  hipDoubleComplex *devA=(hipDoubleComplex*)get_dev_m_();
  hipDoubleComplex *devA2 = vdevA2[threadId];
  std::complex<double> *work = vwork[threadId];
  //double complex *work = vwork[threadId];
  hipblasHandle_t cublasHandle = get_cublas_handle_();
  int *hostIPVT=vhostIPVT[threadId];
  Complex *hostAdiag = (Complex*)vdevHostDiag[threadId];
  /***********************************************************************************/
  // add up the sizes of the subblocks to get the size of the entire matrix
  int na;
  na = 0;
  for ( int i=0; i<abs(*nblk); i++ ) {
    na += blk_sz[i];
  }
  // eliminate columns that are equivalent due to symmetry
  if ( idcol[0] == 0 ) {
    *k = 1;
  }
  else {
    *k = blk_sz[0]+1;
    for ( int i=blk_sz[0]-1; i>=0; i-- ) {
      if ( idcol[0] == 0 || idcol[i] == i ) {
        *k -= 1;
        if ( *k != i ) {
          printf ("Eliminate columns that are equivalent due to symmetry section in zblock_lu_cuda_c not tested\n");
          abort();
          // zcopy ( na-blk_sz[0], a[i*lda+blk_sz[0]], 1, a[*k*lda+blk_sz[0]], 1 );
        }
      }
    }
  }
#ifndef BUILDKKRMATRIX_GPU
  // copy matrix to device
  cublasStat = hipblasSetMatrix ( na, na, sizeof(hipDoubleComplex), a, *lda, devA, *lda);
  handle_cublas_error ( cublasStat, "hipblasSetMatrix devA ");
#endif
  if ( *nblk > 0 ) {
    n = blk_sz[*nblk-1];
    joff = na - n;
    // loop over sub-blocks
    // Block elimination proceeds from the bottom-right block upward; at the
    // end only the leading blk_sz[0] x blk_sz[0] block holds the result.
    for ( int iblk=*nblk-1; iblk>0; iblk-- ) {
      m = n;
      ioff = joff;
      n = blk_sz[iblk-1];
      joff = joff-n;
      //TODO update condition to chose branch, should do this branch when remaining size is small...
      // HPL factorization and left propagation
      if ( m<56 ) { //CUDA only version
        //A^-1 // invert the clique
        // re-package the diagonal block into a dense matrix suitable for sending to zmatinv
        // printf("Use CUDA only version\n");
        hipDoubleComplex *devAdiag;
        devAdiag = &devA[ioff* *lda + ioff];
        info = zmatinv_prep1_ ( (void**)&devAdiag, (void**)&devWork, &m, lda, stream1 );
        if ( info ) { printf (" zmatinv_prep1 returned error code %d \n", info); abort();}
        int one = 1;
        info = zmatinv_batch_ ( &devWork, &devInv, &m, &one, stream1 );
        if ( info ) { printf (" zmatinv_batch returned error code %d \n", info); printf (" m = %d, one = %d \n", m, one ); abort(); }
        flops += m * m * m;
      }
      else { //HYBRID version, do small inverse on the host. This works well.
        // Stage the diagonal block to pinned host memory, invert it there
        // with LAPACK (zgetrf/zgetri), then stream the inverse back down.
        hipDoubleComplex *devAdiag = (hipDoubleComplex*)&devA[ioff* *lda + ioff];
        hipblasSetStream ( cublasHandle, stream1 );
        // printf("Use Hybrid version, m = %d, lda = %d\n",m,*lda);
        cublasStat = hipblasGetMatrixAsync ( m, m, sizeof(hipDoubleComplex), devAdiag, *lda, hostAdiag, m, stream1 );
        handle_cublas_error ( cublasStat, "hipblasGetMatrixAsync devAdiag and hostAdiag failed");
        hipEventRecord(done_event,stream1);
        //wait for transfers to the host to finish
        hipEventSynchronize(done_event);
        int info;
        //zgetrf on host
        zgetrf_(&m, &m, hostAdiag, &m, hostIPVT, &info);
        if ( info ) { printf (" zgetrf returned error code %d \n", info); printf (" m = %d \n", m); abort(); }
        //zgetri on host
        zgetri_(&m, hostAdiag, &m, hostIPVT, (Complex*)work, &lwork, &info);
        if ( info ) { printf (" zgetri returned error code %d \n", info); printf (" m = %d \n", m); abort(); }
        flops += m * m * m;
        //copy_async down to device
        cublasStat = hipblasSetMatrixAsync ( m, m, sizeof(hipDoubleComplex), hostAdiag, m, devInv, m, stream1 );
        hipEventRecord(done_event,stream1);
        //wait for transfers to the host to finish
        hipEventSynchronize(done_event);
      }
      //CA^-1
      cublasStat = hipblasZgemm ( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, ioff, m, &cone, devInv, m, &devA[ioff], *lda, &czero, devA2, max_blk_sz );
      handle_cublas_error ( cublasStat, "Error in hipblasZgemm #1\n" );
      flops+= m * ioff * m;
      //Mark end of small zgemm in stream1
      hipEventRecord(done_event,stream1);
      //stream 2 must wait for the small zgemm to finish
      hipStreamWaitEvent(stream2,done_event,0);
      // Trailing matrix update
      currentRank+=m;
      if ( currentRank<zgemm_rank && iblk>1) {
        // Accumulated rank still small: only update the next block row/column
        // and keep accumulating panels for a bigger ZGEMM later.
        // only update the next block row
        // little chance for hybrid acceleration here - so ignore for now.
        hipblasSetStream ( cublasHandle, stream1 );
        // need to place A2 back into A
        ce = hipMemcpy2DAsync ( &devA[ioff], *lda*sizeof(hipDoubleComplex), devA2, max_blk_sz*sizeof(hipDoubleComplex), m*sizeof(hipDoubleComplex), ioff, hipMemcpyDeviceToDevice, stream1 );
        cublasStat = hipblasZgemm ( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, ioff, currentRank, &cmone,
                                    &devA[ioff* *lda +ioff-n], *lda,
                                    //devA2, max_blk_sz, &cone,
                                    &devA[ioff], *lda, &cone,
                                    &devA[ioff-n], *lda );
        flops += n * ioff * currentRank;
        cublasStat = hipblasZgemm ( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, ioff-n, n, currentRank, &cmone,
                                    &devA[ioff * *lda], *lda,
                                    &devA[(ioff-n) * *lda + ioff], *lda, &cone,
                                    &devA[(ioff-n) * *lda], *lda );
        flops += (ioff-n)*n*currentRank;
      }
      else {
        // update the full trailing matrix
        hipblasSetStream ( cublasHandle, stream1 );
        // perform a portion of the zgemm on the gpu
        // first need to place A2 back into A
        ce = hipMemcpy2DAsync ( &devA[ioff], *lda*sizeof(hipDoubleComplex), devA2, max_blk_sz*sizeof(hipDoubleComplex), m*sizeof(hipDoubleComplex), ioff, hipMemcpyDeviceToDevice, stream1 );
        //D=CA^-1B
        cublasStat = hipblasZgemm ( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, ioff, ioff, currentRank, &cmone,
                                    &devA[ioff* *lda], *lda,
                                    &devA[ioff], *lda , &cone,
                                    devA, *lda);
        flops += ioff * ioff * currentRank;
        // just did a full trailing submatrix update, so reset block row delay counter
        currentRank=0;
      }
    } // end for
    // Copy only the leading (blk_sz[0] x blk_sz[0]) result block back to host.
    cublasStat = hipblasGetMatrixAsync ( blk_sz[0], blk_sz[0], sizeof(hipDoubleComplex), devA, *lda, a, *lda, stream1 );
  } // end if ( *nblk > 0 )
  *k = blk_sz[0];
  hipEventRecord(done_event,stream1);
  //wait for last transfer to finish
  hipEventSynchronize(done_event);
  // clean up
  // Workspaces are intentionally NOT freed: they are cached in the per-thread
  // static arrays above and reused across calls.
  //hipFree (devWork);
  //hipFree (devInv);
  //hipFree (devA);
  //hipFree (devA2);
  //hipHostFree (ap);
#ifdef PRINT_FLOPS
  printf("BLOCK_INV ZGEMM FLOPS: %llu\n", flops*4*2);
#endif
}
| 551b486f693867301005892deb743a587365dc18.cu | #include "DeviceStorage.hpp"
#include <stdio.h>
#include <stdlib.h>
// #include "complex.h"
#include <complex>
#include "cuda_runtime.h"
#include "cublas_v2.h"
//ywg #include "cblas.h"
#ifdef _OPENMP
#include <omp.h>
#else
inline int omp_get_thread_num() {return 0;}
#endif
#include "cudaCheckError.hpp"
#include "cudaDoubleComplex.hpp"
#include <assert.h>
#include <lapack.h>
extern "C" int zmatinv_prep1_ (void **a, void **b, int *n, int *lda, cudaStream_t thisstream);
extern "C" int zmatinv_batch_ (cuDoubleComplex **A, cuDoubleComplex **Ainv, int *n, int *batch, cudaStream_t thisstream);
extern "C" int ilaenv_(int*,char*,char*,int*,int*,int*,int*);
// Abort the program with a diagnostic when a CUDA runtime call has failed.
//
// cerr   : status returned by the runtime call (cudaSuccess == 0 means OK)
// errmsg : caller-supplied context string naming the failing call
//
// Fix: take the message as `const char*` — every call site in this file passes
// a string literal, and binding a string literal to `char*` is ill-formed in
// ISO C++11 and later.  `const char*` still accepts any `char*` argument, so
// existing callers are unaffected.
void handle_cuda_error ( cudaError_t cerr, const char *errmsg )
{
  if ( cerr ) {
    printf ("CUDA ERROR %s \n", errmsg);
    abort();
  }
  // On success, stay silent (the old debug printf was already disabled).
}
// Abort the program with a diagnostic when a cuBLAS call has failed.
//
// cs     : status returned by the cuBLAS call (CUBLAS_STATUS_SUCCESS == 0)
// errmsg : caller-supplied context string naming the failing call
//
// Fix: take the message as `const char*` — every call site in this file passes
// a string literal, and binding a string literal to `char*` is ill-formed in
// ISO C++11 and later.  `const char*` still accepts any `char*` argument, so
// existing callers are unaffected.
void handle_cublas_error ( cublasStatus_t cs, const char *errmsg )
{
  if ( cs ) {
    printf ("cuBLAS ERROR %s \n", errmsg);
    abort();
  }
  // On success, stay silent (the old debug printf was already disabled).
}
//TODO call directly from calculateTauMatrix (don't route through fortran)
extern "C"
void zblock_lu_cuda_c_ ( std::complex<double> *a, int *lda, int *blk_sz, int *nblk, int *ipvt, int *mp, int *idcol, int *k)
// void zblock_lu_cuda_c_ ( double complex *a, int *lda, int *blk_sz, int *nblk, int *ipvt, int *mp, int *idcol, int *k)
//===================================================================================================================
/*
  Performs a partial inversion of the a matrix to return the inverse of the upper diagonal
  subblock.
  a : input matrix - double complex
  blk_sz : integer array giving the size of each subblock
  nblk : the number of subblocks
  ipvt : integer work array (not tested in c version)
  idcol : integer array specifying symmetry (not tested in c version)
  k : returns the actual number of columns in the calculated inverse
*/
{
  //TODO:
  // adjust allocation sizes
  // dynamically choose hybrid or not
  // validate flop count
  // printf("a(33,32) = %f\n",a[32*32]);
  // NOTE(review): ipvt and mp are accepted for Fortran-interface compatibility
  // but are never read by this implementation.
  unsigned long long flops=0;
  /********************paramters for zgemm rank maximization*******************/
  // Trailing-matrix updates are delayed until the accumulated panel rank
  // reaches zgemm_rank, so each ZGEMM operates on a larger, faster panel.
  int zgemm_rank = 600;
  int gpu_only_blks = 0;
  // printf("nblk = %d\n",*nblk);
  // Re-chunk the last gpu_only_blks blocks into sub-blocks of at most 55
  // columns so they take the GPU-only (batched inverse) path below.
  // With gpu_only_blks == 0 this whole section is a no-op.
  int remaining=0;
  for(int i=0;i<gpu_only_blks;i++) {
    (*nblk)--;
    remaining+=blk_sz[*nblk];
  }
  while(remaining>0) {
    blk_sz[*nblk]=min(55,remaining);
    remaining-=55;
    (*nblk)++;
  }
  int currentRank=0;
  int m, n;
  int ioff, joff;
  int info;
  cudaError_t ce;
  cublasStatus_t cublasStat;
  // set constants
  const cuDoubleComplex cone = make_cuDoubleComplex( 1.0, 0.0);
  const cuDoubleComplex cmone = make_cuDoubleComplex(-1.0, 0.0);
  const cuDoubleComplex czero = make_cuDoubleComplex( 0.0, 0.0);
  // get the thread number
  int threadId = omp_get_thread_num();
  /***************************One time initialization, should be moved outside******************************************/
  int max_blk_sz = 0;
  for(int i=0; i<*nblk; i++) {
    max_blk_sz=max(max_blk_sz,blk_sz[i]);
  }
  const int MAX_THREADS=16;
  //TODO dynamically size
  // Per-OpenMP-thread cached device/host workspaces: allocated lazily on the
  // first call from each thread and reused for the life of the process.
  static bool initialized[MAX_THREADS] = {false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false};
  static cuDoubleComplex *vdevWork[MAX_THREADS];
  static cuDoubleComplex *vdevInv[MAX_THREADS];
  static cuDoubleComplex *vdevA2[MAX_THREADS];
  static std::complex<double> *vdevHostDiag[MAX_THREADS];
  //static double complex *vdevHostDiag[MAX_THREADS];
  static std::complex<double> *vwork[MAX_THREADS];
  //static double complex *vwork[MAX_THREADS];
  static int *vhostIPVT[MAX_THREADS];
  static int lwork;
  if ( ! initialized[threadId] ) {
    //printf("Performing initiliaztion in zblock_lu_cuda_c!\n");
    //calculate optimial work size for zgetri
    int one=1; int mone=-1;
    int NB = ilaenv_(&one,"ZGETRI","",&max_blk_sz,&mone,&mone,&mone);
    lwork= max_blk_sz * NB;
    // allocate space on device
    ce = cudaMalloc ( &vdevWork[threadId], max_blk_sz*max_blk_sz*sizeof(cuDoubleComplex));
    handle_cuda_error (ce, "cudaMalloc devWork");
    ce = cudaMalloc ( &vdevInv[threadId], max_blk_sz*max_blk_sz*sizeof(cuDoubleComplex));
    handle_cuda_error (ce, "cudaMalloc devInv");
    int LDA= *lda;
    // Pinned host buffers: required for genuinely asynchronous H<->D copies.
    ce = cudaMallocHost ( &vwork[threadId], lwork *sizeof(std::complex<double>));
    // ce = cudaMallocHost ( &vwork[threadId], lwork *sizeof(double complex));
    handle_cuda_error (ce, "cudaMallocHost vwork");
    ce = cudaMalloc ( &vdevA2[threadId], max_blk_sz * LDA *sizeof(cuDoubleComplex));
    handle_cuda_error (ce, "cudaMalloc devA2");
    ce = cudaMallocHost ( &vdevHostDiag[threadId], max_blk_sz * max_blk_sz *sizeof(std::complex<double>));
    // ce = cudaMallocHost ( &vdevHostDiag[threadId], max_blk_sz * max_blk_sz *sizeof(double complex));
    handle_cuda_error (ce, "cudaMallocHost vdevHostDiag");
    ce = cudaMallocHost((void**)&vhostIPVT[threadId], max_blk_sz*sizeof(int));
    handle_cuda_error (ce, "cudaMallocHost vhostIPVT");
    //this speeds up the small block inverse
    cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
    initialized[threadId] = true;
  }
  /**********************************************************************************************************************/
  /********************assign thread private variables********************************/
  cudaStream_t stream1=get_stream_(0);
  cudaStream_t stream2=get_stream_(1);
  cudaEvent_t done_event=get_cuda_event_();
  cuDoubleComplex *devWork = vdevWork[threadId];
  cuDoubleComplex *devInv = vdevInv[threadId];
  cuDoubleComplex *devA=(cuDoubleComplex*)get_dev_m_();
  cuDoubleComplex *devA2 = vdevA2[threadId];
  std::complex<double> *work = vwork[threadId];
  //double complex *work = vwork[threadId];
  cublasHandle_t cublasHandle = get_cublas_handle_();
  int *hostIPVT=vhostIPVT[threadId];
  Complex *hostAdiag = (Complex*)vdevHostDiag[threadId];
  /***********************************************************************************/
  // add up the sizes of the subblocks to get the size of the entire matrix
  int na;
  na = 0;
  for ( int i=0; i<abs(*nblk); i++ ) {
    na += blk_sz[i];
  }
  // eliminate columns that are equivalent due to symmetry
  if ( idcol[0] == 0 ) {
    *k = 1;
  }
  else {
    *k = blk_sz[0]+1;
    for ( int i=blk_sz[0]-1; i>=0; i-- ) {
      if ( idcol[0] == 0 || idcol[i] == i ) {
        *k -= 1;
        if ( *k != i ) {
          printf ("Eliminate columns that are equivalent due to symmetry section in zblock_lu_cuda_c not tested\n");
          abort();
          // zcopy ( na-blk_sz[0], a[i*lda+blk_sz[0]], 1, a[*k*lda+blk_sz[0]], 1 );
        }
      }
    }
  }
#ifndef BUILDKKRMATRIX_GPU
  // copy matrix to device
  cublasStat = cublasSetMatrix ( na, na, sizeof(cuDoubleComplex), a, *lda, devA, *lda);
  handle_cublas_error ( cublasStat, "cublasSetMatrix devA ");
#endif
  if ( *nblk > 0 ) {
    n = blk_sz[*nblk-1];
    joff = na - n;
    // loop over sub-blocks
    // Block elimination proceeds from the bottom-right block upward; at the
    // end only the leading blk_sz[0] x blk_sz[0] block holds the result.
    for ( int iblk=*nblk-1; iblk>0; iblk-- ) {
      m = n;
      ioff = joff;
      n = blk_sz[iblk-1];
      joff = joff-n;
      //TODO update condition to chose branch, should do this branch when remaining size is small...
      // HPL factorization and left propagation
      if ( m<56 ) { //CUDA only version
        //A^-1 // invert the clique
        // re-package the diagonal block into a dense matrix suitable for sending to zmatinv
        // printf("Use CUDA only version\n");
        cuDoubleComplex *devAdiag;
        devAdiag = &devA[ioff* *lda + ioff];
        info = zmatinv_prep1_ ( (void**)&devAdiag, (void**)&devWork, &m, lda, stream1 );
        if ( info ) { printf (" zmatinv_prep1 returned error code %d \n", info); abort();}
        int one = 1;
        info = zmatinv_batch_ ( &devWork, &devInv, &m, &one, stream1 );
        if ( info ) { printf (" zmatinv_batch returned error code %d \n", info); printf (" m = %d, one = %d \n", m, one ); abort(); }
        flops += m * m * m;
      }
      else { //HYBRID version, do small inverse on the host. This works well.
        // Stage the diagonal block to pinned host memory, invert it there
        // with LAPACK (zgetrf/zgetri), then stream the inverse back down.
        cuDoubleComplex *devAdiag = (cuDoubleComplex*)&devA[ioff* *lda + ioff];
        cublasSetStream ( cublasHandle, stream1 );
        // printf("Use Hybrid version, m = %d, lda = %d\n",m,*lda);
        cublasStat = cublasGetMatrixAsync ( m, m, sizeof(cuDoubleComplex), devAdiag, *lda, hostAdiag, m, stream1 );
        handle_cublas_error ( cublasStat, "cublasGetMatrixAsync devAdiag and hostAdiag failed");
        cudaEventRecord(done_event,stream1);
        //wait for transfers to the host to finish
        cudaEventSynchronize(done_event);
        int info;
        //zgetrf on host
        zgetrf_(&m, &m, hostAdiag, &m, hostIPVT, &info);
        if ( info ) { printf (" zgetrf returned error code %d \n", info); printf (" m = %d \n", m); abort(); }
        //zgetri on host
        zgetri_(&m, hostAdiag, &m, hostIPVT, (Complex*)work, &lwork, &info);
        if ( info ) { printf (" zgetri returned error code %d \n", info); printf (" m = %d \n", m); abort(); }
        flops += m * m * m;
        //copy_async down to device
        cublasStat = cublasSetMatrixAsync ( m, m, sizeof(cuDoubleComplex), hostAdiag, m, devInv, m, stream1 );
        cudaEventRecord(done_event,stream1);
        //wait for transfers to the host to finish
        cudaEventSynchronize(done_event);
      }
      //CA^-1
      cublasStat = cublasZgemm ( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, m, ioff, m, &cone, devInv, m, &devA[ioff], *lda, &czero, devA2, max_blk_sz );
      handle_cublas_error ( cublasStat, "Error in cublasZgemm #1\n" );
      flops+= m * ioff * m;
      //Mark end of small zgemm in stream1
      cudaEventRecord(done_event,stream1);
      //stream 2 must wait for the small zgemm to finish
      cudaStreamWaitEvent(stream2,done_event,0);
      // Trailing matrix update
      currentRank+=m;
      if ( currentRank<zgemm_rank && iblk>1) {
        // Accumulated rank still small: only update the next block row/column
        // and keep accumulating panels for a bigger ZGEMM later.
        // only update the next block row
        // little chance for hybrid acceleration here - so ignore for now.
        cublasSetStream ( cublasHandle, stream1 );
        // need to place A2 back into A
        ce = cudaMemcpy2DAsync ( &devA[ioff], *lda*sizeof(cuDoubleComplex), devA2, max_blk_sz*sizeof(cuDoubleComplex), m*sizeof(cuDoubleComplex), ioff, cudaMemcpyDeviceToDevice, stream1 );
        cublasStat = cublasZgemm ( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, ioff, currentRank, &cmone,
                                   &devA[ioff* *lda +ioff-n], *lda,
                                   //devA2, max_blk_sz, &cone,
                                   &devA[ioff], *lda, &cone,
                                   &devA[ioff-n], *lda );
        flops += n * ioff * currentRank;
        cublasStat = cublasZgemm ( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ioff-n, n, currentRank, &cmone,
                                   &devA[ioff * *lda], *lda,
                                   &devA[(ioff-n) * *lda + ioff], *lda, &cone,
                                   &devA[(ioff-n) * *lda], *lda );
        flops += (ioff-n)*n*currentRank;
      }
      else {
        // update the full trailing matrix
        cublasSetStream ( cublasHandle, stream1 );
        // perform a portion of the zgemm on the gpu
        // first need to place A2 back into A
        ce = cudaMemcpy2DAsync ( &devA[ioff], *lda*sizeof(cuDoubleComplex), devA2, max_blk_sz*sizeof(cuDoubleComplex), m*sizeof(cuDoubleComplex), ioff, cudaMemcpyDeviceToDevice, stream1 );
        //D=CA^-1B
        cublasStat = cublasZgemm ( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ioff, ioff, currentRank, &cmone,
                                   &devA[ioff* *lda], *lda,
                                   &devA[ioff], *lda , &cone,
                                   devA, *lda);
        flops += ioff * ioff * currentRank;
        // just did a full trailing submatrix update, so reset block row delay counter
        currentRank=0;
      }
    } // end for
    // Copy only the leading (blk_sz[0] x blk_sz[0]) result block back to host.
    cublasStat = cublasGetMatrixAsync ( blk_sz[0], blk_sz[0], sizeof(cuDoubleComplex), devA, *lda, a, *lda, stream1 );
  } // end if ( *nblk > 0 )
  *k = blk_sz[0];
  cudaEventRecord(done_event,stream1);
  //wait for last transfer to finish
  cudaEventSynchronize(done_event);
  // clean up
  // Workspaces are intentionally NOT freed: they are cached in the per-thread
  // static arrays above and reused across calls.
  //cudaFree (devWork);
  //cudaFree (devInv);
  //cudaFree (devA);
  //cudaFree (devA2);
  //cudaFreeHost (ap);
#ifdef PRINT_FLOPS
  printf("BLOCK_INV ZGEMM FLOPS: %llu\n", flops*4*2);
#endif
}
|
ed8d112e5762195d552d8856b27a10e1067b419a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "box3d2r-32x16-1-128_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 2 - 2);
const AN5D_TYPE __c3Pad = (2);
#define __c3 c3
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __halo3 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 12;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.324f) * (__REGREF(__a, 0, 0))) + (0.0020f * (__SBREF(__a_sb, -2, -2)))) + (0.0030f * (__SBREF(__a_sb, -2, -1)))) + (0.0040f * (__SBREF(__a_sb, -2, 0)))) + (0.0050f * (__SBREF(__a_sb, -2, 1)))) + (0.0060f * (__SBREF(__a_sb, -2, 2)))) + (0.0070f * (__SBREF(__a_sb, -1, -2)))) + (0.0080f * (__SBREF(__a_sb, -1, -1)))) + (0.0090f * (__SBREF(__a_sb, -1, 0)))) + (0.0100f * (__SBREF(__a_sb, -1, 1)))) + (0.0110f * (__SBREF(__a_sb, -1, 2)))) + (0.0120f * (__SBREF(__a_sb, 0, -2)))) + (0.0130f * (__SBREF(__a_sb, 0, -1)))) + (0.0140f * (__SBREF(__a_sb, 0, 1)))) + (0.0150f * (__SBREF(__a_sb, 0, 2)))) + (0.0160f * (__SBREF(__a_sb, 1, -2)))) + (0.0170f * (__SBREF(__a_sb, 1, -1)))) + (0.0180f * (__SBREF(__a_sb, 1, 0)))) + (0.0190f * (__SBREF(__a_sb, 1, 1)))) + (0.0200f * (__SBREF(__a_sb, 1, 2)))) + (0.0210f * (__SBREF(__a_sb, 2, -2)))) + (0.0220f * (__SBREF(__a_sb, 2, -1)))) + (0.0230f * (__SBREF(__a_sb, 2, 0)))) + (0.0240f * (__SBREF(__a_sb, 2, 1)))) + (0.0250f * (__SBREF(__a_sb, 2, 2)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-(0.3264f * (__REGREF(__a, 0, 0)))) + (0.0021f * (__SBREF(__a_sb, -2, -2)))) + (0.0031f * (__SBREF(__a_sb, -2, -1)))) + (0.0041f * (__SBREF(__a_sb, -2, 0)))) + (0.0051f * (__SBREF(__a_sb, -2, 1)))) + (0.0061f * (__SBREF(__a_sb, -2, 2)))) + (0.0071f * (__SBREF(__a_sb, -1, -2)))) + (0.0081f * (__SBREF(__a_sb, -1, -1)))) + (0.0091f * (__SBREF(__a_sb, -1, 0)))) + (0.0101f * (__SBREF(__a_sb, -1, 1)))) + (0.0111f * (__SBREF(__a_sb, -1, 2)))) + (0.0121f * (__SBREF(__a_sb, 0, -2)))) + (0.0131f * (__SBREF(__a_sb, 0, -1)))) + (0.0141f * (__SBREF(__a_sb, 0, 1)))) + (0.0151f * (__SBREF(__a_sb, 0, 2)))) + (0.0161f * (__SBREF(__a_sb, 1, -2)))) + (0.0171f * (__SBREF(__a_sb, 1, -1)))) + (0.0181f * (__SBREF(__a_sb, 1, 0)))) + (0.0191f * (__SBREF(__a_sb, 1, 1)))) + (0.0201f * (__SBREF(__a_sb, 1, 2)))) + (0.0211f * (__SBREF(__a_sb, 2, -2)))) + (0.0221f * (__SBREF(__a_sb, 2, -1)))) + (0.0231f * (__SBREF(__a_sb, 2, 0)))) + (0.0241f * (__SBREF(__a_sb, 2, 1)))) + (0.0251f * (__SBREF(__a_sb, 2, 2))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.6712f * (__REGREF(__a, 0, 0)))) + (0.0022f * (__SBREF(__a_sb, -2, -2)))) + (0.0032f * (__SBREF(__a_sb, -2, -1)))) + (0.0042f * (__SBREF(__a_sb, -2, 0)))) + (0.0052f * (__SBREF(__a_sb, -2, 1)))) + (0.0062f * (__SBREF(__a_sb, -2, 2)))) + (0.0072f * (__SBREF(__a_sb, -1, -2)))) + (0.0082f * (__SBREF(__a_sb, -1, -1)))) + (0.0092f * (__SBREF(__a_sb, -1, 0)))) + (0.0102f * (__SBREF(__a_sb, -1, 1)))) + (0.0112f * (__SBREF(__a_sb, -1, 2)))) + (0.0122f * (__SBREF(__a_sb, 0, -2)))) + (0.0132f * (__SBREF(__a_sb, 0, -1)))) + (0.0142f * (__SBREF(__a_sb, 0, 1)))) + (0.0152f * (__SBREF(__a_sb, 0, 2)))) + (0.0162f * (__SBREF(__a_sb, 1, -2)))) + (0.0172f * (__SBREF(__a_sb, 1, -1)))) + (0.0182f * (__SBREF(__a_sb, 1, 0)))) + (0.0192f * (__SBREF(__a_sb, 1, 1)))) + (0.0202f * (__SBREF(__a_sb, 1, 2)))) + (0.0212f * (__SBREF(__a_sb, 2, -2)))) + (0.0222f * (__SBREF(__a_sb, 2, -1)))) + (0.0232f * (__SBREF(__a_sb, 2, 0)))) + (0.0242f * (__SBREF(__a_sb, 2, 1)))) + (0.0252f * (__SBREF(__a_sb, 2, 2)))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((-(0.3312f * (__REGREF(__a, 0, 0)))) + (0.0023f * (__SBREF(__a_sb, -2, -2)))) + (0.0033f * (__SBREF(__a_sb, -2, -1)))) + (0.0043f * (__SBREF(__a_sb, -2, 0)))) + (0.0053f * (__SBREF(__a_sb, -2, 1)))) + (0.0063f * (__SBREF(__a_sb, -2, 2)))) + (0.0073f * (__SBREF(__a_sb, -1, -2)))) + (0.0083f * (__SBREF(__a_sb, -1, -1)))) + (0.0093f * (__SBREF(__a_sb, -1, 0)))) + (0.0103f * (__SBREF(__a_sb, -1, 1)))) + (0.0113f * (__SBREF(__a_sb, -1, 2)))) + (0.0123f * (__SBREF(__a_sb, 0, -2)))) + (0.0133f * (__SBREF(__a_sb, 0, -1)))) + (0.0143f * (__SBREF(__a_sb, 0, 1)))) + (0.0153f * (__SBREF(__a_sb, 0, 2)))) + (0.0163f * (__SBREF(__a_sb, 1, -2)))) + (0.0173f * (__SBREF(__a_sb, 1, -1)))) + (0.0183f * (__SBREF(__a_sb, 1, 0)))) + (0.0193f * (__SBREF(__a_sb, 1, 1)))) + (0.0203f * (__SBREF(__a_sb, 1, 2)))) + (0.0213f * (__SBREF(__a_sb, 2, -2)))) + (0.0223f * (__SBREF(__a_sb, 2, -1)))) + (0.0233f * (__SBREF(__a_sb, 2, 0)))) + (0.0243f * (__SBREF(__a_sb, 2, 1)))) + (0.0253f * (__SBREF(__a_sb, 2, 2))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((-(0.3336f * (__REGREF(__a, 0, 0)))) + (0.0024f * (__SBREF(__a_sb, -2, -2)))) + (0.0034f * (__SBREF(__a_sb, -2, -1)))) + (0.0044f * (__SBREF(__a_sb, -2, 0)))) + (0.0054f * (__SBREF(__a_sb, -2, 1)))) + (0.0064f * (__SBREF(__a_sb, -2, 2)))) + (0.0074f * (__SBREF(__a_sb, -1, -2)))) + (0.0084f * (__SBREF(__a_sb, -1, -1)))) + (0.0094f * (__SBREF(__a_sb, -1, 0)))) + (0.0104f * (__SBREF(__a_sb, -1, 1)))) + (0.0114f * (__SBREF(__a_sb, -1, 2)))) + (0.0124f * (__SBREF(__a_sb, 0, -2)))) + (0.0134f * (__SBREF(__a_sb, 0, -1)))) + (0.0144f * (__SBREF(__a_sb, 0, 1)))) + (0.0154f * (__SBREF(__a_sb, 0, 2)))) + (0.0164f * (__SBREF(__a_sb, 1, -2)))) + (0.0174f * (__SBREF(__a_sb, 1, -1)))) + (0.0184f * (__SBREF(__a_sb, 1, 0)))) + (0.0194f * (__SBREF(__a_sb, 1, 1)))) + (0.0204f * (__SBREF(__a_sb, 1, 2)))) + (0.0214f * (__SBREF(__a_sb, 2, -2)))) + (0.0224f * (__SBREF(__a_sb, 2, -1)))) + (0.0234f * (__SBREF(__a_sb, 2, 0)))) + (0.0244f * (__SBREF(__a_sb, 2, 1)))) + (0.0254f * (__SBREF(__a_sb, 2, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
}
}
| ed8d112e5762195d552d8856b27a10e1067b419a.cu | #include "box3d2r-32x16-1-128_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 2 - 2);
const AN5D_TYPE __c3Pad = (2);
#define __c3 c3
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __halo3 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 12;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.324f) * (__REGREF(__a, 0, 0))) + (0.0020f * (__SBREF(__a_sb, -2, -2)))) + (0.0030f * (__SBREF(__a_sb, -2, -1)))) + (0.0040f * (__SBREF(__a_sb, -2, 0)))) + (0.0050f * (__SBREF(__a_sb, -2, 1)))) + (0.0060f * (__SBREF(__a_sb, -2, 2)))) + (0.0070f * (__SBREF(__a_sb, -1, -2)))) + (0.0080f * (__SBREF(__a_sb, -1, -1)))) + (0.0090f * (__SBREF(__a_sb, -1, 0)))) + (0.0100f * (__SBREF(__a_sb, -1, 1)))) + (0.0110f * (__SBREF(__a_sb, -1, 2)))) + (0.0120f * (__SBREF(__a_sb, 0, -2)))) + (0.0130f * (__SBREF(__a_sb, 0, -1)))) + (0.0140f * (__SBREF(__a_sb, 0, 1)))) + (0.0150f * (__SBREF(__a_sb, 0, 2)))) + (0.0160f * (__SBREF(__a_sb, 1, -2)))) + (0.0170f * (__SBREF(__a_sb, 1, -1)))) + (0.0180f * (__SBREF(__a_sb, 1, 0)))) + (0.0190f * (__SBREF(__a_sb, 1, 1)))) + (0.0200f * (__SBREF(__a_sb, 1, 2)))) + (0.0210f * (__SBREF(__a_sb, 2, -2)))) + (0.0220f * (__SBREF(__a_sb, 2, -1)))) + (0.0230f * (__SBREF(__a_sb, 2, 0)))) + (0.0240f * (__SBREF(__a_sb, 2, 1)))) + (0.0250f * (__SBREF(__a_sb, 2, 2)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-(0.3264f * (__REGREF(__a, 0, 0)))) + (0.0021f * (__SBREF(__a_sb, -2, -2)))) + (0.0031f * (__SBREF(__a_sb, -2, -1)))) + (0.0041f * (__SBREF(__a_sb, -2, 0)))) + (0.0051f * (__SBREF(__a_sb, -2, 1)))) + (0.0061f * (__SBREF(__a_sb, -2, 2)))) + (0.0071f * (__SBREF(__a_sb, -1, -2)))) + (0.0081f * (__SBREF(__a_sb, -1, -1)))) + (0.0091f * (__SBREF(__a_sb, -1, 0)))) + (0.0101f * (__SBREF(__a_sb, -1, 1)))) + (0.0111f * (__SBREF(__a_sb, -1, 2)))) + (0.0121f * (__SBREF(__a_sb, 0, -2)))) + (0.0131f * (__SBREF(__a_sb, 0, -1)))) + (0.0141f * (__SBREF(__a_sb, 0, 1)))) + (0.0151f * (__SBREF(__a_sb, 0, 2)))) + (0.0161f * (__SBREF(__a_sb, 1, -2)))) + (0.0171f * (__SBREF(__a_sb, 1, -1)))) + (0.0181f * (__SBREF(__a_sb, 1, 0)))) + (0.0191f * (__SBREF(__a_sb, 1, 1)))) + (0.0201f * (__SBREF(__a_sb, 1, 2)))) + (0.0211f * (__SBREF(__a_sb, 2, -2)))) + (0.0221f * (__SBREF(__a_sb, 2, -1)))) + (0.0231f * (__SBREF(__a_sb, 2, 0)))) + (0.0241f * (__SBREF(__a_sb, 2, 1)))) + (0.0251f * (__SBREF(__a_sb, 2, 2))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.6712f * (__REGREF(__a, 0, 0)))) + (0.0022f * (__SBREF(__a_sb, -2, -2)))) + (0.0032f * (__SBREF(__a_sb, -2, -1)))) + (0.0042f * (__SBREF(__a_sb, -2, 0)))) + (0.0052f * (__SBREF(__a_sb, -2, 1)))) + (0.0062f * (__SBREF(__a_sb, -2, 2)))) + (0.0072f * (__SBREF(__a_sb, -1, -2)))) + (0.0082f * (__SBREF(__a_sb, -1, -1)))) + (0.0092f * (__SBREF(__a_sb, -1, 0)))) + (0.0102f * (__SBREF(__a_sb, -1, 1)))) + (0.0112f * (__SBREF(__a_sb, -1, 2)))) + (0.0122f * (__SBREF(__a_sb, 0, -2)))) + (0.0132f * (__SBREF(__a_sb, 0, -1)))) + (0.0142f * (__SBREF(__a_sb, 0, 1)))) + (0.0152f * (__SBREF(__a_sb, 0, 2)))) + (0.0162f * (__SBREF(__a_sb, 1, -2)))) + (0.0172f * (__SBREF(__a_sb, 1, -1)))) + (0.0182f * (__SBREF(__a_sb, 1, 0)))) + (0.0192f * (__SBREF(__a_sb, 1, 1)))) + (0.0202f * (__SBREF(__a_sb, 1, 2)))) + (0.0212f * (__SBREF(__a_sb, 2, -2)))) + (0.0222f * (__SBREF(__a_sb, 2, -1)))) + (0.0232f * (__SBREF(__a_sb, 2, 0)))) + (0.0242f * (__SBREF(__a_sb, 2, 1)))) + (0.0252f * (__SBREF(__a_sb, 2, 2)))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((-(0.3312f * (__REGREF(__a, 0, 0)))) + (0.0023f * (__SBREF(__a_sb, -2, -2)))) + (0.0033f * (__SBREF(__a_sb, -2, -1)))) + (0.0043f * (__SBREF(__a_sb, -2, 0)))) + (0.0053f * (__SBREF(__a_sb, -2, 1)))) + (0.0063f * (__SBREF(__a_sb, -2, 2)))) + (0.0073f * (__SBREF(__a_sb, -1, -2)))) + (0.0083f * (__SBREF(__a_sb, -1, -1)))) + (0.0093f * (__SBREF(__a_sb, -1, 0)))) + (0.0103f * (__SBREF(__a_sb, -1, 1)))) + (0.0113f * (__SBREF(__a_sb, -1, 2)))) + (0.0123f * (__SBREF(__a_sb, 0, -2)))) + (0.0133f * (__SBREF(__a_sb, 0, -1)))) + (0.0143f * (__SBREF(__a_sb, 0, 1)))) + (0.0153f * (__SBREF(__a_sb, 0, 2)))) + (0.0163f * (__SBREF(__a_sb, 1, -2)))) + (0.0173f * (__SBREF(__a_sb, 1, -1)))) + (0.0183f * (__SBREF(__a_sb, 1, 0)))) + (0.0193f * (__SBREF(__a_sb, 1, 1)))) + (0.0203f * (__SBREF(__a_sb, 1, 2)))) + (0.0213f * (__SBREF(__a_sb, 2, -2)))) + (0.0223f * (__SBREF(__a_sb, 2, -1)))) + (0.0233f * (__SBREF(__a_sb, 2, 0)))) + (0.0243f * (__SBREF(__a_sb, 2, 1)))) + (0.0253f * (__SBREF(__a_sb, 2, 2))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((-(0.3336f * (__REGREF(__a, 0, 0)))) + (0.0024f * (__SBREF(__a_sb, -2, -2)))) + (0.0034f * (__SBREF(__a_sb, -2, -1)))) + (0.0044f * (__SBREF(__a_sb, -2, 0)))) + (0.0054f * (__SBREF(__a_sb, -2, 1)))) + (0.0064f * (__SBREF(__a_sb, -2, 2)))) + (0.0074f * (__SBREF(__a_sb, -1, -2)))) + (0.0084f * (__SBREF(__a_sb, -1, -1)))) + (0.0094f * (__SBREF(__a_sb, -1, 0)))) + (0.0104f * (__SBREF(__a_sb, -1, 1)))) + (0.0114f * (__SBREF(__a_sb, -1, 2)))) + (0.0124f * (__SBREF(__a_sb, 0, -2)))) + (0.0134f * (__SBREF(__a_sb, 0, -1)))) + (0.0144f * (__SBREF(__a_sb, 0, 1)))) + (0.0154f * (__SBREF(__a_sb, 0, 2)))) + (0.0164f * (__SBREF(__a_sb, 1, -2)))) + (0.0174f * (__SBREF(__a_sb, 1, -1)))) + (0.0184f * (__SBREF(__a_sb, 1, 0)))) + (0.0194f * (__SBREF(__a_sb, 1, 1)))) + (0.0204f * (__SBREF(__a_sb, 1, 2)))) + (0.0214f * (__SBREF(__a_sb, 2, -2)))) + (0.0224f * (__SBREF(__a_sb, 2, -1)))) + (0.0234f * (__SBREF(__a_sb, 2, 0)))) + (0.0244f * (__SBREF(__a_sb, 2, 1)))) + (0.0254f * (__SBREF(__a_sb, 2, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
}
}
|
14b5c250ae1947a0e110c6990f996359ccb8cec4.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common.h"
#include "gpuCudaLib.h"
#include "schema.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#ifdef HAS_GMM
#include "gmm.h"
#endif
/*
 * Gather a column-store table into row-major tuples on the GPU.
 *
 * content[j]  - device pointer to column j (tupleNum fixed-width values)
 * attrSize[j] - width in bytes of one value of column j
 * result      - output buffer of tupleNum * tupleSize bytes; tuple i occupies
 *               bytes [i*tupleSize, (i+1)*tupleSize) with columns back to back
 *
 * Grid-stride loop: any launch configuration covers all tuples.
 */
extern "C" __global__ void materialize(char **content, int colNum, int *attrSize, long tupleNum, int tupleSize,
                                       char *result) {
    const long step = (long)(blockDim.x * gridDim.x);
    for (long row = blockIdx.x * blockDim.x + threadIdx.x; row < tupleNum; row += step) {
        char *dst = result + row * tupleSize;
        for (int col = 0; col < colNum; col++) {
            const int width = attrSize[col];
            /* Device-side memcpy: copy one fixed-width attribute value. */
            memcpy(dst, content[col] + row * width, width);
            dst += width;
        }
    }
}
/*
 * Materialize a column-store table into one contiguous row-major host buffer.
 *
 * Host-resident (MEM) columns are staged to the GPU; columns already on the
 * GPU are referenced in place.  The materialize kernel then packs all columns
 * into fixed-width tuples and the packed result is copied back to the host.
 *
 * Returns a malloc'd buffer of tupleNum * tupleSize bytes owned by the caller.
 * NOTE(review): pp is unused here -- timing is printed directly; confirm
 * whether statistics were meant to be accumulated into it.
 */
char *materializeCol(struct materializeNode *mn, struct statistic *pp) {
    struct timespec start, end;
    clock_gettime(CLOCK_REALTIME, &start);
    struct tableNode *tn = mn->table;
    char *res, *gpuResult;
    char **gpuContent, **column;
    long size = tn->tupleNum * tn->tupleSize;
    int *gpuAttrSize;
    /* column[i] remembers the temporary GPU copy made for MEM columns so it
       can be freed after the kernel has consumed it. */
    column = (char **)malloc(sizeof(char *) * tn->totalAttr);
    CHECK_POINTER(column);
#ifdef HAS_GMM
    CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void **)&gpuContent, sizeof(char *) * tn->totalAttr, FLAG_PTARRAY));
#else
    /* gpuContent is a device-resident array of device pointers, one per column. */
    CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuContent, sizeof(char *) * tn->totalAttr));
#endif
    res = (char *)malloc(size);
    CHECK_POINTER(res);
    CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuResult, size));
    CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuAttrSize, sizeof(int) * tn->totalAttr));
    for (int i = 0; i < tn->totalAttr; i++) {
        if (tn->dataPos[i] == MEM) {
            /* Host column: copy its data to the GPU, then publish the device
               pointer into slot i of gpuContent (&gpuContent[i] is plain
               pointer arithmetic on a device address, a valid memcpy target). */
            CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&column[i], tn->tupleNum * tn->attrSize[i]));
            CUDA_SAFE_CALL_NO_SYNC(
                hipMemcpy(column[i], tn->content[i], tn->tupleNum * tn->attrSize[i], hipMemcpyHostToDevice));
            CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuContent[i], &column[i], sizeof(char *), hipMemcpyHostToDevice));
        } else if (tn->dataPos[i] == GPU) {
            /* Column already on the GPU: reference it directly. */
            CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuContent[i], &tn->content[i], sizeof(char *), hipMemcpyHostToDevice));
        }
    }
    CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuAttrSize, tn->attrSize, sizeof(int) * tn->totalAttr, hipMemcpyHostToDevice));
    dim3 grid(512);
    dim3 block(128);
    GMM_CALL(cudaAdvise(0, CADV_INPUT | CADV_PTAINPUT));
    GMM_CALL(cudaAdvise(2, CADV_INPUT));
    GMM_CALL(cudaAdvise(5, CADV_OUTPUT));
    GMM_CALL(cudaSetFunction(107));
    hipLaunchKernelGGL(( materialize), dim3(grid), dim3(block), 0, 0, gpuContent, tn->totalAttr, gpuAttrSize, tn->tupleNum, tn->tupleSize, gpuResult);
    /* The blocking device-to-host copy below also synchronizes with the kernel. */
    CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(res, gpuResult, size, hipMemcpyDeviceToHost));
    for (int i = 0; i < tn->totalAttr; i++) {
        if (tn->dataPos[i] == MEM) {
            CUDA_SAFE_CALL_NO_SYNC(hipFree(column[i]));
        }
    }
    free(column);
    CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuContent));
    CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuAttrSize));
    CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuResult));
    clock_gettime(CLOCK_REALTIME, &end);
    /* timeE is in nanoseconds; divide by 1e6 to report milliseconds. */
    double timeE = (end.tv_sec - start.tv_sec) * BILLION + end.tv_nsec - start.tv_nsec;
    printf("Materialization Time: %lf\n", timeE / (1000 * 1000));
    return res;
}
| 14b5c250ae1947a0e110c6990f996359ccb8cec4.cu | /*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common.h"
#include "gpuCudaLib.h"
#include "schema.h"
#include <cuda.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#ifdef HAS_GMM
#include "gmm.h"
#endif
/*
 * Gather a column-store table into row-major tuples on the GPU.
 * content[j] holds tupleNum values of attrSize[j] bytes each; tuple i is
 * written at result + i*tupleSize with the columns laid out back to back.
 * Grid-stride loop: any launch configuration covers all tuples.
 */
extern "C" __global__ void materialize(char **content, int colNum, int *attrSize, long tupleNum, int tupleSize,
                                       char *result) {
    int startIndex = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (long i = startIndex; i < tupleNum; i += stride) {
        int offset = 0;  /* running byte offset of column j inside the output tuple */
        for (int j = 0; j < colNum; j++) {
            int aSize = attrSize[j];
            /* Device-side memcpy: copy one fixed-width attribute value. */
            memcpy(result + i * tupleSize + offset, content[j] + i * aSize, aSize);
            offset += aSize;
        }
    }
}
/*
 * Materialize a column-store table into one contiguous row-major host buffer.
 *
 * Host-resident (MEM) columns are staged to the GPU; columns already on the
 * GPU are referenced in place.  The materialize kernel then packs all columns
 * into fixed-width tuples and the packed result is copied back to the host.
 *
 * Returns a malloc'd buffer of tupleNum * tupleSize bytes owned by the caller.
 * NOTE(review): pp is unused here -- timing is printed directly; confirm
 * whether statistics were meant to be accumulated into it.
 */
char *materializeCol(struct materializeNode *mn, struct statistic *pp) {
    struct timespec start, end;
    clock_gettime(CLOCK_REALTIME, &start);
    struct tableNode *tn = mn->table;
    char *res, *gpuResult;
    char **gpuContent, **column;
    long size = tn->tupleNum * tn->tupleSize;
    int *gpuAttrSize;
    /* column[i] remembers the temporary GPU copy made for MEM columns so it
       can be freed after the kernel has consumed it. */
    column = (char **)malloc(sizeof(char *) * tn->totalAttr);
    CHECK_POINTER(column);
#ifdef HAS_GMM
    CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void **)&gpuContent, sizeof(char *) * tn->totalAttr, FLAG_PTARRAY));
#else
    /* gpuContent is a device-resident array of device pointers, one per column. */
    CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuContent, sizeof(char *) * tn->totalAttr));
#endif
    res = (char *)malloc(size);
    CHECK_POINTER(res);
    CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuResult, size));
    CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuAttrSize, sizeof(int) * tn->totalAttr));
    for (int i = 0; i < tn->totalAttr; i++) {
        if (tn->dataPos[i] == MEM) {
            /* Host column: copy its data to the GPU, then publish the device
               pointer into slot i of gpuContent (&gpuContent[i] is plain
               pointer arithmetic on a device address, a valid memcpy target). */
            CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&column[i], tn->tupleNum * tn->attrSize[i]));
            CUDA_SAFE_CALL_NO_SYNC(
                cudaMemcpy(column[i], tn->content[i], tn->tupleNum * tn->attrSize[i], cudaMemcpyHostToDevice));
            CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuContent[i], &column[i], sizeof(char *), cudaMemcpyHostToDevice));
        } else if (tn->dataPos[i] == GPU) {
            /* Column already on the GPU: reference it directly. */
            CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuContent[i], &tn->content[i], sizeof(char *), cudaMemcpyHostToDevice));
        }
    }
    CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuAttrSize, tn->attrSize, sizeof(int) * tn->totalAttr, cudaMemcpyHostToDevice));
    dim3 grid(512);
    dim3 block(128);
    GMM_CALL(cudaAdvise(0, CADV_INPUT | CADV_PTAINPUT));
    GMM_CALL(cudaAdvise(2, CADV_INPUT));
    GMM_CALL(cudaAdvise(5, CADV_OUTPUT));
    GMM_CALL(cudaSetFunction(107));
    materialize<<<grid, block>>>(gpuContent, tn->totalAttr, gpuAttrSize, tn->tupleNum, tn->tupleSize, gpuResult);
    /* The blocking device-to-host copy below also synchronizes with the kernel. */
    CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(res, gpuResult, size, cudaMemcpyDeviceToHost));
    for (int i = 0; i < tn->totalAttr; i++) {
        if (tn->dataPos[i] == MEM) {
            CUDA_SAFE_CALL_NO_SYNC(cudaFree(column[i]));
        }
    }
    free(column);
    CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuContent));
    CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuAttrSize));
    CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResult));
    clock_gettime(CLOCK_REALTIME, &end);
    /* timeE is in nanoseconds; divide by 1e6 to report milliseconds. */
    double timeE = (end.tv_sec - start.tv_sec) * BILLION + end.tv_nsec - start.tv_nsec;
    printf("Materialization Time: %lf\n", timeE / (1000 * 1000));
    return res;
}
|
8eb645e19e6375e9f00226bc14a80eda1c68d5b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bits/mexutils.h"
#include "bits/datamex.hpp"
#if ENABLE_GPU
#include "bits/datacu.hpp"
#endif
#include <math_constants.h>
#undef printf
#include <stdio.h>
/* option codes */
/* Codes returned by the option parser for each recognized option name. */
enum {
  opt_stride = 0,
  opt_pad,
  opt_pool_switches,
  opt_unpool_output_size,
  opt_sum,
  opt_verbose,
} ;
/* options */
/* {name, flag, code} table consumed by vlmxNextOption; the flag presumably
   marks options that take a value argument (Verbose takes none) -- verify
   against vlmxNextOption's documentation. */
vlmxOption options [] = {
  {"Stride", 1, opt_stride },
  {"Pad", 1, opt_pad },
  {"PoolSwitches", 1, opt_pool_switches },
  {"UnpoolOutputSize", 1, opt_unpool_output_size },
  {"Sum", 1, opt_sum },
  {"Verbose", 0, opt_verbose },
  {0, 0, 0 }
} ;
/* ---------------------------------------------------------------- */
/* Context */
/* ---------------------------------------------------------------- */
vl::MexContext context ;
/*
Resetting the context here resolves a crash when MATLAB quits and
the ~Context function is implicitly called on unloading the MEX file.
*/
/* Release all resources held by the shared context; registered with
   mexAtExit so the GPU state is torn down when the MEX file is unloaded. */
void atExit()
{
  context.clear() ;
}
/* ---------------------------------------------------------------- */
/* unpooling_max_forward_dm_kernel */
/* ---------------------------------------------------------------- */
/* Forward max-unpooling: one thread per element of the large (unpooled)
   output.  Each thread scans the pooled cells whose pooling window covers
   its location; when a cell's recorded pool switch points exactly at this
   location, the cell's value is propagated here (max over competing cells),
   then the optional `sum` tensor is added.  Locations selected by no switch
   keep -inf (+sum).
   NOTE(review): the launcher passes the height/width (and pooled/pool/
   stride/pad) pairs swapped relative to these parameter names -- presumably
   intentional for MATLAB's column-major layout; confirm before relying on
   the names.  Pool switches are 1-based row offsets within the pool window. */
template <typename T> __global__ void
unpooling_max_forward_dm_kernel
(T* unpooled,
 const T* data,
 const uint8_t* poolSwitches,
 const T* sum,
 const int nthreads,
 const int pooledWidth,
 const int pooledHeight,
 const int width,
 const int height,
 const int depth,
 const int poolWidth,
 const int poolHeight,
 const int strideX,
 const int strideY,
 const int padLeft,
 const int padTop)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    /* Decompose the flat index into (x_data, y_data) within one unpooled
       map and channel/image plane z. */
    int x_data = index ;
    int y_data = x_data / width ;
    int z = y_data / height ;
    x_data %= width ;
    y_data %= height ;
    /* [px1,px2] x [py1,py2]: range of pooled cells whose pooling window can
       contain (x_data, y_data), clamped to the pooled map. */
    int dx = x_data + padLeft - poolWidth ;
    int dy = y_data + padTop - poolHeight ;
    int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
    int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
    int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
    int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
    T unpoolValue = (T)(-CUDART_INF_F);
    /* Advance both inputs to plane z; indices below are within one map. */
    poolSwitches += z * pooledHeight * pooledWidth ;
    data += z * pooledHeight * pooledWidth ;
    for (int py = py1; py <= py2; ++py) {
      for (int px = px1; px <= px2; ++px) {
        /* (x1, y1): top-left corner of this pooled cell's window;
           loc: linear position of the cell's argmax inside the window
           (stored 1-based, hence the -1). */
        int x1 = px * strideX - padLeft ;
        int y1 = py * strideY - padTop ;
        int loc = poolSwitches[py * pooledWidth + px] - 1 ;
        int lx = loc % poolWidth ;
        int ly = loc / poolWidth ;
        if(x_data == (x1 + lx) && y_data == (y1 + ly)) {
          if (data[py * pooledWidth + px] > unpoolValue) {
            unpoolValue = data[py * pooledWidth + px];
          }
        }
      }
    }
    if (sum) {
      unpoolValue += sum[index] ;
    }
    unpooled[index] = unpoolValue;
  }
}
/* ---------------------------------------------------------------- */
/* unpooling_max_backward_dm_kernel */
/* ---------------------------------------------------------------- */
/* Backward max-unpooling: one thread per element of the large (unpooled)
   gradient.  Mirrors the forward pass: the thread finds, among the pooled
   cells whose switch points at its location, the one with the maximal data
   value, and routes derUnpooled[index] into that cell of derData (and,
   optionally, into derSum).  Since each pooled cell's switch targets exactly
   one location, each derData element is written by at most one thread.
   NOTE(review): if several cells' switches target the same location, only
   the max-valued cell receives gradient -- matches the forward max, but
   derSum is only written at selected locations; verify this is the intended
   gradient for the `sum` input.  Same swapped width/height convention as the
   forward kernel. */
template <typename T> __global__ void
unpooling_max_backward_dm_kernel
(T* derData,
 T* derSum,
 const T* data,
 const uint8_t* poolSwitches,
 const T* derUnpooled,
 const int nthreads,
 const int pooledWidth,
 const int pooledHeight,
 const int width,
 const int height,
 const int depth,
 const int poolWidth,
 const int poolHeight,
 const int strideX,
 const int strideY,
 const int padLeft,
 const int padTop)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    /* Decompose the flat index into (x_data, y_data) and plane z. */
    int x_data = index ;
    int y_data = x_data / width ;
    int z = y_data / height ;
    x_data %= width ;
    y_data %= height ;
    /* Range of pooled cells whose pooling window can contain this location. */
    int dx = x_data + padLeft - poolWidth ;
    int dy = y_data + padTop - poolHeight ;
    int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
    int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
    int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
    int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
    T unpoolValue = (T)(-CUDART_INF_F);
    T derValue = 0;
    /* Advance the per-plane pointers; derData is one element per pooled cell. */
    poolSwitches += z * pooledHeight * pooledWidth ;
    derData += z * pooledHeight * pooledWidth ;
    data += z * pooledHeight * pooledWidth ;
    int derDataIndex = -1 ;  /* pooled cell that wins the max; -1 = none */
    for (int py = py1; py <= py2; ++py) {
      for (int px = px1; px <= px2; ++px) {
        int x1 = px * strideX - padLeft ;
        int y1 = py * strideY - padTop ;
        /* Switch stored 1-based; decode to (lx, ly) within the window. */
        int loc = poolSwitches[py * pooledWidth + px] - 1 ;
        int lx = loc % poolWidth ;
        int ly = loc / poolWidth ;
        if(x_data == (x1 + lx) && y_data == (y1 + ly)) {
          if (data[py * pooledWidth + px] > unpoolValue) {
            unpoolValue = data[py * pooledWidth + px];
            derDataIndex = py * pooledWidth + px;
            derValue = derUnpooled[index];
          }
        }
      }
    }
    if (derDataIndex != -1) {
      derData[derDataIndex] = derValue;
      if (derSum != NULL) {
        derSum[index] = derValue ;
      }
    }
  }
}
/* ---------------------------------------------------------------- */
/* MEX driver */
/* ---------------------------------------------------------------- */
/* Positional inputs: DATA, pooling SIZE, then (backward mode only) DEROUTPUT. */
enum {
  IN_DATA = 0, IN_SIZE, IN_DEROUTPUT, IN_END
} ;
/* Outputs: the unpooled result (forward) or derData (backward), plus optional derSum. */
enum {
  OUT_RESULT = 0, OUT_DERSUM, OUT_END
} ;
/* MEX entry point for GPU-only max unpooling.
   Forward:  OUT = f(DATA, SIZE, 'PoolSwitches', S, 'UnpoolOutputSize', [H W], ...)
   Backward: [DERDATA, DERSUM] = f(DATA, SIZE, DEROUTPUT, 'PoolSwitches', S, ...)
   Parses options, validates shapes/padding, allocates outputs, and dispatches
   the appropriate kernel for float (and, if enabled, double) data. */
void mexFunction(int nout, mxArray *out[],
                 int nin, mxArray const *in[])
{
  int poolWidth ;
  int poolHeight ;
  int strideX = 1 ;
  int strideY = 1 ;
  int padLeft = 0 ;
  int padRight = 0 ;
  int padTop = 0 ;
  int padBottom = 0 ;
  bool backMode = false ;
  bool doDerSum = false ;
  mxArray const *poolSwitchesIn = NULL ;
  mxArray const *sumIn = NULL ;
  int unpooledHeight = 0;
  int unpooledWidth = 0;
  int verbosity = 0 ;  /* NOTE(review): counted below but not otherwise used here */
  int opt ;
  int next = IN_END ;
  mxArray const *optarg ;
  /* -------------------------------------------------------------- */
  /*                                            Check the arguments */
  /* -------------------------------------------------------------- */
  mexAtExit(atExit) ;
  if (nin < 2) {
    mexErrMsgTxt("The arguments are less than two.") ;
  }
  /* A string third argument is an option name, not DEROUTPUT, so stay in
     forward mode and start option parsing there. */
  if (nin > 2 && vlmxIsString(in[2],-1)) {
    next = 2 ;
    backMode = 0 ;
  } else {
    backMode = (nin >= 3) ;
  }
  while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
    switch (opt) {
      case opt_verbose :
        ++ verbosity ;
        break ;
      case opt_stride :
        if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
          mexErrMsgTxt("STRIDE is not a plain matrix.") ;
        }
        switch (mxGetNumberOfElements(optarg)) {
          case 1:
            /* One element: same stride in both dimensions. */
            strideY = (int)mxGetPr(optarg)[0] ;
            strideX = strideY ;
            break ;
          case 2:
            strideY = (int)mxGetPr(optarg)[0] ;
            strideX = (int)mxGetPr(optarg)[1] ;
            break ;
          default:
            mexErrMsgTxt("STRIDE has neither one nor two elements.") ;
        }
        break ;
      case opt_pad :
        if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
          mexErrMsgTxt("PAD is not a plain matrix.") ;
        }
        switch (mxGetNumberOfElements(optarg)) {
          case 1:
            /* One element: the same padding on all four sides. */
            padLeft = (int)mxGetPr(optarg)[0] ;
            padRight = padLeft ;
            padTop = padLeft ;
            padBottom = padLeft ;
            break ;
          case 4:
            /* Order: [top bottom left right]. */
            padTop = (int)mxGetPr(optarg)[0] ;
            padBottom = (int)mxGetPr(optarg)[1] ;
            padLeft = (int)mxGetPr(optarg)[2] ;
            padRight = (int)mxGetPr(optarg)[3] ;
            break ;
          default:
            mexErrMsgTxt("PAD has neither one nor four elements.") ;
        }
        break;
      case opt_pool_switches :
        poolSwitchesIn = optarg ;
        break ;
      case opt_unpool_output_size :
        if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
          mexErrMsgTxt("UNPOOLOUTPUTSIZE is not a plain matrix.") ;
        }
        if (mxGetNumberOfElements(optarg) >= 2) {
          unpooledHeight = (int)mxGetPr(optarg)[0] ;
          unpooledWidth = (int)mxGetPr(optarg)[1] ;
        } else {
          mexErrMsgTxt("UNPOOLOUTPUTSIZE has less than 2 elements") ;
        }
        break ;
      case opt_sum :
        sumIn = optarg ;
        break ;
      default:
        break ;
    }
  }
  vl::MexTensor data(context) ;
  vl::MexTensor derOutput(context) ;
  vl::MexTensor sum(context) ;
  data.init(in[IN_DATA]) ;
  data.reshape(4) ; // -> 4 dimensions
  if (backMode) {
    derOutput.init(in[IN_DEROUTPUT]) ;
    derOutput.reshape(4) ; // -> 4 dimensions
  }
  if (backMode && ! vl::areCompatible(data, derOutput)) {
    mexErrMsgTxt("DATA and DEROUTPUT do not have compatible formats.") ;
  }
  if (poolSwitchesIn == NULL) {
    mexErrMsgTxt("Unpooling requires PoolSwitches") ;
  }
  if (!vlmxIsPlainMatrix(in[IN_SIZE],-1,-1)) {
    mexErrMsgTxt("SIZE is not a plain matrix.") ;
  }
  switch (mxGetNumberOfElements(in[IN_SIZE])) {
    case 1:
      /* One element: square pooling window. */
      poolHeight = mxGetPr(in[IN_SIZE])[0] ;
      poolWidth = poolHeight ;
      break ;
    case 2:
      poolHeight = mxGetPr(in[IN_SIZE])[0] ;
      poolWidth = mxGetPr(in[IN_SIZE])[1] ;
      break ;
    default:
      mexErrMsgTxt("SIZE has neither one nor two elements.") ;
  }
  /* Basic compatibility of Shape */
  if (strideX < 1 || strideY < 1) {
    mexErrMsgTxt("At least one element of STRIDE is smaller than one.") ;
  }
  if (poolHeight == 0 || poolWidth == 0) {
    mexErrMsgTxt("A dimension of the pooling SIZE is void.") ;
  }
  if (unpooledHeight + (padTop+padBottom) < poolHeight ||
      unpooledWidth + (padLeft+padRight) < poolWidth) {
    mexErrMsgTxt("The pooling window is larger than the DATA (including padding).") ;
  }
  if (padLeft < 0 ||
      padRight < 0 ||
      padTop < 0 ||
      padBottom < 0) {
    mexErrMsgTxt("An element of PAD is negative.") ;
  }
  if (padLeft >= poolWidth ||
      padRight >= poolWidth ||
      padTop >= poolHeight ||
      padBottom >= poolHeight) {
    mexErrMsgTxt("A padding value is larger or equal to the size of the pooling window.") ;
  }
  /* In backward mode the unpooled size is dictated by DEROUTPUT. */
  if (backMode) {
    unpooledHeight = derOutput.getHeight() ;
    unpooledWidth = derOutput.getWidth() ;
  }
  if ((unpooledWidth <= 0 || unpooledHeight <= 0) && !backMode) {
    mexErrMsgTxt("Unpooling requires UnpoolOutputSize") ;
  }
  /* Get the output Shape */
  vl::TensorShape outputShape(unpooledHeight,
                              unpooledWidth,
                              data.getDepth(),
                              data.getSize()) ;
  if (backMode && (derOutput != outputShape)) {
    mexErrMsgTxt("DEROUTPUT dimensions are incompatible with X and POOL.") ;
  }
  /* Create output buffers */
  vl::Device deviceType = data.getDeviceType() ;
  vl::Type dataType = data.getDataType() ;
  vl::MexTensor output(context) ;
  vl::MexTensor poolSwitches(context) ;
  vl::MexTensor derData(context) ;
  vl::MexTensor derSum(context) ;
  if (deviceType != vl::GPU) {
    mexErrMsgTxt("Only GPU supported") ;
  }
  if (poolSwitchesIn != NULL) {
    poolSwitches.init(poolSwitchesIn) ;
    if (poolSwitches.getDeviceType() != deviceType) {
      mexErrMsgTxt("PoolSwitches and data have different device type") ;
    }
  }
  if (sumIn != NULL) {
    sum.init(sumIn) ;
    if (! vl::areCompatible(data, sum)) {
      mexErrMsgTxt("DATA and SUM do not have compatible formats.") ;
    }
  }
  if (!backMode) {
    output.initWithZeros(deviceType, dataType, outputShape) ;
  } else {
    derData.initWithZeros(deviceType, dataType, data.getShape()) ;
    /* derSum is only produced when the caller asked for a second output. */
    if (nout > 1) {
      derSum.initWithZeros(deviceType, dataType, derOutput.getShape()) ;
      doDerSum = true ;
    }
  }
  // Dispatch
  /* NOTE(review): height/width (and the pooled/pool/stride/pad pairs) are
     passed swapped relative to the kernel parameter names; presumably
     intentional for column-major MATLAB data -- confirm before refactoring. */
  int height = outputShape.getHeight() ;
  int width = outputShape.getWidth() ;
  int depth = data.getDepth() * data.getSize() ;
  int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
  int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
  int nthreads = width * height * depth ;  /* one thread per unpooled element */
  void * sumMem = sumIn ? sum.getMemory() : NULL ;
  void * derSumMem = doDerSum ? derSum.getMemory() : NULL ;
  if (!backMode) {
    if (dataType == vl::vlTypeFloat) {
      hipLaunchKernelGGL(( unpooling_max_forward_dm_kernel<float>)
        , dim3(vl::divideUpwards(nthreads, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
         (float*) output.getMemory(), (float const*) data.getMemory(),
         (uint8_t const*) poolSwitches.getMemory(),
         (float const*) sumMem,
         nthreads,
         pooledHeight, pooledWidth,
         height, width, depth,
         poolHeight, poolWidth,
         strideY, strideX,
         padTop, padLeft);
    } else if (dataType == vl::vlTypeDouble) {
#ifdef ENABLE_DOUBLE
      hipLaunchKernelGGL(( unpooling_max_forward_dm_kernel<double>)
        , dim3(vl::divideUpwards(nthreads, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
         (double*) output.getMemory(), (double const*) data.getMemory(),
         (uint8_t const*) poolSwitches.getMemory(),
         (double const*) sumMem,
         nthreads,
         pooledHeight, pooledWidth,
         height, width, depth,
         poolHeight, poolWidth,
         strideY, strideX,
         padTop, padLeft);
#endif
    }
  } else {
    // Backward
    if (dataType == vl::vlTypeFloat) {
      hipLaunchKernelGGL(( unpooling_max_backward_dm_kernel<float>)
        , dim3(vl::divideUpwards(nthreads, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
         (float*) derData.getMemory(), (float*) derSumMem, (float const*) data.getMemory(),
         (uint8_t const*) poolSwitches.getMemory(), (float const*) derOutput.getMemory(),
         nthreads,
         pooledHeight, pooledWidth,
         height, width, depth,
         poolHeight, poolWidth,
         strideY, strideX,
         padTop, padLeft);
    } else if (dataType == vl::vlTypeDouble) {
#ifdef ENABLE_DOUBLE
      hipLaunchKernelGGL(( unpooling_max_backward_dm_kernel<double>)
        , dim3(vl::divideUpwards(nthreads, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
         (double*) derData.getMemory(), (double*) derSumMem, (double const*) data.getMemory(),
         (uint8_t const*) poolSwitches.getMemory(), (double const*) derOutput.getMemory(),
         nthreads,
         pooledHeight, pooledWidth,
         height, width, depth,
         poolHeight, poolWidth,
         strideY, strideX,
         padTop, padLeft);
#endif
    }
  }
  /* Surface asynchronous launch errors without clearing the error state. */
  hipError_t status = hipPeekAtLastError() ;
  if (status != hipSuccess) {
    mexErrMsgTxt(context.getLastErrorMessage().c_str()) ;
  }
  if (backMode) {
    out[OUT_RESULT] = derData.relinquish() ;
    if (doDerSum) {
      out[OUT_DERSUM] = derSum.relinquish() ;
    }
  } else {
    out[OUT_RESULT] = output.relinquish() ;
  }
}
| 8eb645e19e6375e9f00226bc14a80eda1c68d5b9.cu | #include "bits/mexutils.h"
#include "bits/datamex.hpp"
#if ENABLE_GPU
#include "bits/datacu.hpp"
#endif
#include <math_constants.h>
#undef printf
#include <stdio.h>
/* option codes */
/* Codes returned by the option parser for each recognized option name. */
enum {
  opt_stride = 0,
  opt_pad,
  opt_pool_switches,
  opt_unpool_output_size,
  opt_sum,
  opt_verbose,
} ;
/* options */
/* {name, flag, code} table consumed by vlmxNextOption; the flag presumably
   marks options that take a value argument (Verbose takes none) -- verify
   against vlmxNextOption's documentation. */
vlmxOption options [] = {
  {"Stride", 1, opt_stride },
  {"Pad", 1, opt_pad },
  {"PoolSwitches", 1, opt_pool_switches },
  {"UnpoolOutputSize", 1, opt_unpool_output_size },
  {"Sum", 1, opt_sum },
  {"Verbose", 0, opt_verbose },
  {0, 0, 0 }
} ;
/* ---------------------------------------------------------------- */
/* Context */
/* ---------------------------------------------------------------- */
vl::MexContext context ;
/*
Resetting the context here resolves a crash when MATLAB quits and
the ~Context function is implicitly called on unloading the MEX file.
*/
/* Release all resources held by the shared context; registered with
   mexAtExit so the GPU state is torn down when the MEX file is unloaded. */
void atExit()
{
  context.clear() ;
}
/* ---------------------------------------------------------------- */
/* unpooling_max_forward_dm_kernel */
/* ---------------------------------------------------------------- */
/* Forward max-unpooling: one thread per element of the large (unpooled)
   output.  Each thread scans the pooled cells whose pooling window covers
   its location; when a cell's recorded pool switch points exactly at this
   location, the cell's value is propagated here (max over competing cells),
   then the optional `sum` tensor is added.  Locations selected by no switch
   keep -inf (+sum).
   NOTE(review): the launcher passes the height/width (and pooled/pool/
   stride/pad) pairs swapped relative to these parameter names -- presumably
   intentional for MATLAB's column-major layout; confirm before relying on
   the names.  Pool switches are 1-based row offsets within the pool window. */
template <typename T> __global__ void
unpooling_max_forward_dm_kernel
(T* unpooled,
 const T* data,
 const uint8_t* poolSwitches,
 const T* sum,
 const int nthreads,
 const int pooledWidth,
 const int pooledHeight,
 const int width,
 const int height,
 const int depth,
 const int poolWidth,
 const int poolHeight,
 const int strideX,
 const int strideY,
 const int padLeft,
 const int padTop)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    /* Decompose the flat index into (x_data, y_data) within one unpooled
       map and channel/image plane z. */
    int x_data = index ;
    int y_data = x_data / width ;
    int z = y_data / height ;
    x_data %= width ;
    y_data %= height ;
    /* [px1,px2] x [py1,py2]: range of pooled cells whose pooling window can
       contain (x_data, y_data), clamped to the pooled map. */
    int dx = x_data + padLeft - poolWidth ;
    int dy = y_data + padTop - poolHeight ;
    int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
    int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
    int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
    int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
    T unpoolValue = (T)(-CUDART_INF_F);
    /* Advance both inputs to plane z; indices below are within one map. */
    poolSwitches += z * pooledHeight * pooledWidth ;
    data += z * pooledHeight * pooledWidth ;
    for (int py = py1; py <= py2; ++py) {
      for (int px = px1; px <= px2; ++px) {
        /* (x1, y1): top-left corner of this pooled cell's window;
           loc: linear position of the cell's argmax inside the window
           (stored 1-based, hence the -1). */
        int x1 = px * strideX - padLeft ;
        int y1 = py * strideY - padTop ;
        int loc = poolSwitches[py * pooledWidth + px] - 1 ;
        int lx = loc % poolWidth ;
        int ly = loc / poolWidth ;
        if(x_data == (x1 + lx) && y_data == (y1 + ly)) {
          if (data[py * pooledWidth + px] > unpoolValue) {
            unpoolValue = data[py * pooledWidth + px];
          }
        }
      }
    }
    if (sum) {
      unpoolValue += sum[index] ;
    }
    unpooled[index] = unpoolValue;
  }
}
/* ---------------------------------------------------------------- */
/* unpooling_max_backward_dm_kernel */
/* ---------------------------------------------------------------- */
/* Backward max-unpooling: one thread per element of the large (unpooled)
   gradient.  Mirrors the forward pass: the thread finds, among the pooled
   cells whose switch points at its location, the one with the maximal data
   value, and routes derUnpooled[index] into that cell of derData (and,
   optionally, into derSum).  Since each pooled cell's switch targets exactly
   one location, each derData element is written by at most one thread.
   NOTE(review): if several cells' switches target the same location, only
   the max-valued cell receives gradient -- matches the forward max, but
   derSum is only written at selected locations; verify this is the intended
   gradient for the `sum` input.  Same swapped width/height convention as the
   forward kernel. */
template <typename T> __global__ void
unpooling_max_backward_dm_kernel
(T* derData,
 T* derSum,
 const T* data,
 const uint8_t* poolSwitches,
 const T* derUnpooled,
 const int nthreads,
 const int pooledWidth,
 const int pooledHeight,
 const int width,
 const int height,
 const int depth,
 const int poolWidth,
 const int poolHeight,
 const int strideX,
 const int strideY,
 const int padLeft,
 const int padTop)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nthreads) {
    /* Decompose the flat index into (x_data, y_data) and plane z. */
    int x_data = index ;
    int y_data = x_data / width ;
    int z = y_data / height ;
    x_data %= width ;
    y_data %= height ;
    /* Range of pooled cells whose pooling window can contain this location. */
    int dx = x_data + padLeft - poolWidth ;
    int dy = y_data + padTop - poolHeight ;
    int px1 = (dx >= 0) ? dx/strideX + 1 : 0 ;
    int py1 = (dy >= 0) ? dy/strideY + 1 : 0 ;
    int px2 = min((x_data + padLeft) / strideX, pooledWidth - 1) ;
    int py2 = min((y_data + padTop) / strideY, pooledHeight - 1) ;
    T unpoolValue = (T)(-CUDART_INF_F);
    T derValue = 0;
    /* Advance the per-plane pointers; derData is one element per pooled cell. */
    poolSwitches += z * pooledHeight * pooledWidth ;
    derData += z * pooledHeight * pooledWidth ;
    data += z * pooledHeight * pooledWidth ;
    int derDataIndex = -1 ;  /* pooled cell that wins the max; -1 = none */
    for (int py = py1; py <= py2; ++py) {
      for (int px = px1; px <= px2; ++px) {
        int x1 = px * strideX - padLeft ;
        int y1 = py * strideY - padTop ;
        /* Switch stored 1-based; decode to (lx, ly) within the window. */
        int loc = poolSwitches[py * pooledWidth + px] - 1 ;
        int lx = loc % poolWidth ;
        int ly = loc / poolWidth ;
        if(x_data == (x1 + lx) && y_data == (y1 + ly)) {
          if (data[py * pooledWidth + px] > unpoolValue) {
            unpoolValue = data[py * pooledWidth + px];
            derDataIndex = py * pooledWidth + px;
            derValue = derUnpooled[index];
          }
        }
      }
    }
    if (derDataIndex != -1) {
      derData[derDataIndex] = derValue;
      if (derSum != NULL) {
        derSum[index] = derValue ;
      }
    }
  }
}
/* ---------------------------------------------------------------- */
/* MEX driver */
/* ---------------------------------------------------------------- */
/* Positional inputs: DATA, pooling SIZE, then (backward mode only) DEROUTPUT. */
enum {
  IN_DATA = 0, IN_SIZE, IN_DEROUTPUT, IN_END
} ;
/* Outputs: the unpooled result (forward) or derData (backward), plus optional derSum. */
enum {
  OUT_RESULT = 0, OUT_DERSUM, OUT_END
} ;
/* MEX entry point for GPU-only max unpooling.
   Forward:  OUT = f(DATA, SIZE, 'PoolSwitches', S, 'UnpoolOutputSize', [H W], ...)
   Backward: [DERDATA, DERSUM] = f(DATA, SIZE, DEROUTPUT, 'PoolSwitches', S, ...)
   Parses options, validates shapes/padding, allocates outputs, and dispatches
   the appropriate kernel for float (and, if enabled, double) data. */
void mexFunction(int nout, mxArray *out[],
                 int nin, mxArray const *in[])
{
  int poolWidth ;
  int poolHeight ;
  int strideX = 1 ;
  int strideY = 1 ;
  int padLeft = 0 ;
  int padRight = 0 ;
  int padTop = 0 ;
  int padBottom = 0 ;
  bool backMode = false ;
  bool doDerSum = false ;
  mxArray const *poolSwitchesIn = NULL ;
  mxArray const *sumIn = NULL ;
  int unpooledHeight = 0;
  int unpooledWidth = 0;
  int verbosity = 0 ;  /* NOTE(review): counted below but not otherwise used here */
  int opt ;
  int next = IN_END ;
  mxArray const *optarg ;
  /* -------------------------------------------------------------- */
  /*                                            Check the arguments */
  /* -------------------------------------------------------------- */
  mexAtExit(atExit) ;
  if (nin < 2) {
    mexErrMsgTxt("The arguments are less than two.") ;
  }
  /* A string third argument is an option name, not DEROUTPUT, so stay in
     forward mode and start option parsing there. */
  if (nin > 2 && vlmxIsString(in[2],-1)) {
    next = 2 ;
    backMode = 0 ;
  } else {
    backMode = (nin >= 3) ;
  }
  while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
    switch (opt) {
      case opt_verbose :
        ++ verbosity ;
        break ;
      case opt_stride :
        if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
          mexErrMsgTxt("STRIDE is not a plain matrix.") ;
        }
        switch (mxGetNumberOfElements(optarg)) {
          case 1:
            /* One element: same stride in both dimensions. */
            strideY = (int)mxGetPr(optarg)[0] ;
            strideX = strideY ;
            break ;
          case 2:
            strideY = (int)mxGetPr(optarg)[0] ;
            strideX = (int)mxGetPr(optarg)[1] ;
            break ;
          default:
            mexErrMsgTxt("STRIDE has neither one nor two elements.") ;
        }
        break ;
      case opt_pad :
        if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
          mexErrMsgTxt("PAD is not a plain matrix.") ;
        }
        switch (mxGetNumberOfElements(optarg)) {
          case 1:
            /* One element: the same padding on all four sides. */
            padLeft = (int)mxGetPr(optarg)[0] ;
            padRight = padLeft ;
            padTop = padLeft ;
            padBottom = padLeft ;
            break ;
          case 4:
            /* Order: [top bottom left right]. */
            padTop = (int)mxGetPr(optarg)[0] ;
            padBottom = (int)mxGetPr(optarg)[1] ;
            padLeft = (int)mxGetPr(optarg)[2] ;
            padRight = (int)mxGetPr(optarg)[3] ;
            break ;
          default:
            mexErrMsgTxt("PAD has neither one nor four elements.") ;
        }
        break;
      case opt_pool_switches :
        poolSwitchesIn = optarg ;
        break ;
      case opt_unpool_output_size :
        if (!vlmxIsPlainMatrix(optarg,-1,-1)) {
          mexErrMsgTxt("UNPOOLOUTPUTSIZE is not a plain matrix.") ;
        }
        if (mxGetNumberOfElements(optarg) >= 2) {
          unpooledHeight = (int)mxGetPr(optarg)[0] ;
          unpooledWidth = (int)mxGetPr(optarg)[1] ;
        } else {
          mexErrMsgTxt("UNPOOLOUTPUTSIZE has less than 2 elements") ;
        }
        break ;
      case opt_sum :
        sumIn = optarg ;
        break ;
      default:
        break ;
    }
  }
  vl::MexTensor data(context) ;
  vl::MexTensor derOutput(context) ;
  vl::MexTensor sum(context) ;
  data.init(in[IN_DATA]) ;
  data.reshape(4) ; // -> 4 dimensions
  if (backMode) {
    derOutput.init(in[IN_DEROUTPUT]) ;
    derOutput.reshape(4) ; // -> 4 dimensions
  }
  if (backMode && ! vl::areCompatible(data, derOutput)) {
    mexErrMsgTxt("DATA and DEROUTPUT do not have compatible formats.") ;
  }
  if (poolSwitchesIn == NULL) {
    mexErrMsgTxt("Unpooling requires PoolSwitches") ;
  }
  if (!vlmxIsPlainMatrix(in[IN_SIZE],-1,-1)) {
    mexErrMsgTxt("SIZE is not a plain matrix.") ;
  }
  switch (mxGetNumberOfElements(in[IN_SIZE])) {
    case 1:
      /* One element: square pooling window. */
      poolHeight = mxGetPr(in[IN_SIZE])[0] ;
      poolWidth = poolHeight ;
      break ;
    case 2:
      poolHeight = mxGetPr(in[IN_SIZE])[0] ;
      poolWidth = mxGetPr(in[IN_SIZE])[1] ;
      break ;
    default:
      mexErrMsgTxt("SIZE has neither one nor two elements.") ;
  }
  /* Basic compatibility of Shape */
  if (strideX < 1 || strideY < 1) {
    mexErrMsgTxt("At least one element of STRIDE is smaller than one.") ;
  }
  if (poolHeight == 0 || poolWidth == 0) {
    mexErrMsgTxt("A dimension of the pooling SIZE is void.") ;
  }
  if (unpooledHeight + (padTop+padBottom) < poolHeight ||
      unpooledWidth + (padLeft+padRight) < poolWidth) {
    mexErrMsgTxt("The pooling window is larger than the DATA (including padding).") ;
  }
  if (padLeft < 0 ||
      padRight < 0 ||
      padTop < 0 ||
      padBottom < 0) {
    mexErrMsgTxt("An element of PAD is negative.") ;
  }
  if (padLeft >= poolWidth ||
      padRight >= poolWidth ||
      padTop >= poolHeight ||
      padBottom >= poolHeight) {
    mexErrMsgTxt("A padding value is larger or equal to the size of the pooling window.") ;
  }
  /* In backward mode the unpooled size is dictated by DEROUTPUT. */
  if (backMode) {
    unpooledHeight = derOutput.getHeight() ;
    unpooledWidth = derOutput.getWidth() ;
  }
  if ((unpooledWidth <= 0 || unpooledHeight <= 0) && !backMode) {
    mexErrMsgTxt("Unpooling requires UnpoolOutputSize") ;
  }
  /* Get the output Shape */
  vl::TensorShape outputShape(unpooledHeight,
                              unpooledWidth,
                              data.getDepth(),
                              data.getSize()) ;
  if (backMode && (derOutput != outputShape)) {
    mexErrMsgTxt("DEROUTPUT dimensions are incompatible with X and POOL.") ;
  }
  /* Create output buffers */
  vl::Device deviceType = data.getDeviceType() ;
  vl::Type dataType = data.getDataType() ;
  vl::MexTensor output(context) ;
  vl::MexTensor poolSwitches(context) ;
  vl::MexTensor derData(context) ;
  vl::MexTensor derSum(context) ;
  if (deviceType != vl::GPU) {
    mexErrMsgTxt("Only GPU supported") ;
  }
  if (poolSwitchesIn != NULL) {
    poolSwitches.init(poolSwitchesIn) ;
    if (poolSwitches.getDeviceType() != deviceType) {
      mexErrMsgTxt("PoolSwitches and data have different device type") ;
    }
  }
  if (sumIn != NULL) {
    sum.init(sumIn) ;
    if (! vl::areCompatible(data, sum)) {
      mexErrMsgTxt("DATA and SUM do not have compatible formats.") ;
    }
  }
  if (!backMode) {
    output.initWithZeros(deviceType, dataType, outputShape) ;
  } else {
    derData.initWithZeros(deviceType, dataType, data.getShape()) ;
    /* derSum is only produced when the caller asked for a second output. */
    if (nout > 1) {
      derSum.initWithZeros(deviceType, dataType, derOutput.getShape()) ;
      doDerSum = true ;
    }
  }
  // Dispatch
  /* NOTE(review): height/width (and the pooled/pool/stride/pad pairs) are
     passed swapped relative to the kernel parameter names; presumably
     intentional for column-major MATLAB data -- confirm before refactoring. */
  int height = outputShape.getHeight() ;
  int width = outputShape.getWidth() ;
  int depth = data.getDepth() * data.getSize() ;
  int pooledWidth = (width + (padLeft+padRight) - poolWidth)/strideX + 1 ;
  int pooledHeight = (height + (padTop+padBottom) - poolHeight)/strideY + 1 ;
  int nthreads = width * height * depth ;  /* one thread per unpooled element */
  void * sumMem = sumIn ? sum.getMemory() : NULL ;
  void * derSumMem = doDerSum ? derSum.getMemory() : NULL ;
  if (!backMode) {
    if (dataType == vl::vlTypeFloat) {
      unpooling_max_forward_dm_kernel<float>
        <<< vl::divideUpwards(nthreads, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
        ((float*) output.getMemory(), (float const*) data.getMemory(),
         (uint8_t const*) poolSwitches.getMemory(),
         (float const*) sumMem,
         nthreads,
         pooledHeight, pooledWidth,
         height, width, depth,
         poolHeight, poolWidth,
         strideY, strideX,
         padTop, padLeft);
    } else if (dataType == vl::vlTypeDouble) {
#ifdef ENABLE_DOUBLE
      unpooling_max_forward_dm_kernel<double>
        <<< vl::divideUpwards(nthreads, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
        ((double*) output.getMemory(), (double const*) data.getMemory(),
         (uint8_t const*) poolSwitches.getMemory(),
         (double const*) sumMem,
         nthreads,
         pooledHeight, pooledWidth,
         height, width, depth,
         poolHeight, poolWidth,
         strideY, strideX,
         padTop, padLeft);
#endif
    }
  } else {
    // Backward
    if (dataType == vl::vlTypeFloat) {
      unpooling_max_backward_dm_kernel<float>
        <<< vl::divideUpwards(nthreads, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
        ((float*) derData.getMemory(), (float*) derSumMem, (float const*) data.getMemory(),
         (uint8_t const*) poolSwitches.getMemory(), (float const*) derOutput.getMemory(),
         nthreads,
         pooledHeight, pooledWidth,
         height, width, depth,
         poolHeight, poolWidth,
         strideY, strideX,
         padTop, padLeft);
    } else if (dataType == vl::vlTypeDouble) {
#ifdef ENABLE_DOUBLE
      unpooling_max_backward_dm_kernel<double>
        <<< vl::divideUpwards(nthreads, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
        ((double*) derData.getMemory(), (double*) derSumMem, (double const*) data.getMemory(),
         (uint8_t const*) poolSwitches.getMemory(), (double const*) derOutput.getMemory(),
         nthreads,
         pooledHeight, pooledWidth,
         height, width, depth,
         poolHeight, poolWidth,
         strideY, strideX,
         padTop, padLeft);
#endif
    }
  }
  /* Surface asynchronous launch errors without clearing the error state. */
  cudaError_t status = cudaPeekAtLastError() ;
  if (status != cudaSuccess) {
    mexErrMsgTxt(context.getLastErrorMessage().c_str()) ;
  }
  if (backMode) {
    out[OUT_RESULT] = derData.relinquish() ;
    if (doDerSum) {
      out[OUT_DERSUM] = derSum.relinquish() ;
    }
  } else {
    out[OUT_RESULT] = output.relinquish() ;
  }
}
|
07eb284d99f39d4e1e2782b2bc16e714754d7e8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
void initialData(float *ip, const int N);
void sumArraysOnHost(float *A, float *B, float *C, const int N, const int offset);
void verifyResult(float *hostRes, float *deviceRes, const int N);
__global__ void sumArraysOnDeviceOffset(float *A, float *B, float *C, const int N, const int offset);
__global__ void sumArraysOnDeviceOffsetUnroll2(float *A, float *B, float *C, const int N, const int offset);
__global__ void sumArraysOnDeviceOffsetUnroll4(float *A, float *B, float *C, const int N, const int offset);
__global__ void sumArraysReadonlyCache(const float * __restrict__ A, const float * __restrict__ B, float * __restrict__ C, const int N, const int offset);
// Abort the program with file/line context when a HIP runtime call fails.
// Usage: CHECK(hipMalloc(...)); -- wraps any call returning hipError_t.
#define CHECK(call) { \
    const hipError_t error = call; \
    if (error != hipSuccess) { \
        printf("Error: %s:%d, ", __FILE__, __LINE__); \
        printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \
        exit(1); \
    } \
} \
// Driver: benchmark offset vector addition on CPU and GPU.
//   argv[1] = log2(number of elements)  (default 20 -> 1M floats)
//   argv[2] = threads per block         (default 512)
//   argv[3] = element read offset       (default 0)
// Runs four GPU variants (plain, read-only cache, x2 unroll, x4 unroll)
// and checks each against the CPU reference.
int main(int argc, char **argv) {
    int power = 20;
    if (argc>1) power = atoi(argv[1]);
    int blockSize = 512;
    if (argc>2) blockSize = atoi(argv[2]);
    int offset = 0;
    if (argc>3) offset = atoi(argv[3]);
    int nElem = 1<<power;
    size_t nBytes = nElem * sizeof(float);
    clock_t start, end;
    double time;
    printf("Vector size %d\n", nElem);
    // set up device
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));
    // allocate host memory
    float *h_A, *h_B, *h_C, *h_C_gpu;
    h_A = (float *) malloc(nBytes);
    h_B = (float *) malloc(nBytes);
    h_C = (float *) malloc(nBytes);
    h_C_gpu = (float *) malloc(nBytes);
    // initial data (in CPU mem)
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    memset(h_C, 0, nBytes);
    // compute on CPU (reference result for verifyResult below)
    start = clock();
    sumArraysOnHost(h_A, h_B, h_C, nElem, offset);
    end = clock();
    time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("CPU execution: %.4f ms\n", time * 1000);
    // allocate device memory
    float *d_A, *d_B, *d_C;
    CHECK(hipMalloc((float**)&d_A, nBytes));
    CHECK(hipMalloc((float**)&d_B, nBytes));
    CHECK(hipMalloc((float**)&d_C, nBytes));
    // copy data from CPU to GPU
    CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
    // configuration: ceil-divide so the grid covers all nElem elements
    dim3 block(blockSize);
    dim3 grid((nElem+block.x-1)/block.x);
    // 1. no unrolling
    hipMemset(d_C, 0, nBytes);
    memset(h_C_gpu, 0, nBytes);
    start = clock();
    hipLaunchKernelGGL(( sumArraysOnDeviceOffset), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
    CHECK(hipDeviceSynchronize()); // synchronize kernel only for debugging!
    end = clock();
    time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("no unrolling <<< %4d, %4d >>> offset %d elapsed %f ms\n",
            grid.x, block.x, offset, time * 1000);
    // check result
    CHECK(hipMemcpy(h_C_gpu, d_C, nBytes, hipMemcpyDeviceToHost));
    verifyResult(h_C, h_C_gpu, nElem);
    // 1. no unrolling; read-only cache
    hipMemset(d_C, 0, nBytes);
    memset(h_C_gpu, 0, nBytes);
    start = clock();
    hipLaunchKernelGGL(( sumArraysReadonlyCache), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
    CHECK(hipDeviceSynchronize()); // synchronize kernel only for debugging!
    end = clock();
    time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("read-only cache <<< %4d, %4d >>> offset %d elapsed %f ms\n",
            grid.x, block.x, offset, time * 1000);
    // check result
    CHECK(hipMemcpy(h_C_gpu, d_C, nBytes, hipMemcpyDeviceToHost));
    verifyResult(h_C, h_C_gpu, nElem);
    // 2. x2 unrolling: each thread covers 2 elements, so halve the grid.
    // NOTE(review): grid.x/2 truncates; fine for the default power-of-two
    // sizes, but an odd grid.x would leave a tail of elements unprocessed.
    hipMemset(d_C, 0, nBytes);
    memset(h_C_gpu, 0, nBytes);
    start = clock();
    hipLaunchKernelGGL(( sumArraysOnDeviceOffsetUnroll2), dim3(grid.x / 2), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
    CHECK(hipDeviceSynchronize()); // synchronize kernel only for debugging!
    end = clock();
    time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("unroll2 <<< %4d, %4d >>> offset %d elapsed %f ms\n",
            grid.x / 2, block.x, offset, time * 1000);
    // check result
    CHECK(hipMemcpy(h_C_gpu, d_C, nBytes, hipMemcpyDeviceToHost));
    verifyResult(h_C, h_C_gpu, nElem);
    // 3. x4 unrolling: each thread covers 4 elements, so quarter the grid
    hipMemset(d_C, 0, nBytes);
    memset(h_C_gpu, 0, nBytes);
    start = clock();
    hipLaunchKernelGGL(( sumArraysOnDeviceOffsetUnroll4), dim3(grid.x / 4), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset);
    CHECK(hipDeviceSynchronize()); // synchronize kernel only for debugging!
    end = clock();
    time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("unroll4 <<< %4d, %4d >>> offset %d elapsed %f ms\n",
            grid.x / 4, block.x, offset, time * 1000);
    // check result
    CHECK(hipMemcpy(h_C_gpu, d_C, nBytes, hipMemcpyDeviceToHost));
    verifyResult(h_C, h_C_gpu, nElem);
    // free host mem
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C_gpu);
    // free device mem
    CHECK(hipFree(d_A));
    CHECK(hipFree(d_B));
    CHECK(hipFree(d_C));
    // clean up all resources
    CHECK(hipDeviceReset());
    return 0;
}
/**********CUDA kernels**********/
// Element-wise C[i] = A[i+offset] + B[i+offset]; threads whose shifted
// read index falls past the end of the N-element arrays do nothing.
__global__ void sumArraysOnDeviceOffset(
    float *A, float *B, float *C,
    const int N, const int offset) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int src = offset + i;
    if (src < N) {
        C[i] = A[src] + B[src];
    }
}
// x2-unrolled variant: each thread handles two elements spaced one
// block-width apart, so the launch uses half as many blocks.
__global__ void sumArraysOnDeviceOffsetUnroll2(
    float *A, float *B, float *C,
    const int N, const int offset) {
    const int base = threadIdx.x + 2 * blockDim.x * blockIdx.x;
    const int src = base + offset;
    const int step = blockDim.x;
    if (src < N) {
        C[base] = A[src] + B[src];
    }
    if (src + step < N) {
        C[base + step] = A[src + step] + B[src + step];
    }
}
// x4-unrolled variant: each thread handles four elements spaced one
// block-width apart; the fixed-trip loop unrolls to the same four
// guarded adds as the original hand-unrolled version.
__global__ void sumArraysOnDeviceOffsetUnroll4(
    float *A, float *B, float *C,
    const int N, const int offset) {
    const int base = threadIdx.x + 4 * blockDim.x * blockIdx.x;
    const int src = base + offset;
    #pragma unroll
    for (int u = 0; u < 4; u++) {
        const int d = u * blockDim.x;
        if (src + d < N) {
            C[base + d] = A[src + d] + B[src + d];
        }
    }
}
// Same as sumArraysOnDeviceOffset, but loads A and B through the
// read-only data cache via __ldg (pointers are const __restrict__).
__global__ void sumArraysReadonlyCache(
    const float * __restrict__ A,
    const float * __restrict__ B,
    float * __restrict__ C,
    const int N, const int offset) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int src = i + offset;
    if (src < N) {
        C[i] = __ldg(A + src) + __ldg(B + src);
    }
}
/**********host functions**********/
// Fill ip[0..N-1] with pseudo-random floats in [0.0, 25.5].
// NOTE(review): srand() is reseeded from wall-clock time on every call,
// so two calls within the same second produce identical sequences.
void initialData(float *ip, const int N) {
    // generate different seed for random number
    time_t t;
    srand((unsigned int) time(&t));
    for (int i = 0; i < N; i++) {
        // rand() & 0xFF yields 0..255; divide by 10 to get 0.0..25.5
        ip[i] = (float) (rand() & 0xFF) / 10.f;
    }
}
// CPU reference: C[i] = A[i+offset] + B[i+offset] for every shifted index
// below N; the trailing `offset` entries of C are left untouched.
void sumArraysOnHost(float *A, float *B, float *C, const int N, const int offset) {
    int dst = 0;
    for (int src = offset; src < N; ++src, ++dst) {
        C[dst] = A[src] + B[src];
    }
}
// Compare host and device result arrays element-by-element and report the
// first mismatch; eps is an absolute tolerance.
// NOTE(review): `abs` may bind to the C integer overload depending on which
// math headers the compiler pulls in (only <stdio.h> and the runtime header
// are included), which would truncate the float difference toward zero and
// mask sub-1.0 mismatches -- fabs() would be unambiguous; confirm.
void verifyResult(float *hostRes, float *deviceRes, const int N) {
    double eps = 1e-8;
    for (int i = 0; i < N; i++) {
        if (abs(hostRes[i] - deviceRes[i]) > eps) {
            printf("Arrays do not match:\n");
            printf("host %5.2f gpu %5.2f at array index %d\n", hostRes[i], deviceRes[i], i);
            return;
        }
    }
    return;
} | 07eb284d99f39d4e1e2782b2bc16e714754d7e8b.cu | #include <stdio.h>
#include <cuda_runtime.h>
void initialData(float *ip, const int N);
void sumArraysOnHost(float *A, float *B, float *C, const int N, const int offset);
void verifyResult(float *hostRes, float *deviceRes, const int N);
__global__ void sumArraysOnDeviceOffset(float *A, float *B, float *C, const int N, const int offset);
__global__ void sumArraysOnDeviceOffsetUnroll2(float *A, float *B, float *C, const int N, const int offset);
__global__ void sumArraysOnDeviceOffsetUnroll4(float *A, float *B, float *C, const int N, const int offset);
__global__ void sumArraysReadonlyCache(const float * __restrict__ A, const float * __restrict__ B, float * __restrict__ C, const int N, const int offset);
// Abort the program with file/line context when a CUDA runtime call fails.
// Usage: CHECK(cudaMalloc(...)); -- wraps any call returning cudaError_t.
#define CHECK(call) { \
    const cudaError_t error = call; \
    if (error != cudaSuccess) { \
        printf("Error: %s:%d, ", __FILE__, __LINE__); \
        printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
        exit(1); \
    } \
} \
// Driver: benchmark offset vector addition on CPU and GPU.
//   argv[1] = log2(number of elements)  (default 20 -> 1M floats)
//   argv[2] = threads per block         (default 512)
//   argv[3] = element read offset       (default 0)
// Runs four GPU variants (plain, read-only cache, x2 unroll, x4 unroll)
// and checks each against the CPU reference.
int main(int argc, char **argv) {
    int power = 20;
    if (argc>1) power = atoi(argv[1]);
    int blockSize = 512;
    if (argc>2) blockSize = atoi(argv[2]);
    int offset = 0;
    if (argc>3) offset = atoi(argv[3]);
    int nElem = 1<<power;
    size_t nBytes = nElem * sizeof(float);
    clock_t start, end;
    double time;
    printf("Vector size %d\n", nElem);
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    // allocate host memory
    float *h_A, *h_B, *h_C, *h_C_gpu;
    h_A = (float *) malloc(nBytes);
    h_B = (float *) malloc(nBytes);
    h_C = (float *) malloc(nBytes);
    h_C_gpu = (float *) malloc(nBytes);
    // initial data (in CPU mem)
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    memset(h_C, 0, nBytes);
    // compute on CPU (reference result for verifyResult below)
    start = clock();
    sumArraysOnHost(h_A, h_B, h_C, nElem, offset);
    end = clock();
    time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("CPU execution: %.4f ms\n", time * 1000);
    // allocate device memory
    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_B, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));
    // copy data from CPU to GPU
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
    // configuration: ceil-divide so the grid covers all nElem elements
    dim3 block(blockSize);
    dim3 grid((nElem+block.x-1)/block.x);
    // 1. no unrolling
    cudaMemset(d_C, 0, nBytes);
    memset(h_C_gpu, 0, nBytes);
    start = clock();
    sumArraysOnDeviceOffset<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
    CHECK(cudaDeviceSynchronize()); // synchronize kernel only for debugging!
    end = clock();
    time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("no unrolling <<< %4d, %4d >>> offset %d elapsed %f ms\n",
            grid.x, block.x, offset, time * 1000);
    // check result
    CHECK(cudaMemcpy(h_C_gpu, d_C, nBytes, cudaMemcpyDeviceToHost));
    verifyResult(h_C, h_C_gpu, nElem);
    // 1. no unrolling; read-only cache
    cudaMemset(d_C, 0, nBytes);
    memset(h_C_gpu, 0, nBytes);
    start = clock();
    sumArraysReadonlyCache<<<grid, block>>>(d_A, d_B, d_C, nElem, offset);
    CHECK(cudaDeviceSynchronize()); // synchronize kernel only for debugging!
    end = clock();
    time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("read-only cache <<< %4d, %4d >>> offset %d elapsed %f ms\n",
            grid.x, block.x, offset, time * 1000);
    // check result
    CHECK(cudaMemcpy(h_C_gpu, d_C, nBytes, cudaMemcpyDeviceToHost));
    verifyResult(h_C, h_C_gpu, nElem);
    // 2. x2 unrolling: each thread covers 2 elements, so halve the grid.
    // NOTE(review): grid.x/2 truncates; fine for the default power-of-two
    // sizes, but an odd grid.x would leave a tail of elements unprocessed.
    cudaMemset(d_C, 0, nBytes);
    memset(h_C_gpu, 0, nBytes);
    start = clock();
    sumArraysOnDeviceOffsetUnroll2<<<grid.x / 2, block>>>(d_A, d_B, d_C, nElem, offset);
    CHECK(cudaDeviceSynchronize()); // synchronize kernel only for debugging!
    end = clock();
    time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("unroll2 <<< %4d, %4d >>> offset %d elapsed %f ms\n",
            grid.x / 2, block.x, offset, time * 1000);
    // check result
    CHECK(cudaMemcpy(h_C_gpu, d_C, nBytes, cudaMemcpyDeviceToHost));
    verifyResult(h_C, h_C_gpu, nElem);
    // 3. x4 unrolling: each thread covers 4 elements, so quarter the grid
    cudaMemset(d_C, 0, nBytes);
    memset(h_C_gpu, 0, nBytes);
    start = clock();
    sumArraysOnDeviceOffsetUnroll4<<<grid.x / 4, block>>>(d_A, d_B, d_C, nElem, offset);
    CHECK(cudaDeviceSynchronize()); // synchronize kernel only for debugging!
    end = clock();
    time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("unroll4 <<< %4d, %4d >>> offset %d elapsed %f ms\n",
            grid.x / 4, block.x, offset, time * 1000);
    // check result
    CHECK(cudaMemcpy(h_C_gpu, d_C, nBytes, cudaMemcpyDeviceToHost));
    verifyResult(h_C, h_C_gpu, nElem);
    // free host mem
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C_gpu);
    // free device mem
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    // clean up all resources
    CHECK(cudaDeviceReset());
    return 0;
}
/**********CUDA kernels**********/
// Element-wise C[i] = A[i+offset] + B[i+offset]; threads whose shifted
// read index falls past the end of the N-element arrays do nothing.
__global__ void sumArraysOnDeviceOffset(
    float *A, float *B, float *C,
    const int N, const int offset) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int src = offset + i;
    if (src < N) {
        C[i] = A[src] + B[src];
    }
}
// x2-unrolled variant: each thread handles two elements spaced one
// block-width apart, so the launch uses half as many blocks.
__global__ void sumArraysOnDeviceOffsetUnroll2(
    float *A, float *B, float *C,
    const int N, const int offset) {
    const int base = threadIdx.x + 2 * blockDim.x * blockIdx.x;
    const int src = base + offset;
    const int step = blockDim.x;
    if (src < N) {
        C[base] = A[src] + B[src];
    }
    if (src + step < N) {
        C[base + step] = A[src + step] + B[src + step];
    }
}
// x4-unrolled variant: each thread handles four elements spaced one
// block-width apart; the fixed-trip loop unrolls to the same four
// guarded adds as the original hand-unrolled version.
__global__ void sumArraysOnDeviceOffsetUnroll4(
    float *A, float *B, float *C,
    const int N, const int offset) {
    const int base = threadIdx.x + 4 * blockDim.x * blockIdx.x;
    const int src = base + offset;
    #pragma unroll
    for (int u = 0; u < 4; u++) {
        const int d = u * blockDim.x;
        if (src + d < N) {
            C[base + d] = A[src + d] + B[src + d];
        }
    }
}
// Same as sumArraysOnDeviceOffset, but loads A and B through the
// read-only data cache via __ldg (pointers are const __restrict__).
__global__ void sumArraysReadonlyCache(
    const float * __restrict__ A,
    const float * __restrict__ B,
    float * __restrict__ C,
    const int N, const int offset) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int src = i + offset;
    if (src < N) {
        C[i] = __ldg(A + src) + __ldg(B + src);
    }
}
/**********host functions**********/
// Fill ip[0..N-1] with pseudo-random floats in [0.0, 25.5].
// NOTE(review): srand() is reseeded from wall-clock time on every call,
// so two calls within the same second produce identical sequences.
void initialData(float *ip, const int N) {
    // generate different seed for random number
    time_t t;
    srand((unsigned int) time(&t));
    for (int i = 0; i < N; i++) {
        // rand() & 0xFF yields 0..255; divide by 10 to get 0.0..25.5
        ip[i] = (float) (rand() & 0xFF) / 10.f;
    }
}
// CPU reference: C[i] = A[i+offset] + B[i+offset] for every shifted index
// below N; the trailing `offset` entries of C are left untouched.
void sumArraysOnHost(float *A, float *B, float *C, const int N, const int offset) {
    int dst = 0;
    for (int src = offset; src < N; ++src, ++dst) {
        C[dst] = A[src] + B[src];
    }
}
// Compare host and device result arrays element-by-element and report the
// first mismatch within an absolute tolerance of eps.
//
// Fix: the original used abs() on a float difference; with only <stdio.h>
// and <cuda_runtime.h> included, abs() can bind to the C integer overload,
// truncating |host - gpu| toward zero and silently masking every mismatch
// smaller than 1.0. The sign-symmetric comparison below needs no extra
// headers and evaluates the true absolute difference.
void verifyResult(float *hostRes, float *deviceRes, const int N) {
    double eps = 1e-8;  // absolute tolerance
    for (int i = 0; i < N; i++) {
        double diff = (double) hostRes[i] - (double) deviceRes[i];
        if (diff > eps || -diff > eps) {
            printf("Arrays do not match:\n");
            printf("host %5.2f gpu %5.2f at array index %d\n", hostRes[i], deviceRes[i], i);
            return;
        }
    }
    return;
}
b1c5003d2a2b8a80e15bd51c77e48e9d54c98efb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
// c[0..15] += a * b[0..15]; the fixed-trip loop unrolls to the same
// sixteen multiply-adds as the original hand-unrolled body.
static __device__ void daxpy(double a,double *b, double *c) {
#pragma unroll
    for (int i = 0; i < 16; ++i) {
        c[i] += a * b[i];
    }
}
// Tiled dgemm kernel: C = alpha * A^T * B^T + beta * C, operating on a
// 64x16 tile of C per thread block (16x4 threads, 16-deep K steps).
// Assumes n is a multiple of 16 (n/16 is used as a modulus below) and the
// launch supplies m/64 x n/16 blocks -- see the host wrapper.
__global__ void
dgemm_kernel_T_T_64_16_16_16_4_v2(double *C, const double *A, const double *B,
                                  int m, int n, int k,
                                  int lda, int ldb, int ldc,
                                  double alpha, double beta)
{
    /*  -- MAGMA (version 1.3.0) --
        Univ. of Tennessee, Knoxville
        Univ. of California, Berkeley
        Univ. of Colorado, Denver
        November 2012

        Purpose:
        ========
        This routine computes
        C = alpha* A^T*B^T +  beta * C
        B is put into shared memory
        Parameters Used:
        blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
        This code should run for any matrix size.
        This kernel outperforms cuda-2.2 when m,n,k >=512
        ===============================================================  */
    // B tile staged in shared memory; inner dimension padded to 17
    // (presumably to avoid shared-memory bank conflicts on column access).
    __shared__ double Bb[16][17];
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    // Column tile of C, skewed by blockIdx.x (block-index rotation;
    // presumably to spread concurrent blocks across columns of B/C).
    int iby = ((blockIdx.y + blockIdx.x ) % (n/16))*16;
    const int idt = ty * 16 + tx;   // flattened thread id, 0..63
    int ibx = blockIdx.x *64+idt;   // this thread's row of C
    //int iby = blockIdx.y *16;
    A += ibx ;
    B+=tx+__mul24(iby+ty,ldb);
    C += __mul24(ibx ,ldc) + iby;
    const double *Bend = B + k;
    // Per-thread accumulator: one 16-wide row strip of the C tile.
    double Cb[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
    do {
        // Prefetch 4 A values into registers and a 16x16 B tile into shared.
        double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
        Bb[tx][ty+0]  = B[0*ldb];
        Bb[tx][ty+4]  = B[4*ldb];
        Bb[tx][ty+8]  = B[8*ldb];
        Bb[tx][ty+12] = B[12*ldb];
        __syncthreads();   // tile must be fully written before any use
        A += 4 * lda;
        // 16 rank-1 updates, interleaved with the next A prefetches.
        daxpy(Ab[0], &Bb[0][0], Cb);  Ab[0] = A[0*lda];
        daxpy(Ab[1], &Bb[1][0], Cb);  Ab[1] = A[1*lda];
        daxpy(Ab[2], &Bb[2][0], Cb);  Ab[2] = A[2*lda];
        daxpy(Ab[3], &Bb[3][0], Cb);  Ab[3] = A[3*lda];
        A += 4 * lda;
        daxpy(Ab[0], &Bb[4][0], Cb);  Ab[0] = A[0*lda];
        daxpy(Ab[1], &Bb[5][0], Cb);  Ab[1] = A[1*lda];
        daxpy(Ab[2], &Bb[6][0], Cb);  Ab[2] = A[2*lda];
        daxpy(Ab[3], &Bb[7][0], Cb);  Ab[3] = A[3*lda];
        A += 4 * lda;
        daxpy(Ab[0], &Bb[8][0], Cb);  Ab[0] = A[0*lda];
        daxpy(Ab[1], &Bb[9][0], Cb);  Ab[1] = A[1*lda];
        daxpy(Ab[2], &Bb[10][0], Cb); Ab[2] = A[2*lda];
        daxpy(Ab[3], &Bb[11][0], Cb); Ab[3] = A[3*lda];
        A += 4 * lda;
        daxpy(Ab[0], &Bb[12][0], Cb);
        daxpy(Ab[1], &Bb[13][0], Cb);
        daxpy(Ab[2], &Bb[14][0], Cb);
        daxpy(Ab[3], &Bb[15][0], Cb);
        B += 16;
        __syncthreads();   // keep Bb alive until every thread is done reading
    } while (B < Bend);
    // Epilogue: scale the accumulator and blend with the existing C.
#pragma unroll 16
    for (int i = 0; i < 16; i++) {
        C[i] = alpha*Cb[i] + beta * C[i];
    }
}
// Host-side launcher for the T_T 64x16 dgemm kernel on the magma stream.
// Grid is (m/64) x (n/16) blocks of 16x4 threads; integer division silently
// drops any remainder, so m and n are expected to be multiples of 64 and 16.
extern "C" void
magmablas_dgemm_kernel_T_T_64_16_16_16_4_v2(double *C,
                                            const double *A,
                                            const double *B,
                                            magma_int_t m, magma_int_t n, magma_int_t k,
                                            magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
                                            double alpha, double beta)
{
    dim3 threads( 16, 4 );
    dim3 grid(m/64,n/16);
    hipLaunchKernelGGL(( dgemm_kernel_T_T_64_16_16_16_4_v2), dim3(grid), dim3(threads), 0, magma_stream , C, A, B,
                                                             m, n, k,
                                                             lda, ldb, ldc,
                                                             alpha, beta);
}
| b1c5003d2a2b8a80e15bd51c77e48e9d54c98efb.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@precisions normal d
*/
#include "common_magma.h"
#include "commonblas_d.h"
// c[0..15] += a * b[0..15]; the fixed-trip loop unrolls to the same
// sixteen multiply-adds as the original hand-unrolled body.
static __device__ void daxpy(double a,double *b, double *c) {
#pragma unroll
    for (int i = 0; i < 16; ++i) {
        c[i] += a * b[i];
    }
}
// Tiled dgemm kernel: C = alpha * A^T * B^T + beta * C, operating on a
// 64x16 tile of C per thread block (16x4 threads, 16-deep K steps).
// Assumes n is a multiple of 16 (n/16 is used as a modulus below) and the
// launch supplies m/64 x n/16 blocks -- see the host wrapper.
__global__ void
dgemm_kernel_T_T_64_16_16_16_4_v2(double *C, const double *A, const double *B,
                                  int m, int n, int k,
                                  int lda, int ldb, int ldc,
                                  double alpha, double beta)
{
    /*  -- MAGMA (version 1.3.0) --
        Univ. of Tennessee, Knoxville
        Univ. of California, Berkeley
        Univ. of Colorado, Denver
        November 2012

        Purpose:
        ========
        This routine computes
        C = alpha* A^T*B^T +  beta * C
        B is put into shared memory
        Parameters Used:
        blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
        This code should run for any matrix size.
        This kernel outperforms cuda-2.2 when m,n,k >=512
        ===============================================================  */
    // B tile staged in shared memory; inner dimension padded to 17
    // (presumably to avoid shared-memory bank conflicts on column access).
    __shared__ double Bb[16][17];
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    // Column tile of C, skewed by blockIdx.x (block-index rotation;
    // presumably to spread concurrent blocks across columns of B/C).
    int iby = ((blockIdx.y + blockIdx.x ) % (n/16))*16;
    const int idt = ty * 16 + tx;   // flattened thread id, 0..63
    int ibx = blockIdx.x *64+idt;   // this thread's row of C
    //int iby = blockIdx.y *16;
    A += ibx ;
    B+=tx+__mul24(iby+ty,ldb);
    C += __mul24(ibx ,ldc) + iby;
    const double *Bend = B + k;
    // Per-thread accumulator: one 16-wide row strip of the C tile.
    double Cb[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
    do {
        // Prefetch 4 A values into registers and a 16x16 B tile into shared.
        double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
        Bb[tx][ty+0]  = B[0*ldb];
        Bb[tx][ty+4]  = B[4*ldb];
        Bb[tx][ty+8]  = B[8*ldb];
        Bb[tx][ty+12] = B[12*ldb];
        __syncthreads();   // tile must be fully written before any use
        A += 4 * lda;
        // 16 rank-1 updates, interleaved with the next A prefetches.
        daxpy(Ab[0], &Bb[0][0], Cb);  Ab[0] = A[0*lda];
        daxpy(Ab[1], &Bb[1][0], Cb);  Ab[1] = A[1*lda];
        daxpy(Ab[2], &Bb[2][0], Cb);  Ab[2] = A[2*lda];
        daxpy(Ab[3], &Bb[3][0], Cb);  Ab[3] = A[3*lda];
        A += 4 * lda;
        daxpy(Ab[0], &Bb[4][0], Cb);  Ab[0] = A[0*lda];
        daxpy(Ab[1], &Bb[5][0], Cb);  Ab[1] = A[1*lda];
        daxpy(Ab[2], &Bb[6][0], Cb);  Ab[2] = A[2*lda];
        daxpy(Ab[3], &Bb[7][0], Cb);  Ab[3] = A[3*lda];
        A += 4 * lda;
        daxpy(Ab[0], &Bb[8][0], Cb);  Ab[0] = A[0*lda];
        daxpy(Ab[1], &Bb[9][0], Cb);  Ab[1] = A[1*lda];
        daxpy(Ab[2], &Bb[10][0], Cb); Ab[2] = A[2*lda];
        daxpy(Ab[3], &Bb[11][0], Cb); Ab[3] = A[3*lda];
        A += 4 * lda;
        daxpy(Ab[0], &Bb[12][0], Cb);
        daxpy(Ab[1], &Bb[13][0], Cb);
        daxpy(Ab[2], &Bb[14][0], Cb);
        daxpy(Ab[3], &Bb[15][0], Cb);
        B += 16;
        __syncthreads();   // keep Bb alive until every thread is done reading
    } while (B < Bend);
    // Epilogue: scale the accumulator and blend with the existing C.
#pragma unroll 16
    for (int i = 0; i < 16; i++) {
        C[i] = alpha*Cb[i] + beta * C[i];
    }
}
// Host-side launcher for the T_T 64x16 dgemm kernel on the magma stream.
// Grid is (m/64) x (n/16) blocks of 16x4 threads; integer division silently
// drops any remainder, so m and n are expected to be multiples of 64 and 16.
extern "C" void
magmablas_dgemm_kernel_T_T_64_16_16_16_4_v2(double *C,
                                            const double *A,
                                            const double *B,
                                            magma_int_t m, magma_int_t n, magma_int_t k,
                                            magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
                                            double alpha, double beta)
{
    dim3 threads( 16, 4 );
    dim3 grid(m/64,n/16);
    dgemm_kernel_T_T_64_16_16_16_4_v2<<< grid, threads, 0, magma_stream >>>(C, A, B,
                                                                            m, n, k,
                                                                            lda, ldb, ldc,
                                                                            alpha, beta);
}
|
65d692f6b8eeece28c1a7505a63747926fcfb476.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "XSbench_header.cuh"
////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// port of the original CPU OpenMP code to CUDA with few significant changes or
// optimizations made. Following these functions are a number of optimized variants,
// which each deploy a different combination of optimizations strategies. By
// default, XSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
// Launches the baseline event-based XS lookup kernel on `stream_app` with
// one thread per lookup, then synchronizes.
//
// Returns the reduced verification scalar; currently always 0 because the
// thrust reduction was disabled in this port (left commented below so the
// original behavior can be restored when thrust is available).
//
// Fix: re-enabled the launch-error check right after the kernel launch so
// bad launch configurations are reported precisely instead of surfacing as
// a vaguer failure at the synchronize; nblocks now uses integer ceiling
// division instead of a double-precision ceil() round trip (same value).
unsigned long long run_event_based_simulation_baseline(Inputs in, SimulationData GSD, int mype, hipStream_t stream_app)
{
    if( mype == 0) printf("Running baseline event-based simulation...\n");
    int nthreads = 32;
    // Integer ceil-div: one thread per lookup, last block may be partial.
    int nblocks = (in.lookups + nthreads - 1) / nthreads;
    hipLaunchKernelGGL(( xs_lookup_kernel_baseline), dim3(nblocks), dim3(nthreads), 0, stream_app, in, GSD );
    gpuErrchk( hipPeekAtLastError() );   // catch launch-configuration errors
    gpuErrchk( hipDeviceSynchronize() ); // catch asynchronous execution errors
    if( mype == 0) printf("Reducing verification results...\n");
    /* unsigned long verification_scalar = thrust::reduce(GSD.verification, GSD.verification + in.lookups, 0); */
    gpuErrchk( hipDeviceSynchronize() );
    /* return verification_scalar; */
    return 0;
}
// In this kernel, we perform a single lookup with each thread. Threads within a warp
// do not really have any relation to each other, and divergence due to high nuclide count fuel
// material lookups are costly. This kernel constitutes baseline performance.
// One macroscopic cross-section lookup per thread. Each thread derives its
// own RNG state from its lookup index, samples an energy/material pair,
// performs the 5-channel macroscopic lookup, and records the index of the
// largest channel (+1) so the work cannot be optimized away.
__global__ void xs_lookup_kernel_baseline(Inputs in, SimulationData GSD )
{
	// Lookup index: sets the RNG stream and the verification slot.
	const int lookup = blockIdx.x * blockDim.x + threadIdx.x;
	if( lookup >= in.lookups )
		return;

	// Each lookup consumes two RNG samples, so jump the LCG ahead by 2*i.
	uint64_t seed = fast_forward_LCG(STARTING_SEED, 2 * lookup);

	// Sample the particle's energy (in lethargy) and its material.
	const double p_energy = LCG_random_double(&seed);
	const int mat = pick_mat(&seed);

	double macro_xs_vector[5] = {0};

	// Perform macroscopic Cross Section Lookup
	calculate_macro_xs(
			p_energy,                   // Sampled neutron energy (in lethargy)
			mat,                        // Sampled material type index neutron is in
			in.n_isotopes,              // Total number of isotopes in simulation
			in.n_gridpoints,            // Number of gridpoints per isotope in simulation
			GSD.num_nucs,               // 1-D array with number of nuclides per material
			GSD.concs,                  // Flattened 2-D array with concentration of each nuclide in each material
			GSD.unionized_energy_array, // 1-D Unionized energy array
			GSD.index_grid,             // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
			GSD.nuclide_grid,           // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
			GSD.mats,                   // Flattened 2-D array with nuclide indices defining composition of each type of material
			macro_xs_vector,            // 1-D array with result of the macroscopic cross section (5 different reaction channels)
			in.grid_type,               // Lookup type (nuclide, hash, or unionized)
			in.hash_bins,               // Number of hash bins used (if using hash lookup type)
			GSD.max_num_nucs            // Maximum number of nuclides present in any material
			);

	// Verification: argmax over the 5 reaction channels, stored as index+1
	// in a per-thread slot; a reduction kernel folds these up afterwards.
	int winner = 0;
	double best = -1.0;
	for( int c = 0; c < 5; c++ )
	{
		if( macro_xs_vector[c] > best )
		{
			best = macro_xs_vector[c];
			winner = c;
		}
	}
	GSD.verification[lookup] = winner + 1;
}
// Calculates the microscopic cross section for a given nuclide & energy
// Interpolates the five microscopic cross sections (total, elastic,
// absorption, fission, nu-fission) for nuclide `nuc` at energy `p_energy`,
// writing them into xs_vector[0..4].
//
// `idx` is the precomputed energy location for the UNIONIZED (index into
// the unionized grid) and hash (hash-bin index) lookup types; it is ignored
// and recomputed for NUCLIDE lookups.
__device__ void calculate_micro_xs(   double p_energy, int nuc, long n_isotopes,
                           long n_gridpoints,
                           double * __restrict__ egrid, int * __restrict__ index_data,
                           NuclideGridPoint * __restrict__ nuclide_grids,
                           long idx, double * __restrict__ xs_vector, int grid_type, int hash_bins ){
	// Variables
	double f;
	NuclideGridPoint * low, * high;
	// If using only the nuclide grid, we must perform a binary search
	// to find the energy location in this particular nuclide's grid.
	if( grid_type == NUCLIDE )
	{
		// Perform binary search on the Nuclide Grid to find the index
		idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1);
		// pull ptr from nuclide grid and check to ensure that
		// we're not reading off the end of the nuclide's grid
		if( idx == n_gridpoints - 1 )
			low = &nuclide_grids[nuc*n_gridpoints + idx - 1];
		else
			low = &nuclide_grids[nuc*n_gridpoints + idx];
	}
	else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed.
	{
		// pull ptr from energy grid and check to ensure that
		// we're not reading off the end of the nuclide's grid
		if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 )
			low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1];
		else
			low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]];
	}
	else // Hash grid
	{
		// load lower bounding index
		int u_low = index_data[idx * n_isotopes + nuc];
		// Determine higher bounding index
		int u_high;
		if( idx == hash_bins - 1 )
			u_high = n_gridpoints - 1;
		else
			u_high = index_data[(idx+1)*n_isotopes + nuc] + 1;
		// Check edge cases to make sure energy is actually between these
		// Then, if things look good, search for gridpoint in the nuclide grid
		// within the lower and higher limits we've calculated.
		double e_low  = nuclide_grids[nuc*n_gridpoints + u_low].energy;
		double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy;
		int lower;
		if( p_energy <= e_low )
			lower = 0;
		else if( p_energy >= e_high )
			lower = n_gridpoints - 1;
		else
			lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high);
		if( lower == n_gridpoints - 1 )
			low = &nuclide_grids[nuc*n_gridpoints + lower - 1];
		else
			low = &nuclide_grids[nuc*n_gridpoints + lower];
	}
	// `low` always sits at least one entry before the grid end, so `high`
	// is a valid neighboring gridpoint for linear interpolation.
	high = low + 1;
	// calculate the re-useable interpolation factor
	f = (high->energy - p_energy) / (high->energy - low->energy);
	// Total XS
	xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs);
	// Elastic XS
	xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs);
	// Absorbtion XS
	xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs);
	// Fission XS
	xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs);
	// Nu Fission XS
	xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs);
}
// Calculates macroscopic cross section based on a given material & energy
// Computes the five-channel macroscopic cross section for material `mat`
// at energy `p_energy` by summing concentration-weighted microscopic cross
// sections over every nuclide the material contains.
__device__ void calculate_macro_xs( double p_energy, int mat, long n_isotopes,
                         long n_gridpoints, int * __restrict__ num_nucs,
                         double * __restrict__ concs,
                         double * __restrict__ egrid, int * __restrict__ index_data,
                         NuclideGridPoint * __restrict__ nuclide_grids,
                         int * __restrict__ mats,
                         double * __restrict__ macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){
	int p_nuc; // the nuclide we are looking up
	long idx = -1;
	double conc; // the concentration of the nuclide in the material
	// cleans out macro_xs_vector
	for( int k = 0; k < 5; k++ )
		macro_xs_vector[k] = 0;
	// If we are using the unionized energy grid (UEG), we only
	// need to perform 1 binary search per macroscopic lookup.
	// If we are using the nuclide grid search, it will have to be
	// done inside of the "calculate_micro_xs" function for each different
	// nuclide in the material.
	if( grid_type == UNIONIZED )
		idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid);
	else if( grid_type == HASH )
	{
		// Hash lookup: map the energy directly to its equal-width bin.
		double du = 1.0 / hash_bins;
		idx = p_energy / du;
	}
	// Once we find the pointer array on the UEG, we can pull the data
	// from the respective nuclide grids, as well as the nuclide
	// concentration data for the material
	// Each nuclide from the material needs to have its micro-XS array
	// looked up & interpolatied (via calculate_micro_xs). Then, the
	// micro XS is multiplied by the concentration of that nuclide
	// in the material, and added to the total macro XS array.
	// (Independent -- though if parallelizing, must use atomic operations
	// or otherwise control access to the xs_vector and macro_xs_vector to
	// avoid simulataneous writing to the same data structure)
	for( int j = 0; j < num_nucs[mat]; j++ )
	{
		double xs_vector[5];
		// mats/concs are flattened [material][slot] arrays, max_num_nucs wide.
		p_nuc = mats[mat*max_num_nucs + j];
		conc = concs[mat*max_num_nucs + j];
		calculate_micro_xs( p_energy, p_nuc, n_isotopes,
		                    n_gridpoints, egrid, index_data,
		                    nuclide_grids, idx, xs_vector, grid_type, hash_bins );
		for( int k = 0; k < 5; k++ )
			macro_xs_vector[k] += xs_vector[k] * conc;
	}
}
// binary search for energy on unionized energy grid
// returns lower index
// Bisection search on the ascending unionized energy grid A[0..n-1].
// Narrows a [lo, hi] bracket until it is one interval wide and returns the
// lower bound -- identical bracketing behavior to the original loop.
__device__ long grid_search( long n, double quarry, double * __restrict__ A)
{
	long lo = 0;
	long hi = n - 1;
	while( hi - lo > 1 )
	{
		const long mid = lo + (hi - lo) / 2;
		if( A[mid] > quarry )
			hi = mid;
		else
			lo = mid;
	}
	return lo;
}
// binary search for energy on nuclide energy grid
// Bisection search on a nuclide's ascending energy grid, restricted to the
// caller-supplied [low, high] bracket. Narrows the bracket until it is one
// interval wide and returns the lower bound.
__host__ __device__ long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high)
{
	long lo = low;
	long hi = high;
	while( hi - lo > 1 )
	{
		const long mid = lo + (hi - lo) / 2;
		if( A[mid].energy > quarry )
			hi = mid;
		else
			lo = mid;
	}
	return lo;
}
// picks a material based on a probabilistic distribution
// Samples a material index 0..11 from a fixed volume-fraction distribution,
// consuming one RNG sample.
// NOTE(review): the inner running sum accumulates dist[j] for j = i..1 and
// never includes dist[0]; this is the sampling loop as shipped in XSBench,
// preserved verbatim here rather than "corrected".
__device__ int pick_mat( uint64_t * seed )
{
	// I have a nice spreadsheet supporting these numbers. They are
	// the fractions (by volume) of material in the core. Not a
	// *perfect* approximation of where XS lookups are going to occur,
	// but this will do a good job of biasing the system nonetheless.
	// Also could be argued that doing fractions by weight would be
	// a better approximation, but volume does a good enough job for now.
	double dist[12];
	dist[0]  = 0.140;	// fuel
	dist[1]  = 0.052;	// cladding
	dist[2]  = 0.275;	// cold, borated water
	dist[3]  = 0.134;	// hot, borated water
	dist[4]  = 0.154;	// RPV
	dist[5]  = 0.064;	// Lower, radial reflector
	dist[6]  = 0.066;	// Upper reflector / top plate
	dist[7]  = 0.055;	// bottom plate
	dist[8]  = 0.008;	// bottom nozzle
	dist[9]  = 0.015;	// top nozzle
	dist[10] = 0.025;	// top of fuel assemblies
	dist[11] = 0.013;	// bottom of fuel assemblies
	double roll = LCG_random_double(seed);
	// makes a pick based on the distro
	for( int i = 0; i < 12; i++ )
	{
		double running = 0;
		for( int j = i; j > 0; j-- )
			running += dist[j];
		if( roll < running )
			return i;
	}
	return 0;
}
// Advances the 63-bit LCG state in-place by one step and maps the new
// state to a double in [0, 1).
__host__ __device__ double LCG_random_double(uint64_t * seed)
{
	// LCG parameters
	const uint64_t m = 9223372036854775808ULL; // 2^63 modulus
	const uint64_t a = 2806196910506780709ULL; // multiplier
	const uint64_t c = 1ULL;                   // increment
	const uint64_t next = (a * (*seed) + c) % m;
	*seed = next;
	return (double) next / (double) m;
}
// Jumps the LCG ahead by n steps in O(log n): repeatedly squares the
// affine map x -> a*x + c while folding in the bits of n, exactly as the
// original binary-exponentiation loop did. Intermediate products rely on
// the same unsigned 64-bit wraparound as the original; only the final
// result is reduced mod 2^63.
__device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
	// LCG parameters
	const uint64_t m = 9223372036854775808ULL; // 2^63 modulus
	uint64_t mult = 2806196910506780709ULL;    // a^(2^k) running power
	uint64_t inc  = 1ULL;                      // increment paired with mult
	uint64_t a_acc = 1;                        // accumulated multiplier
	uint64_t c_acc = 0;                        // accumulated increment
	for( uint64_t steps = n % m; steps > 0; steps >>= 1 )
	{
		if( steps & 1 )
		{
			a_acc *= mult;
			c_acc = c_acc * mult + inc;
		}
		inc  *= (mult + 1);
		mult *= mult;
	}
	return (a_acc * seed + c_acc) % m;
}
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// OPTIMIZED VARIANT FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// This section contains a number of optimized variants of some of the above
// functions, which each deploy a different combination of optimizations strategies
// specific to GPU. By default, XSBench will not run any of these variants. They
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Optimization 1 -- Basic kernel splitting of sampling & lookup routines
////////////////////////////////////////////////////////////////////////////////////
// This optimization requires a little extra data to store all material IDs and
// energies for the sampled particles between kernel calls. By itself, this
// optimization is likely actually a bit of a slowdown compared to the baseline
// kernel. However, it will be used by better optimization kernels down the line.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 1 driver: splits sampling and lookup into two kernels.
// Allocates per-lookup sample buffers, runs both kernels, reduces the
// per-lookup verification values, and returns the verification scalar.
unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData GSD, int mype)
{
	const char * optimization_name = "Optimization 1 - basic sample/lookup kernel splitting";

	if( mype == 0)	printf("Simulation Kernel:\"%s\"\n", optimization_name);

	////////////////////////////////////////////////////////////////////////////////
	// Allocate Additional Data Structures Needed by Optimized Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Allocating additional device data required by kernel...\n");
	size_t sz;
	size_t total_sz = 0;

	// Per-lookup sampled neutron energies
	sz = in.lookups * sizeof(double);
	gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) );
	total_sz += sz;
	GSD.length_p_energy_samples = in.lookups;

	// Per-lookup sampled material IDs
	sz = in.lookups * sizeof(int);
	gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) );
	total_sz += sz;
	GSD.length_mat_samples = in.lookups;

	if( mype == 0)	printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

	////////////////////////////////////////////////////////////////////////////////
	// Configure & Launch Simulation Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Beginning optimized simulation...\n");

	int nthreads = 32;
	int nblocks = ceil( (double) in.lookups / 32.0);

	hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD );
	gpuErrchk( hipDeviceSynchronize() );

	hipLaunchKernelGGL(( xs_lookup_kernel_optimization_1), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD );
	gpuErrchk( hipDeviceSynchronize() );

	////////////////////////////////////////////////////////////////////////////////
	// Reduce Verification Results
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Reducing verification results...\n");

	// Init with 0ULL so thrust accumulates in unsigned long long rather than
	// int, avoiding potential overflow for very large lookup counts.
	// NOTE(review): this reduces raw pointers with no execution policy;
	// confirm GSD.verification is host-accessible (e.g. managed memory) or
	// add an explicit thrust::device policy.
	unsigned long long verification_scalar = thrust::reduce(GSD.verification, GSD.verification + in.lookups, 0ULL);
	gpuErrchk( hipDeviceSynchronize() );

	// Release the sample buffers. GSD is passed by value, so the caller never
	// sees these pointers -- without freeing here they would leak.
	gpuErrchk( hipFree(GSD.p_energy_samples) );
	gpuErrchk( hipFree(GSD.mat_samples) );

	return verification_scalar;
}
// Samples one (energy, material) pair per lookup and stores it in GSD's
// sample arrays. One thread per lookup; threads past in.lookups exit early.
__global__ void sampling_kernel(Inputs in, SimulationData GSD )
{
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;

	if( tid >= in.lookups )
		return;

	// Each lookup consumes two random samples, so fast-forward the shared
	// starting seed by 2*tid to give this thread its own subsequence.
	uint64_t seed = fast_forward_LCG(STARTING_SEED, 2*tid);

	// Energy is drawn first, then material -- same order as the reference.
	GSD.p_energy_samples[tid] = LCG_random_double(&seed);
	GSD.mat_samples[tid] = pick_mat(&seed);
}
// Performs one macroscopic XS lookup per thread using the energies and
// materials produced by sampling_kernel, then records a verification value.
__global__ void xs_lookup_kernel_optimization_1(Inputs in, SimulationData GSD )
{
	// One lookup per thread; also the index into the verification array.
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;

	if( tid >= in.lookups )
		return;

	double xs[5] = {0};

	// Macroscopic cross section lookup (5 reaction channels).
	calculate_macro_xs(
			GSD.p_energy_samples[tid],  // Sampled neutron energy (in lethargy)
			GSD.mat_samples[tid],       // Sampled material type index
			in.n_isotopes,              // Total isotopes in simulation
			in.n_gridpoints,            // Gridpoints per isotope
			GSD.num_nucs,               // Nuclides per material (1-D)
			GSD.concs,                  // Nuclide concentrations per material (flattened 2-D)
			GSD.unionized_energy_array, // 1-D unionized energy array
			GSD.index_grid,             // Indices into nuclide grid per unionized level (flattened 2-D)
			GSD.nuclide_grid,           // Energy levels + XS data for all nuclides (flattened 2-D)
			GSD.mats,                   // Nuclide indices composing each material (flattened 2-D)
			xs,                         // Out: macroscopic XS, 5 channels
			in.grid_type,               // Lookup type (nuclide, hash, or unionized)
			in.hash_bins,               // Hash bins (hash lookup type only)
			GSD.max_num_nucs            // Max nuclides present in any material
			);

	// Verification (and defeat of dead-code elimination): store 1 + the
	// index of the largest channel; a thrust reduction sums these later.
	// Strict '>' keeps the first index on ties, matching the reference.
	double best = -1.0;
	int best_idx = 0;
	for( int j = 0; j < 5; j++ )
	{
		if( xs[j] > best )
		{
			best = xs[j];
			best_idx = j;
		}
	}

	GSD.verification[tid] = best_idx + 1;
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 2 -- Kernel Splitting + Material-Specific Lookup Kernels
////////////////////////////////////////////////////////////////////////////////////
// This one builds on the first optimization. It uses multiple kernels, one
// for each material type, to better balance the workload across threads within
// a warp. This works because each material will have a different number of
// isotopes, with some having a ton, meaning that SIMD efficiency can be rather
// low by default. Better efficiency may be gained in further optimizations by
// sorting the lookups first.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 2 driver: sample once, then launch one lookup kernel per
// material so each launch has more uniform per-warp work. Returns the
// reduced verification scalar.
unsigned long long run_event_based_simulation_optimization_2(Inputs in, SimulationData GSD, int mype)
{
	const char * optimization_name = "Optimization 2 - Material Lookup Kernels";

	if( mype == 0)	printf("Simulation Kernel:\"%s\"\n", optimization_name);

	////////////////////////////////////////////////////////////////////////////////
	// Allocate Additional Data Structures Needed by Optimized Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Allocating additional device data required by kernel...\n");
	size_t sz;
	size_t total_sz = 0;

	// Per-lookup sampled neutron energies
	sz = in.lookups * sizeof(double);
	gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) );
	total_sz += sz;
	GSD.length_p_energy_samples = in.lookups;

	// Per-lookup sampled material IDs
	sz = in.lookups * sizeof(int);
	gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) );
	total_sz += sz;
	GSD.length_mat_samples = in.lookups;

	if( mype == 0)	printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

	////////////////////////////////////////////////////////////////////////////////
	// Configure & Launch Simulation Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Beginning optimized simulation...\n");

	int nthreads = 32;
	int nblocks = ceil( (double) in.lookups / 32.0);

	hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD );
	gpuErrchk( hipDeviceSynchronize() );

	// Launch one full-size kernel per material; each kernel processes only
	// the lookups whose sampled material matches its ID.
	for( int m = 0; m < 12; m++ )
		hipLaunchKernelGGL(( xs_lookup_kernel_optimization_2), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD, m );
	gpuErrchk( hipDeviceSynchronize() );

	////////////////////////////////////////////////////////////////////////////////
	// Reduce Verification Results
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Reducing verification results...\n");

	// 0ULL init => unsigned long long accumulation (no int overflow risk).
	// NOTE(review): raw-pointer thrust::reduce with no execution policy;
	// confirm GSD.verification is host-accessible or add thrust::device.
	unsigned long long verification_scalar = thrust::reduce(GSD.verification, GSD.verification + in.lookups, 0ULL);
	gpuErrchk( hipDeviceSynchronize() );

	// Free the sample buffers; GSD is a by-value copy, so these pointers
	// would otherwise leak on return.
	gpuErrchk( hipFree(GSD.p_energy_samples) );
	gpuErrchk( hipFree(GSD.mat_samples) );

	return verification_scalar;
}
// Material-specific variant of the lookup kernel: only threads whose sampled
// material matches `m` perform a lookup; all other threads exit immediately.
// Launched once per material so surviving threads do uniform work.
__global__ void xs_lookup_kernel_optimization_2(Inputs in, SimulationData GSD, int m )
{
	// One lookup per thread; also the index into the verification array.
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;

	if( tid >= in.lookups )
		return;

	// Skip lookups that belong to a different material's launch.
	const int mat = GSD.mat_samples[tid];
	if( mat != m )
		return;

	double xs[5] = {0};

	// Macroscopic cross section lookup (5 reaction channels).
	calculate_macro_xs(
			GSD.p_energy_samples[tid],  // Sampled neutron energy (in lethargy)
			mat,                        // Sampled material type index
			in.n_isotopes,              // Total isotopes in simulation
			in.n_gridpoints,            // Gridpoints per isotope
			GSD.num_nucs,               // Nuclides per material (1-D)
			GSD.concs,                  // Nuclide concentrations per material (flattened 2-D)
			GSD.unionized_energy_array, // 1-D unionized energy array
			GSD.index_grid,             // Indices into nuclide grid per unionized level (flattened 2-D)
			GSD.nuclide_grid,           // Energy levels + XS data for all nuclides (flattened 2-D)
			GSD.mats,                   // Nuclide indices composing each material (flattened 2-D)
			xs,                         // Out: macroscopic XS, 5 channels
			in.grid_type,               // Lookup type (nuclide, hash, or unionized)
			in.hash_bins,               // Hash bins (hash lookup type only)
			GSD.max_num_nucs            // Max nuclides present in any material
			);

	// Verification value: 1 + index of the largest channel. Strict '>'
	// keeps the first index on ties, matching the reference implementation.
	double best = -1.0;
	int best_idx = 0;
	for( int j = 0; j < 5; j++ )
	{
		if( xs[j] > best )
		{
			best = xs[j];
			best_idx = j;
		}
	}

	GSD.verification[tid] = best_idx + 1;
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 3 -- Kernel Splitting + Fuel or Not-Fuel Lookups
////////////////////////////////////////////////////////////////////////////////////
// This optimization alters Optimization 2. Instead of executing a kernel call for
// ALL different material types, only two different calls are made. One for fuel,
// and one for all the other materials. As the fuel material has by far the most
// isotopes, it takes much longer than the rest.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 3 driver: like optimization 2, but only two lookup launches --
// one for fuel (material 0, by far the most isotopes) and one for all other
// materials. Returns the reduced verification scalar.
unsigned long long run_event_based_simulation_optimization_3(Inputs in, SimulationData GSD, int mype)
{
	const char * optimization_name = "Optimization 3 - Fuel or Other Lookup Kernels";

	if( mype == 0)	printf("Simulation Kernel:\"%s\"\n", optimization_name);

	////////////////////////////////////////////////////////////////////////////////
	// Allocate Additional Data Structures Needed by Optimized Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Allocating additional device data required by kernel...\n");
	size_t sz;
	size_t total_sz = 0;

	// Per-lookup sampled neutron energies
	sz = in.lookups * sizeof(double);
	gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) );
	total_sz += sz;
	GSD.length_p_energy_samples = in.lookups;

	// Per-lookup sampled material IDs
	sz = in.lookups * sizeof(int);
	gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) );
	total_sz += sz;
	GSD.length_mat_samples = in.lookups;

	if( mype == 0)	printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

	////////////////////////////////////////////////////////////////////////////////
	// Configure & Launch Simulation Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Beginning optimized simulation...\n");

	int nthreads = 32;
	int nblocks = ceil( (double) in.lookups / 32.0);

	hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD );
	gpuErrchk( hipDeviceSynchronize() );

	// Two launches: non-fuel lookups (is_fuel = 0), then fuel lookups (1).
	hipLaunchKernelGGL(( xs_lookup_kernel_optimization_3), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD, 0 );
	hipLaunchKernelGGL(( xs_lookup_kernel_optimization_3), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD, 1 );
	gpuErrchk( hipDeviceSynchronize() );

	////////////////////////////////////////////////////////////////////////////////
	// Reduce Verification Results
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Reducing verification results...\n");

	// 0ULL init => unsigned long long accumulation (no int overflow risk).
	// NOTE(review): raw-pointer thrust::reduce with no execution policy;
	// confirm GSD.verification is host-accessible or add thrust::device.
	unsigned long long verification_scalar = thrust::reduce(GSD.verification, GSD.verification + in.lookups, 0ULL);
	gpuErrchk( hipDeviceSynchronize() );

	// Free the sample buffers; GSD is a by-value copy, so these pointers
	// would otherwise leak on return.
	gpuErrchk( hipFree(GSD.p_energy_samples) );
	gpuErrchk( hipFree(GSD.mat_samples) );

	return verification_scalar;
}
// Fuel/other variant of the lookup kernel. Launched twice: is_fuel == 1
// handles only fuel lookups (material 0); is_fuel == 0 handles all other
// materials. Threads outside the current pass exit without doing work.
__global__ void xs_lookup_kernel_optimization_3(Inputs in, SimulationData GSD, int is_fuel )
{
	// One lookup per thread; also the index into the verification array.
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;

	if( tid >= in.lookups )
		return;

	const int mat = GSD.mat_samples[tid];

	// Same predicate as the reference: fuel pass takes material 0, other
	// pass takes everything else.
	const bool do_lookup = ((is_fuel == 1) && (mat == 0)) || ((is_fuel == 0) && (mat != 0));
	if( !do_lookup )
		return;

	double xs[5] = {0};

	// Macroscopic cross section lookup (5 reaction channels).
	calculate_macro_xs(
			GSD.p_energy_samples[tid],  // Sampled neutron energy (in lethargy)
			mat,                        // Sampled material type index
			in.n_isotopes,              // Total isotopes in simulation
			in.n_gridpoints,            // Gridpoints per isotope
			GSD.num_nucs,               // Nuclides per material (1-D)
			GSD.concs,                  // Nuclide concentrations per material (flattened 2-D)
			GSD.unionized_energy_array, // 1-D unionized energy array
			GSD.index_grid,             // Indices into nuclide grid per unionized level (flattened 2-D)
			GSD.nuclide_grid,           // Energy levels + XS data for all nuclides (flattened 2-D)
			GSD.mats,                   // Nuclide indices composing each material (flattened 2-D)
			xs,                         // Out: macroscopic XS, 5 channels
			in.grid_type,               // Lookup type (nuclide, hash, or unionized)
			in.hash_bins,               // Hash bins (hash lookup type only)
			GSD.max_num_nucs            // Max nuclides present in any material
			);

	// Verification value: 1 + index of the largest channel. Strict '>'
	// keeps the first index on ties, matching the reference implementation.
	double best = -1.0;
	int best_idx = 0;
	for( int j = 0; j < 5; j++ )
	{
		if( xs[j] > best )
		{
			best = xs[j];
			best_idx = j;
		}
	}

	GSD.verification[tid] = best_idx + 1;
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 4 -- Kernel Splitting + All Material Lookups + Full Sort
////////////////////////////////////////////////////////////////////////////////////
// This optimization builds on optimization 2, adding in a full sort before
// hand so that the warps should be densely packed together. This should maximize
// SIMD efficiency of the kernel, but may incur an added cost for the sort.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 4 driver: sample, sort all lookups by material so warps are
// densely packed, then launch one kernel per material over its contiguous
// segment. Returns the reduced verification scalar.
unsigned long long run_event_based_simulation_optimization_4(Inputs in, SimulationData GSD, int mype)
{
	const char * optimization_name = "Optimization 4 - All Material Lookup Kernels + Material Sort";

	if( mype == 0)	printf("Simulation Kernel:\"%s\"\n", optimization_name);

	////////////////////////////////////////////////////////////////////////////////
	// Allocate Additional Data Structures Needed by Optimized Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Allocating additional device data required by kernel...\n");
	size_t sz;
	size_t total_sz = 0;

	// Per-lookup sampled neutron energies
	sz = in.lookups * sizeof(double);
	gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) );
	total_sz += sz;
	GSD.length_p_energy_samples = in.lookups;

	// Per-lookup sampled material IDs
	sz = in.lookups * sizeof(int);
	gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) );
	total_sz += sz;
	GSD.length_mat_samples = in.lookups;

	if( mype == 0)	printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

	////////////////////////////////////////////////////////////////////////////////
	// Configure & Launch Simulation Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Beginning optimized simulation...\n");

	int nthreads = 32;
	int nblocks = ceil( (double) in.lookups / 32.0);

	hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD );
	gpuErrchk( hipDeviceSynchronize() );

	// Count lookups per material so each material kernel can be launched
	// over exactly its contiguous segment after the sort.
	// NOTE(review): raw-pointer thrust calls with no execution policy;
	// confirm these buffers are host-accessible or add thrust::device.
	int n_lookups_per_material[12];
	for( int m = 0; m < 12; m++ )
		n_lookups_per_material[m] = thrust::count(GSD.mat_samples, GSD.mat_samples + in.lookups, m);

	// Sort lookups by material (energies carried along as values).
	thrust::sort_by_key(GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples);

	// Launch one kernel per material over its segment.
	int offset = 0;
	for( int m = 0; m < 12; m++ )
	{
		// Skip materials with no lookups: a zero-block launch is an
		// invalid kernel configuration.
		if( n_lookups_per_material[m] == 0 )
			continue;
		nthreads = 32;
		nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads);
		hipLaunchKernelGGL(( xs_lookup_kernel_optimization_4), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD, m, n_lookups_per_material[m], offset );
		offset += n_lookups_per_material[m];
	}
	gpuErrchk( hipDeviceSynchronize() );

	////////////////////////////////////////////////////////////////////////////////
	// Reduce Verification Results
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Reducing verification results...\n");

	// 0ULL init => unsigned long long accumulation (no int overflow risk).
	unsigned long long verification_scalar = thrust::reduce(GSD.verification, GSD.verification + in.lookups, 0ULL);
	gpuErrchk( hipDeviceSynchronize() );

	// Free the sample buffers; GSD is a by-value copy, so these pointers
	// would otherwise leak on return.
	gpuErrchk( hipFree(GSD.p_energy_samples) );
	gpuErrchk( hipFree(GSD.mat_samples) );

	return verification_scalar;
}
// Lookup kernel for material-sorted sample arrays: processes `n_lookups`
// consecutive entries starting at `offset`, all expected to be material `m`.
__global__ void xs_lookup_kernel_optimization_4(Inputs in, SimulationData GSD, int m, int n_lookups, int offset )
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;

	if( tid >= n_lookups )
		return;

	// Shift into this material's contiguous segment of the sorted arrays.
	tid += offset;

	// Defensive check: after the material sort this should always match.
	const int mat = GSD.mat_samples[tid];
	if( mat != m )
		return;

	double xs[5] = {0};

	// Macroscopic cross section lookup (5 reaction channels).
	calculate_macro_xs(
			GSD.p_energy_samples[tid],  // Sampled neutron energy (in lethargy)
			mat,                        // Sampled material type index
			in.n_isotopes,              // Total isotopes in simulation
			in.n_gridpoints,            // Gridpoints per isotope
			GSD.num_nucs,               // Nuclides per material (1-D)
			GSD.concs,                  // Nuclide concentrations per material (flattened 2-D)
			GSD.unionized_energy_array, // 1-D unionized energy array
			GSD.index_grid,             // Indices into nuclide grid per unionized level (flattened 2-D)
			GSD.nuclide_grid,           // Energy levels + XS data for all nuclides (flattened 2-D)
			GSD.mats,                   // Nuclide indices composing each material (flattened 2-D)
			xs,                         // Out: macroscopic XS, 5 channels
			in.grid_type,               // Lookup type (nuclide, hash, or unionized)
			in.hash_bins,               // Hash bins (hash lookup type only)
			GSD.max_num_nucs            // Max nuclides present in any material
			);

	// Verification value: 1 + index of the largest channel. Strict '>'
	// keeps the first index on ties, matching the reference implementation.
	double best = -1.0;
	int best_idx = 0;
	for( int j = 0; j < 5; j++ )
	{
		if( xs[j] > best )
		{
			best = xs[j];
			best_idx = j;
		}
	}

	GSD.verification[tid] = best_idx + 1;
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 5 -- Kernel Splitting + Fuel/Other Lookups + Fuel/Other Partition
////////////////////////////////////////////////////////////////////////////////////
// This optimization is similar to optimization 4, but instead of sorting
// fully by material, we just sort by fuel or not fuel. Similarly, instead of
// launching kernels for all materials, similar to optimization 3 we only launch
// kernels for the fuel and other materials.
////////////////////////////////////////////////////////////////////////////////////
// Predicate functor: true iff a material ID denotes fuel (material 0).
// Intended as the comparator for the fuel/not-fuel partitioning stage.
struct is_mat_fuel{
	__host__ __device__
	bool operator()(const int & mat)
	{
		return mat == 0;
	}
};
/* unsigned long long run_event_based_simulation_optimization_5(Inputs in, SimulationData GSD, int mype) */
/* { */
/* const char * optimization_name = "Optimization 5 - Fuel/No Fuel Lookup Kernels + Fuel/No Fuel Sort"; */
/* if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); */
/* //////////////////////////////////////////////////////////////////////////////// */
/* // Allocate Additional Data Structures Needed by Optimized Kernel */
/* //////////////////////////////////////////////////////////////////////////////// */
/* if( mype == 0) printf("Allocating additional device data required by kernel...\n"); */
/* size_t sz; */
/* size_t total_sz = 0; */
/* sz = in.lookups * sizeof(double); */
/* gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); */
/* total_sz += sz; */
/* GSD.length_p_energy_samples = in.lookups; */
/* sz = in.lookups * sizeof(int); */
/* gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); */
/* total_sz += sz; */
/* GSD.length_mat_samples = in.lookups; */
/* if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); */
/* //////////////////////////////////////////////////////////////////////////////// */
/* // Configure & Launch Simulation Kernel */
/* //////////////////////////////////////////////////////////////////////////////// */
/* if( mype == 0) printf("Beginning optimized simulation...\n"); */
/* int nthreads = 32; */
/* int nblocks = ceil( (double) in.lookups / 32.0); */
/* hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); */
/* gpuErrchk( hipPeekAtLastError() ); */
/* gpuErrchk( hipDeviceSynchronize() ); */
/* // Count the number of fuel material lookups that need to be performed (fuel id = 0) */
/* int n_fuel_lookups = thrust::count(GSD.mat_samples, GSD.mat_samples + in.lookups, 0); */
/* // Partition fuel into the first part of the array */
/* thrust::partition(GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples, is_mat_fuel()); */
/* // Launch all material kernels individually (asynchronous is allowed) */
/* nblocks = ceil( (double) n_fuel_lookups / (double) nthreads); */
/* xs_lookup_kernel_optimization_5<<<nblocks, nthreads>>>( in, GSD, n_fuel_lookups, 0 ); */
/* nblocks = ceil( (double) (in.lookups - n_fuel_lookups) / (double) nthreads); */
/* xs_lookup_kernel_optimization_5<<<nblocks, nthreads>>>( in, GSD, in.lookups-n_fuel_lookups, n_fuel_lookups ); */
/* gpuErrchk( hipPeekAtLastError() ); */
/* gpuErrchk( hipDeviceSynchronize() ); */
/* //////////////////////////////////////////////////////////////////////////////// */
/* // Reduce Verification Results */
/* //////////////////////////////////////////////////////////////////////////////// */
/* if( mype == 0) printf("Reducing verification results...\n"); */
/* unsigned long verification_scalar = thrust::reduce(GSD.verification, GSD.verification + in.lookups, 0); */
/* gpuErrchk( hipPeekAtLastError() ); */
/* gpuErrchk( hipDeviceSynchronize() ); */
/* return verification_scalar; */
/* } */
/* __global__ void xs_lookup_kernel_optimization_5(Inputs in, SimulationData GSD, int n_lookups, int offset ) */
/* { */
/* // The lookup ID. Used to set the seed, and to store the verification value */
/* int i = blockIdx.x *blockDim.x + threadIdx.x; */
/* if( i >= n_lookups ) */
/* return; */
/* i += offset; */
/* double macro_xs_vector[5] = {0}; */
/* // Perform macroscopic Cross Section Lookup */
/* calculate_macro_xs( */
/* GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) */
/* GSD.mat_samples[i], // Sampled material type index neutron is in */
/* in.n_isotopes, // Total number of isotopes in simulation */
/* in.n_gridpoints, // Number of gridpoints per isotope in simulation */
/* GSD.num_nucs, // 1-D array with number of nuclides per material */
/* GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material */
/* GSD.unionized_energy_array, // 1-D Unionized energy array */
/* GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level */
/* GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation */
/* GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material */
/* macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) */
/* in.grid_type, // Lookup type (nuclide, hash, or unionized) */
/* in.hash_bins, // Number of hash bins used (if using hash lookup type) */
/* GSD.max_num_nucs // Maximum number of nuclides present in any material */
/* ); */
/* // For verification, and to prevent the compiler from optimizing */
/* // all work out, we interrogate the returned macro_xs_vector array */
/* // to find its maximum value index, then increment the verification */
/* // value by that index. In this implementation, we have each thread */
/* // write to its thread_id index in an array, which we will reduce */
/* // with a thrust reduction kernel after the main simulation kernel. */
/* double max = -1.0; */
/* int max_idx = 0; */
/* for(int j = 0; j < 5; j++ ) */
/* { */
/* if( macro_xs_vector[j] > max ) */
/* { */
/* max = macro_xs_vector[j]; */
/* max_idx = j; */
/* } */
/* } */
/* GSD.verification[i] = max_idx+1; */
/* } */
////////////////////////////////////////////////////////////////////////////////////
// Optimization 6 -- Kernel Splitting + All Material Lookups + Full Sort
// + Energy Sort
////////////////////////////////////////////////////////////////////////////////////
// This optimization builds on optimization 4, adding in a second sort by energy.
// It is extremely fast, as now most of the threads within a warp will be hitting
// the same indices in the lookup grids. This greatly reduces thread divergence and
// greatly improves cache efficiency and re-use.
//
// However, it is unlikely that this exact optimization would be possible in a real
// application like OpenMC. One major difference is that particle objects are quite
// large, often having 50+ variable fields, such that sorting them in memory becomes
// rather expensive. Instead, the best possible option would probably be to create
// intermediate indexing (per Hamilton et. al 2019), and run the kernels indirectly.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 6 driver: builds on optimization 4 by additionally sorting
// each material's lookups by energy, maximizing cache reuse and minimizing
// divergence. Returns the reduced verification scalar.
unsigned long long run_event_based_simulation_optimization_6(Inputs in, SimulationData GSD, int mype)
{
	const char * optimization_name = "Optimization 6 - Material & Energy Sorts + Material-specific Kernels";

	if( mype == 0)	printf("Simulation Kernel:\"%s\"\n", optimization_name);

	////////////////////////////////////////////////////////////////////////////////
	// Allocate Additional Data Structures Needed by Optimized Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Allocating additional device data required by kernel...\n");
	size_t sz;
	size_t total_sz = 0;

	// Per-lookup sampled neutron energies
	sz = in.lookups * sizeof(double);
	gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) );
	total_sz += sz;
	GSD.length_p_energy_samples = in.lookups;

	// Per-lookup sampled material IDs
	sz = in.lookups * sizeof(int);
	gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) );
	total_sz += sz;
	GSD.length_mat_samples = in.lookups;

	if( mype == 0)	printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

	////////////////////////////////////////////////////////////////////////////////
	// Configure & Launch Simulation Kernel
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Beginning optimized simulation...\n");

	int nthreads = 32;
	int nblocks = ceil( (double) in.lookups / 32.0);

	hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD );
	gpuErrchk( hipDeviceSynchronize() );

	// Count lookups per material so segments can be located after sorting.
	// NOTE(review): raw-pointer thrust calls with no execution policy;
	// confirm these buffers are host-accessible or add thrust::device.
	int n_lookups_per_material[12];
	for( int m = 0; m < 12; m++ )
		n_lookups_per_material[m] = thrust::count(GSD.mat_samples, GSD.mat_samples + in.lookups, m);

	// Sort by material first (energies carried along as values).
	thrust::sort_by_key(GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples);

	// Then sort each material's segment by energy (material IDs carried
	// along; they are uniform within a segment, so order is preserved).
	int offset = 0;
	for( int m = 0; m < 12; m++ )
	{
		thrust::sort_by_key(GSD.p_energy_samples + offset, GSD.p_energy_samples + offset + n_lookups_per_material[m], GSD.mat_samples + offset);
		offset += n_lookups_per_material[m];
	}

	// Launch one kernel per material over its segment.
	offset = 0;
	for( int m = 0; m < 12; m++ )
	{
		// Skip materials with no lookups: a zero-block launch is an
		// invalid kernel configuration.
		if( n_lookups_per_material[m] == 0 )
			continue;
		nthreads = 32;
		nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads);
		hipLaunchKernelGGL(( xs_lookup_kernel_optimization_4), dim3(nblocks), dim3(nthreads), 0, 0,  in, GSD, m, n_lookups_per_material[m], offset );
		offset += n_lookups_per_material[m];
	}
	gpuErrchk( hipDeviceSynchronize() );

	////////////////////////////////////////////////////////////////////////////////
	// Reduce Verification Results
	////////////////////////////////////////////////////////////////////////////////
	if( mype == 0)	printf("Reducing verification results...\n");

	// 0ULL init => unsigned long long accumulation (no int overflow risk).
	unsigned long long verification_scalar = thrust::reduce(GSD.verification, GSD.verification + in.lookups, 0ULL);
	gpuErrchk( hipDeviceSynchronize() );

	// Free the sample buffers; GSD is a by-value copy, so these pointers
	// would otherwise leak on return.
	gpuErrchk( hipFree(GSD.p_energy_samples) );
	gpuErrchk( hipFree(GSD.mat_samples) );

	return verification_scalar;
}
| 65d692f6b8eeece28c1a7505a63747926fcfb476.cu | #include "XSbench_header.cuh"
////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// port of the original CPU OpenMP code to CUDA with few significant changes or
// optimizations made. Following these functions are a number of optimized variants,
// which each deploy a different combination of optimizations strategies. By
// default, XSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
// Baseline driver: launches the one-lookup-per-thread kernel on the caller's
// stream and synchronizes. The thrust verification reduction is disabled in
// this variant, so the function always reports zero.
unsigned long long run_event_based_simulation_baseline(Inputs in, SimulationData GSD, int mype, cudaStream_t stream_app)
{
    if( mype == 0) printf("Running baseline event-based simulation...\n");

    // One thread per lookup, 32 threads per block, grid rounded up.
    const int threads_per_block = 32;
    const int block_count = ceil( (double) in.lookups / 32.0);

    xs_lookup_kernel_baseline<<<block_count, threads_per_block, 0, stream_app>>>( in, GSD );
    gpuErrchk( cudaDeviceSynchronize() );

    if( mype == 0) printf("Reducing verification results...\n");
    gpuErrchk( cudaDeviceSynchronize() );

    // Verification reduction intentionally left disabled in this variant.
    return 0;
}
// In this kernel, we perform a single lookup with each thread. Threads within a warp
// do not really have any relation to each other, and divergence due to high nuclide count fuel
// material lookups are costly. This kernel constitutes baseline performance.
// Baseline lookup kernel: one macroscopic XS lookup per thread. Threads in a
// warp are unrelated lookups, so divergence on high-nuclide-count materials
// (fuel) is costly; this kernel constitutes the performance baseline.
__global__ void xs_lookup_kernel_baseline(Inputs in, SimulationData GSD )
{
    // Global thread id doubles as the lookup id / verification slot index.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if( tid >= in.lookups )
        return;

    // Each lookup consumes two PRNG draws, so fast-forward the shared
    // stream to this lookup's position for reproducible sampling.
    uint64_t seed = fast_forward_LCG(STARTING_SEED, 2 * tid);

    // Sample particle state: energy (in lethargy) and material index.
    const double p_energy = LCG_random_double(&seed);
    const int mat = pick_mat(&seed);

    double macro_xs_vector[5] = {0};

    // Macroscopic XS lookup over all nuclides of the sampled material.
    calculate_macro_xs(
        p_energy,
        mat,
        in.n_isotopes,
        in.n_gridpoints,
        GSD.num_nucs,
        GSD.concs,
        GSD.unionized_energy_array,
        GSD.index_grid,
        GSD.nuclide_grid,
        GSD.mats,
        macro_xs_vector,
        in.grid_type,
        in.hash_bins,
        GSD.max_num_nucs
    );

    // Record (argmax reaction channel + 1) so the compiler cannot discard
    // the work; the host reduces this array after the kernel completes.
    double best = -1.0;
    int best_channel = 0;
    for( int j = 0; j < 5; j++ )
    {
        if( macro_xs_vector[j] > best )
        {
            best = macro_xs_vector[j];
            best_channel = j;
        }
    }
    GSD.verification[tid] = best_channel + 1;
}
// Calculates the microscopic cross section for a given nuclide & energy
// Interpolates the 5-channel microscopic cross section for nuclide `nuc` at
// energy `p_energy`, writing results into xs_vector[0..4] (total, elastic,
// absorption, fission, nu-fission). How the bounding gridpoint pair is found
// depends on grid_type:
//   NUCLIDE   - binary search over this nuclide's full energy grid;
//   UNIONIZED - `idx` is a precomputed unionized-grid index; the per-nuclide
//               position is read straight from index_data (no search);
//   otherwise - hash grid: index_data brackets the search window by hash bin,
//               then a bounded binary search runs inside that window.
// In every path, if the located index is the last gridpoint, it is stepped
// back by one so that `high = low + 1` never reads past the grid's end.
__device__ void calculate_micro_xs( double p_energy, int nuc, long n_isotopes,
                           long n_gridpoints,
                           double * __restrict__ egrid, int * __restrict__ index_data,
                           NuclideGridPoint * __restrict__ nuclide_grids,
                           long idx, double * __restrict__ xs_vector, int grid_type, int hash_bins ){
    // Variables
    double f;                        // interpolation factor, reused for all 5 channels
    NuclideGridPoint * low, * high;  // bounding gridpoints: low->energy <= p_energy <= high->energy (typical case)

    // If using only the nuclide grid, we must perform a binary search
    // to find the energy location in this particular nuclide's grid.
    if( grid_type == NUCLIDE )
    {
        // Perform binary search on the Nuclide Grid to find the index
        idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1);

        // pull ptr from nuclide grid and check to ensure that
        // we're not reading off the end of the nuclide's grid
        if( idx == n_gridpoints - 1 )
            low = &nuclide_grids[nuc*n_gridpoints + idx - 1];
        else
            low = &nuclide_grids[nuc*n_gridpoints + idx];
    }
    else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed.
    {
        // pull ptr from energy grid and check to ensure that
        // we're not reading off the end of the nuclide's grid
        if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 )
            low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1];
        else
            low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]];
    }
    else // Hash grid
    {
        // load lower bounding index for this hash bin
        int u_low = index_data[idx * n_isotopes + nuc];

        // Determine higher bounding index (last bin is clamped to grid end)
        int u_high;
        if( idx == hash_bins - 1 )
            u_high = n_gridpoints - 1;
        else
            u_high = index_data[(idx+1)*n_isotopes + nuc] + 1;

        // Check edge cases to make sure energy is actually between these.
        // Then, if things look good, search for gridpoint in the nuclide grid
        // within the lower and higher limits we've calculated.
        double e_low  = nuclide_grids[nuc*n_gridpoints + u_low].energy;
        double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy;
        int lower;
        if( p_energy <= e_low )
            lower = 0;                    // below the bin window: clamp to grid start
        else if( p_energy >= e_high )
            lower = n_gridpoints - 1;     // above the bin window: clamp to grid end
        else
            lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high);

        // Step back if we landed on the final gridpoint (see header note)
        if( lower == n_gridpoints - 1 )
            low = &nuclide_grids[nuc*n_gridpoints + lower - 1];
        else
            low = &nuclide_grids[nuc*n_gridpoints + lower];
    }

    high = low + 1;

    // calculate the re-useable interpolation factor
    f = (high->energy - p_energy) / (high->energy - low->energy);

    // Total XS
    xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs);

    // Elastic XS
    xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs);

    // Absorbtion XS
    xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs);

    // Fission XS
    xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs);

    // Nu Fission XS
    xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs);
}
// Calculates macroscopic cross section based on a given material & energy
// Computes the 5-channel macroscopic cross section for material `mat` at
// energy `p_energy` by summing, over every nuclide in the material,
// (microscopic XS of the nuclide) * (its concentration in the material).
// For the UNIONIZED and HASH grid types, the shared grid index `idx` is
// resolved once here (one binary search or one hash-bin computation) and
// reused for every nuclide; for the NUCLIDE grid type, idx stays -1 and
// each calculate_micro_xs call performs its own per-nuclide search.
__device__ void calculate_macro_xs( double p_energy, int mat, long n_isotopes,
                           long n_gridpoints, int * __restrict__ num_nucs,
                           double * __restrict__ concs,
                           double * __restrict__ egrid, int * __restrict__ index_data,
                           NuclideGridPoint * __restrict__ nuclide_grids,
                           int * __restrict__ mats,
                           double * __restrict__ macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){
    int p_nuc;      // the nuclide we are looking up
    long idx = -1;  // shared grid index; -1 signals "not applicable" (NUCLIDE grid type)
    double conc;    // the concentration of the nuclide in the material

    // cleans out macro_xs_vector
    for( int k = 0; k < 5; k++ )
        macro_xs_vector[k] = 0;

    // If we are using the unionized energy grid (UEG), we only
    // need to perform 1 binary search per macroscopic lookup.
    // If we are using the nuclide grid search, it will have to be
    // done inside of the "calculate_micro_xs" function for each different
    // nuclide in the material.
    if( grid_type == UNIONIZED )
        idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid);
    else if( grid_type == HASH )
    {
        // Hash bin index: truncation of p_energy / bin_width.
        // NOTE(review): assumes p_energy lies in [0, 1) so idx < hash_bins.
        double du = 1.0 / hash_bins;
        idx = p_energy / du;
    }

    // Once we find the pointer array on the UEG, we can pull the data
    // from the respective nuclide grids, as well as the nuclide
    // concentration data for the material
    // Each nuclide from the material needs to have its micro-XS array
    // looked up & interpolatied (via calculate_micro_xs). Then, the
    // micro XS is multiplied by the concentration of that nuclide
    // in the material, and added to the total macro XS array.
    // (Independent -- though if parallelizing, must use atomic operations
    // or otherwise control access to the xs_vector and macro_xs_vector to
    // avoid simulataneous writing to the same data structure)
    for( int j = 0; j < num_nucs[mat]; j++ )
    {
        double xs_vector[5];
        p_nuc = mats[mat*max_num_nucs + j];   // flattened 2-D indexing: row = material
        conc = concs[mat*max_num_nucs + j];
        calculate_micro_xs( p_energy, p_nuc, n_isotopes,
                n_gridpoints, egrid, index_data,
                nuclide_grids, idx, xs_vector, grid_type, hash_bins );
        for( int k = 0; k < 5; k++ )
            macro_xs_vector[k] += xs_vector[k] * conc;
    }
}
// binary search for energy on unionized energy grid
// returns lower index
// Binary search for `quarry` on the (ascending) unionized energy grid A of
// length n. Returns the lower bounding index, i.e. the largest index whose
// value does not exceed quarry (clamped to [0, n-2] bracket semantics by
// the loop invariant hi - lo == 1 at exit).
__device__ long grid_search( long n, double quarry, double * __restrict__ A)
{
    long lo = 0;
    long hi = n - 1;

    while( hi - lo > 1 )
    {
        const long mid = lo + ( (hi - lo) / 2 );
        if( A[mid] > quarry )
            hi = mid;   // quarry is strictly below A[mid]
        else
            lo = mid;   // A[mid] <= quarry: keep mid as a lower bound
    }

    return lo;
}
// binary search for energy on nuclide energy grid
// Binary search over one nuclide's energy grid (entries sorted ascending by
// .energy), restricted to the window [low, high]. Returns the lower bounding
// index. The parameter n is unused; retained for interface compatibility.
__host__ __device__ long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high)
{
    long lo = low;
    long hi = high;

    while( hi - lo > 1 )
    {
        const long mid = lo + ( (hi - lo) / 2 );
        if( A[mid].energy > quarry )
            hi = mid;   // quarry lies below this gridpoint
        else
            lo = mid;   // this gridpoint is a valid lower bound
    }

    return lo;
}
// picks a material based on a probabilistic distribution
// Picks a material index in [0, 11] from a fixed probability distribution.
// The weights are the fractions (by volume) of material in the core - not a
// *perfect* approximation of where XS lookups occur, but a good bias.
//
// Improvement over the original: the original recomputed the prefix sum
// from scratch for every candidate i (O(12^2) adds per call); this version
// walks the cumulative distribution once (O(12)). The selection rule is
// unchanged: material i (i >= 1) is chosen when roll < sum(dist[1..i]) -
// note dist[0] is deliberately excluded from the running sum, exactly as in
// the original - and any roll at or beyond the accumulated total falls
// through to material 0 (fuel).
__device__ int pick_mat( uint64_t * seed )
{
    double dist[12];
    dist[0]  = 0.140; // fuel
    dist[1]  = 0.052; // cladding
    dist[2]  = 0.275; // cold, borated water
    dist[3]  = 0.134; // hot, borated water
    dist[4]  = 0.154; // RPV
    dist[5]  = 0.064; // Lower, radial reflector
    dist[6]  = 0.066; // Upper reflector / top plate
    dist[7]  = 0.055; // bottom plate
    dist[8]  = 0.008; // bottom nozzle
    dist[9]  = 0.015; // top nozzle
    dist[10] = 0.025; // top of fuel assemblies
    dist[11] = 0.013; // bottom of fuel assemblies

    double roll = LCG_random_double(seed);

    // Single cumulative-distribution walk (dist[0] excluded, see header).
    double running = 0;
    for( int i = 1; i < 12; i++ )
    {
        running += dist[i];
        if( roll < running )
            return i;
    }

    return 0;
}
// Advances the 63-bit LCG state pointed to by `seed` one step and maps the
// new state into [0, 1). Parameters: modulus m = 2^63, multiplier a, and
// increment c = 1 (must match fast_forward_LCG).
__host__ __device__ double LCG_random_double(uint64_t * seed)
{
    const uint64_t m = 9223372036854775808ULL; // 2^63
    const uint64_t a = 2806196910506780709ULL;
    const uint64_t c = 1ULL;

    const uint64_t next = (a * (*seed) + c) % m;
    *seed = next;
    return (double) next / (double) m;
}
// Skips the LCG ahead by n steps in O(log n) time (standard Monte Carlo
// LCG fast-forward). After n steps the state is
//     a^n * seed + c * (a^(n-1) + ... + a + 1)   (mod m),
// and the square-and-multiply loop builds a_new = a^n and c_new = that
// geometric-series term bit by bit from n's binary representation.
// All intermediate products intentionally wrap modulo 2^64 (unsigned
// overflow); because m = 2^63 divides 2^64, the final % m still yields the
// exact result. Parameters must match LCG_random_double.
__device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
    // LCG parameters
    const uint64_t m = 9223372036854775808ULL; // 2^63
    uint64_t a = 2806196910506780709ULL;
    uint64_t c = 1ULL;

    n = n % m;

    uint64_t a_new = 1;   // accumulates a^n (mod 2^64)
    uint64_t c_new = 0;   // accumulates c * geometric series (mod 2^64)

    while(n > 0)
    {
        if(n & 1)
        {
            // fold the current square's contribution into the result
            a_new *= a;
            c_new = c_new * a + c;
        }
        // advance (a, c) to represent a doubled step count
        c *= (a + 1);
        a *= a;

        n >>= 1;
    }

    return (a_new * seed + c_new) % m;
}
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// OPTIMIZED VARIANT FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// This section contains a number of optimized variants of some of the above
// functions, which each deploy a different combination of optimizations strategies
// specific to GPU. By default, XSBench will not run any of these variants. They
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Optimization 1 -- Basic kernel splitting of sampling & lookup routines
////////////////////////////////////////////////////////////////////////////////////
// This optimization requires a little extra data to store all material IDs and
// energies for the sampled particles between kernel calls. By itself, this
// optimization is likely actually a bit of a slowdown compared to the baseline
// kernel. However, it will be used by better optimization kernels down the line.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 1: split sampling and lookup into two kernels. Device-side
// sample buffers carry (energy, material) between the kernels; the
// verification array is then reduced on the device.
unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData GSD, int mype)
{
    const char * optimization_name = "Optimization 1 - basic sample/lookup kernel splitting";

    if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);

    ////////////////////////////////////////////////////////////////////////////////
    // Allocate Additional Data Structures Needed by Optimized Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Allocating additional device data required by kernel...\n");
    size_t sz;
    size_t total_sz = 0;

    // Per-lookup sampled energy and material id, written by sampling_kernel
    // and read by the lookup kernel.
    sz = in.lookups * sizeof(double);
    gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) );
    total_sz += sz;
    GSD.length_p_energy_samples = in.lookups;

    sz = in.lookups * sizeof(int);
    gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) );
    total_sz += sz;
    GSD.length_mat_samples = in.lookups;

    if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

    ////////////////////////////////////////////////////////////////////////////////
    // Configure & Launch Simulation Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Beginning optimized simulation...\n");

    int nthreads = 32;
    int nblocks = ceil( (double) in.lookups / 32.0);

    sampling_kernel<<<nblocks, nthreads>>>( in, GSD );
    gpuErrchk( cudaGetLastError() );   // catch launch-configuration errors
    gpuErrchk( cudaDeviceSynchronize() );

    xs_lookup_kernel_optimization_1<<<nblocks, nthreads>>>( in, GSD );
    gpuErrchk( cudaGetLastError() );
    gpuErrchk( cudaDeviceSynchronize() );

    ////////////////////////////////////////////////////////////////////////////////
    // Reduce Verification Results
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Reducing verification results...\n");

    // GSD.verification is device memory, so the reduction must be dispatched
    // with the explicit device policy: raw pointers would otherwise be
    // treated as host iterators and dereferenced on the host. The 0UL init
    // keeps the accumulator an unsigned long (a plain 0 would make Thrust
    // accumulate in int, overflowing for large lookup counts).
    unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0UL);
    gpuErrchk( cudaDeviceSynchronize() );

    // Release the temporary sample buffers allocated above.
    gpuErrchk( cudaFree(GSD.p_energy_samples) );
    gpuErrchk( cudaFree(GSD.mat_samples) );

    return verification_scalar;
}
// Samples one (energy, material) pair per lookup and stores it in the
// device-side sample arrays for consumption by a later lookup kernel.
__global__ void sampling_kernel(Inputs in, SimulationData GSD )
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if( tid >= in.lookups )
        return;

    // Each lookup consumes two PRNG draws; fast-forward the shared stream
    // to this lookup's position so results match a serial ordering.
    uint64_t seed = fast_forward_LCG(STARTING_SEED, 2 * tid);

    // Energy is drawn first, then the material (draw order matters).
    GSD.p_energy_samples[tid] = LCG_random_double(&seed);
    GSD.mat_samples[tid] = pick_mat(&seed);
}
// Performs one macroscopic XS lookup per thread using the pre-sampled
// (energy, material) pairs produced by sampling_kernel.
__global__ void xs_lookup_kernel_optimization_1(Inputs in, SimulationData GSD )
{
    // Global thread id doubles as the lookup id / verification slot index.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if( tid >= in.lookups )
        return;

    double macro_xs_vector[5] = {0};

    // Macroscopic XS lookup driven by the pre-sampled particle state.
    calculate_macro_xs(
        GSD.p_energy_samples[tid],
        GSD.mat_samples[tid],
        in.n_isotopes,
        in.n_gridpoints,
        GSD.num_nucs,
        GSD.concs,
        GSD.unionized_energy_array,
        GSD.index_grid,
        GSD.nuclide_grid,
        GSD.mats,
        macro_xs_vector,
        in.grid_type,
        in.hash_bins,
        GSD.max_num_nucs
    );

    // Record (argmax reaction channel + 1) so the compiler cannot discard
    // the work; the host reduces this array after the kernel completes.
    double best = -1.0;
    int best_channel = 0;
    for( int j = 0; j < 5; j++ )
    {
        if( macro_xs_vector[j] > best )
        {
            best = macro_xs_vector[j];
            best_channel = j;
        }
    }
    GSD.verification[tid] = best_channel + 1;
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 2 -- Kernel Splitting + Material-Specific Lookup Kernels
////////////////////////////////////////////////////////////////////////////////////
// This one builds on the first optimization. It uses multiple kernels, one
// for each material type, to better balance the workload across threads within
// a warp. This works because each material will have a different number of
// isotopes, with some having a ton, meaning that SIMD efficiency can be rather
// low by default. Better efficiency may be gained in further optimizations by
// sorting the lookups first.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 2: like Optimization 1, but the lookup phase is split into
// one kernel launch per material so threads in a warp share a material
// (improving SIMD efficiency, since material nuclide counts differ widely).
unsigned long long run_event_based_simulation_optimization_2(Inputs in, SimulationData GSD, int mype)
{
    const char * optimization_name = "Optimization 2 - Material Lookup Kernels";

    if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);

    ////////////////////////////////////////////////////////////////////////////////
    // Allocate Additional Data Structures Needed by Optimized Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Allocating additional device data required by kernel...\n");
    size_t sz;
    size_t total_sz = 0;

    sz = in.lookups * sizeof(double);
    gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) );
    total_sz += sz;
    GSD.length_p_energy_samples = in.lookups;

    sz = in.lookups * sizeof(int);
    gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) );
    total_sz += sz;
    GSD.length_mat_samples = in.lookups;

    if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

    ////////////////////////////////////////////////////////////////////////////////
    // Configure & Launch Simulation Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Beginning optimized simulation...\n");

    int nthreads = 32;
    int nblocks = ceil( (double) in.lookups / 32.0);

    sampling_kernel<<<nblocks, nthreads>>>( in, GSD );
    gpuErrchk( cudaGetLastError() );   // catch launch-configuration errors
    gpuErrchk( cudaDeviceSynchronize() );

    // Launch all material kernels individually; each kernel filters for its
    // own material id and exits early on the rest.
    for( int m = 0; m < 12; m++ )
        xs_lookup_kernel_optimization_2<<<nblocks, nthreads>>>( in, GSD, m );
    gpuErrchk( cudaGetLastError() );
    gpuErrchk( cudaDeviceSynchronize() );

    ////////////////////////////////////////////////////////////////////////////////
    // Reduce Verification Results
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Reducing verification results...\n");

    // Device-policy reduction over device memory; 0UL keeps the accumulator
    // an unsigned long (an int 0 would overflow for large lookup counts).
    unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0UL);
    gpuErrchk( cudaDeviceSynchronize() );

    // Release the temporary sample buffers.
    gpuErrchk( cudaFree(GSD.p_energy_samples) );
    gpuErrchk( cudaFree(GSD.mat_samples) );

    return verification_scalar;
}
// Material-filtered lookup kernel: this kernel instance services only the
// lookups whose pre-sampled material matches `m`; all other threads exit.
__global__ void xs_lookup_kernel_optimization_2(Inputs in, SimulationData GSD, int m )
{
    // Global thread id doubles as the lookup id / verification slot index.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if( tid >= in.lookups )
        return;

    // Skip lookups belonging to other materials.
    const int mat = GSD.mat_samples[tid];
    if( mat != m )
        return;

    double macro_xs_vector[5] = {0};

    // Macroscopic XS lookup driven by the pre-sampled particle state.
    calculate_macro_xs(
        GSD.p_energy_samples[tid],
        mat,
        in.n_isotopes,
        in.n_gridpoints,
        GSD.num_nucs,
        GSD.concs,
        GSD.unionized_energy_array,
        GSD.index_grid,
        GSD.nuclide_grid,
        GSD.mats,
        macro_xs_vector,
        in.grid_type,
        in.hash_bins,
        GSD.max_num_nucs
    );

    // Record (argmax reaction channel + 1) so the compiler cannot discard
    // the work; the host reduces this array after the kernel completes.
    double best = -1.0;
    int best_channel = 0;
    for( int j = 0; j < 5; j++ )
    {
        if( macro_xs_vector[j] > best )
        {
            best = macro_xs_vector[j];
            best_channel = j;
        }
    }
    GSD.verification[tid] = best_channel + 1;
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 3 -- Kernel Splitting + Fuel or Not-Fuel Lookups
////////////////////////////////////////////////////////////////////////////////////
// This optimization alters Optimization 2. Instead of executing a kernel call for
// ALL different material types, only two different calls are made. One for fuel,
// and one for all the other materials. As the fuel material has by far the most
// isotopes, it takes much longer than the rest.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 3: like Optimization 2, but with only two lookup launches -
// one for fuel (by far the most isotopes) and one for everything else.
unsigned long long run_event_based_simulation_optimization_3(Inputs in, SimulationData GSD, int mype)
{
    const char * optimization_name = "Optimization 3 - Fuel or Other Lookup Kernels";

    if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);

    ////////////////////////////////////////////////////////////////////////////////
    // Allocate Additional Data Structures Needed by Optimized Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Allocating additional device data required by kernel...\n");
    size_t sz;
    size_t total_sz = 0;

    sz = in.lookups * sizeof(double);
    gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) );
    total_sz += sz;
    GSD.length_p_energy_samples = in.lookups;

    sz = in.lookups * sizeof(int);
    gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) );
    total_sz += sz;
    GSD.length_mat_samples = in.lookups;

    if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

    ////////////////////////////////////////////////////////////////////////////////
    // Configure & Launch Simulation Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Beginning optimized simulation...\n");

    int nthreads = 32;
    int nblocks = ceil( (double) in.lookups / 32.0);

    sampling_kernel<<<nblocks, nthreads>>>( in, GSD );
    gpuErrchk( cudaGetLastError() );   // catch launch-configuration errors
    gpuErrchk( cudaDeviceSynchronize() );

    // Two passes over all lookups: non-fuel (0), then fuel (1).
    xs_lookup_kernel_optimization_3<<<nblocks, nthreads>>>( in, GSD, 0 );
    xs_lookup_kernel_optimization_3<<<nblocks, nthreads>>>( in, GSD, 1 );
    gpuErrchk( cudaGetLastError() );
    gpuErrchk( cudaDeviceSynchronize() );

    ////////////////////////////////////////////////////////////////////////////////
    // Reduce Verification Results
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Reducing verification results...\n");

    // Device-policy reduction over device memory; 0UL keeps the accumulator
    // an unsigned long (an int 0 would overflow for large lookup counts).
    unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0UL);
    gpuErrchk( cudaDeviceSynchronize() );

    // Release the temporary sample buffers.
    gpuErrchk( cudaFree(GSD.p_energy_samples) );
    gpuErrchk( cudaFree(GSD.mat_samples) );

    return verification_scalar;
}
// Fuel/other-filtered lookup kernel: when is_fuel == 1 this instance
// services only fuel lookups (mat == 0); when is_fuel == 0 it services all
// non-fuel lookups. Threads whose lookup does not match exit immediately.
__global__ void xs_lookup_kernel_optimization_3(Inputs in, SimulationData GSD, int is_fuel )
{
    // Global thread id doubles as the lookup id / verification slot index.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if( tid >= in.lookups )
        return;

    const int mat = GSD.mat_samples[tid];

    // Guard clause: proceed only when this lookup belongs to this pass.
    const bool do_lookup = ((is_fuel == 1) && (mat == 0)) || ((is_fuel == 0) && (mat != 0 ));
    if( !do_lookup )
        return;

    double macro_xs_vector[5] = {0};

    // Macroscopic XS lookup driven by the pre-sampled particle state.
    calculate_macro_xs(
        GSD.p_energy_samples[tid],
        mat,
        in.n_isotopes,
        in.n_gridpoints,
        GSD.num_nucs,
        GSD.concs,
        GSD.unionized_energy_array,
        GSD.index_grid,
        GSD.nuclide_grid,
        GSD.mats,
        macro_xs_vector,
        in.grid_type,
        in.hash_bins,
        GSD.max_num_nucs
    );

    // Record (argmax reaction channel + 1) so the compiler cannot discard
    // the work; the host reduces this array after the kernel completes.
    double best = -1.0;
    int best_channel = 0;
    for( int j = 0; j < 5; j++ )
    {
        if( macro_xs_vector[j] > best )
        {
            best = macro_xs_vector[j];
            best_channel = j;
        }
    }
    GSD.verification[tid] = best_channel + 1;
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 4 -- Kernel Splitting + All Material Lookups + Full Sort
////////////////////////////////////////////////////////////////////////////////////
// This optimization builds on optimization 2, adding in a full sort before
// hand so that the warps should be densely packed together. This should maximize
// SIMD efficiency of the kernel, but may incur an added cost for the sort.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 4: sample, then sort all lookups by material so each
// material-specific launch operates on a contiguous, densely packed slice
// of lookups (maximizing SIMD efficiency), at the cost of the sort.
unsigned long long run_event_based_simulation_optimization_4(Inputs in, SimulationData GSD, int mype)
{
    const char * optimization_name = "Optimization 4 - All Material Lookup Kernels + Material Sort";

    if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);

    ////////////////////////////////////////////////////////////////////////////////
    // Allocate Additional Data Structures Needed by Optimized Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Allocating additional device data required by kernel...\n");
    size_t sz;
    size_t total_sz = 0;

    sz = in.lookups * sizeof(double);
    gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) );
    total_sz += sz;
    GSD.length_p_energy_samples = in.lookups;

    sz = in.lookups * sizeof(int);
    gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) );
    total_sz += sz;
    GSD.length_mat_samples = in.lookups;

    if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);

    ////////////////////////////////////////////////////////////////////////////////
    // Configure & Launch Simulation Kernel
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Beginning optimized simulation...\n");

    int nthreads = 32;
    int nblocks = ceil( (double) in.lookups / 32.0);

    sampling_kernel<<<nblocks, nthreads>>>( in, GSD );
    gpuErrchk( cudaGetLastError() );   // catch launch-configuration errors
    gpuErrchk( cudaDeviceSynchronize() );

    // Count how many lookups each of the 12 materials received. The sample
    // buffers are cudaMalloc'd device memory, so every Thrust call below is
    // dispatched with the explicit device policy (raw pointers would
    // otherwise be treated as host iterators).
    int n_lookups_per_material[12];
    for( int m = 0; m < 12; m++ )
        n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m);

    // Sort lookups by material so each kernel reads a contiguous slice.
    thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples);

    // Launch one kernel per material over its contiguous slice. Materials
    // with zero samples are skipped: ceil(0/32) gives a zero-block grid,
    // and a <<<0, nthreads>>> launch is an invalid configuration.
    int offset = 0;
    for( int m = 0; m < 12; m++ )
    {
        if( n_lookups_per_material[m] > 0 )
        {
            nthreads = 32;
            nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads);
            xs_lookup_kernel_optimization_4<<<nblocks, nthreads>>>( in, GSD, m, n_lookups_per_material[m], offset );
        }
        offset += n_lookups_per_material[m];
    }
    gpuErrchk( cudaGetLastError() );
    gpuErrchk( cudaDeviceSynchronize() );

    ////////////////////////////////////////////////////////////////////////////////
    // Reduce Verification Results
    ////////////////////////////////////////////////////////////////////////////////
    if( mype == 0) printf("Reducing verification results...\n");

    // 0UL keeps the accumulator an unsigned long; a plain int 0 would make
    // Thrust accumulate (and overflow) in int for large lookup counts.
    unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0UL);
    gpuErrchk( cudaDeviceSynchronize() );

    // Release the temporary sample buffers.
    gpuErrchk( cudaFree(GSD.p_energy_samples) );
    gpuErrchk( cudaFree(GSD.mat_samples) );

    return verification_scalar;
}
// Material-specific macroscopic cross section lookup kernel (optimization 4).
// The sample arrays have been sorted by material id; this kernel is launched
// once per material m and covers the contiguous slice
// [offset, offset + n_lookups) of the sorted samples.  Each thread performs
// one lookup and records a verification value in GSD.verification.
__global__ void xs_lookup_kernel_optimization_4(Inputs in, SimulationData GSD, int m, int n_lookups, int offset )
{
	// One thread per lookup within this material's slice.
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if( tid >= n_lookups )
		return;

	// Absolute index into the material-sorted sample arrays.
	const int idx = tid + offset;

	// Guard against stragglers whose sampled material differs from m.
	const int sampled_mat = GSD.mat_samples[idx];
	if( sampled_mat != m )
		return;

	// Result of the lookup: 5 reaction channels.
	double xs_result[5] = {0};

	// Perform macroscopic Cross Section Lookup
	calculate_macro_xs(
			GSD.p_energy_samples[idx],  // Sampled neutron energy (in lethargy)
			sampled_mat,                // Sampled material type index neutron is in
			in.n_isotopes,              // Total number of isotopes in simulation
			in.n_gridpoints,            // Number of gridpoints per isotope in simulation
			GSD.num_nucs,               // 1-D array with number of nuclides per material
			GSD.concs,                  // Flattened 2-D array of per-material nuclide concentrations
			GSD.unionized_energy_array, // 1-D unionized energy array
			GSD.index_grid,             // Flattened 2-D grid of indices into the nuclide grid
			GSD.nuclide_grid,           // Flattened 2-D grid of energy levels and XS data
			GSD.mats,                   // Flattened 2-D array of nuclide indices per material
			xs_result,                  // Output: the 5 reaction channels
			in.grid_type,               // Lookup type (nuclide, hash, or unionized)
			in.hash_bins,               // Number of hash bins (hash lookup type only)
			GSD.max_num_nucs            // Max nuclides present in any material
	);

	// For verification (and to keep the compiler from eliding the work),
	// store 1 + argmax of the returned cross section vector; the caller
	// reduces this array with thrust after the simulation kernels finish.
	double best_value = -1.0;
	int best_channel = 0;
	for( int channel = 0; channel < 5; channel++ )
	{
		if( xs_result[channel] > best_value )
		{
			best_value = xs_result[channel];
			best_channel = channel;
		}
	}
	GSD.verification[idx] = best_channel + 1;
}
////////////////////////////////////////////////////////////////////////////////////
// Optimization 5 -- Kernel Splitting + Fuel/Other Lookups + Fuel/Other Partition
////////////////////////////////////////////////////////////////////////////////////
// This optimization is similar to optimization 4, but instead of sorting
// fully by material, we just sort by fuel or not fuel. Similarly, instead of
// launching kernels for all materials, similar to optimization 3 we only launch
// kernels for the fuel and other mateirals.
////////////////////////////////////////////////////////////////////////////////////
// Comparator for partitioning stage
// Predicate functor for thrust::partition in the optimization-5 path:
// returns true when the sampled material id denotes fuel (material id 0),
// so fuel lookups are moved to the front of the array.
struct is_mat_fuel{
	__host__ __device__
	bool operator()(const int & a)
	{
		const bool sampled_fuel = (a == 0);
		return sampled_fuel;
	}
};
/* unsigned long long run_event_based_simulation_optimization_5(Inputs in, SimulationData GSD, int mype) */
/* { */
/* const char * optimization_name = "Optimization 5 - Fuel/No Fuel Lookup Kernels + Fuel/No Fuel Sort"; */
/* if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); */
/* //////////////////////////////////////////////////////////////////////////////// */
/* // Allocate Additional Data Structures Needed by Optimized Kernel */
/* //////////////////////////////////////////////////////////////////////////////// */
/* if( mype == 0) printf("Allocating additional device data required by kernel...\n"); */
/* size_t sz; */
/* size_t total_sz = 0; */
/* sz = in.lookups * sizeof(double); */
/* gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); */
/* total_sz += sz; */
/* GSD.length_p_energy_samples = in.lookups; */
/* sz = in.lookups * sizeof(int); */
/* gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); */
/* total_sz += sz; */
/* GSD.length_mat_samples = in.lookups; */
/* if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); */
/* //////////////////////////////////////////////////////////////////////////////// */
/* // Configure & Launch Simulation Kernel */
/* //////////////////////////////////////////////////////////////////////////////// */
/* if( mype == 0) printf("Beginning optimized simulation...\n"); */
/* int nthreads = 32; */
/* int nblocks = ceil( (double) in.lookups / 32.0); */
/* sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); */
/* gpuErrchk( cudaPeekAtLastError() ); */
/* gpuErrchk( cudaDeviceSynchronize() ); */
/* // Count the number of fuel material lookups that need to be performed (fuel id = 0) */
/* int n_fuel_lookups = thrust::count(GSD.mat_samples, GSD.mat_samples + in.lookups, 0); */
/* // Partition fuel into the first part of the array */
/* thrust::partition(GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples, is_mat_fuel()); */
/* // Launch all material kernels individually (asynchronous is allowed) */
/* nblocks = ceil( (double) n_fuel_lookups / (double) nthreads); */
/* xs_lookup_kernel_optimization_5<<<nblocks, nthreads>>>( in, GSD, n_fuel_lookups, 0 ); */
/* nblocks = ceil( (double) (in.lookups - n_fuel_lookups) / (double) nthreads); */
/* xs_lookup_kernel_optimization_5<<<nblocks, nthreads>>>( in, GSD, in.lookups-n_fuel_lookups, n_fuel_lookups ); */
/* gpuErrchk( cudaPeekAtLastError() ); */
/* gpuErrchk( cudaDeviceSynchronize() ); */
/* //////////////////////////////////////////////////////////////////////////////// */
/* // Reduce Verification Results */
/* //////////////////////////////////////////////////////////////////////////////// */
/* if( mype == 0) printf("Reducing verification results...\n"); */
/* unsigned long verification_scalar = thrust::reduce(GSD.verification, GSD.verification + in.lookups, 0); */
/* gpuErrchk( cudaPeekAtLastError() ); */
/* gpuErrchk( cudaDeviceSynchronize() ); */
/* return verification_scalar; */
/* } */
/* __global__ void xs_lookup_kernel_optimization_5(Inputs in, SimulationData GSD, int n_lookups, int offset ) */
/* { */
/* // The lookup ID. Used to set the seed, and to store the verification value */
/* int i = blockIdx.x *blockDim.x + threadIdx.x; */
/* if( i >= n_lookups ) */
/* return; */
/* i += offset; */
/* double macro_xs_vector[5] = {0}; */
/* // Perform macroscopic Cross Section Lookup */
/* calculate_macro_xs( */
/* GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) */
/* GSD.mat_samples[i], // Sampled material type index neutron is in */
/* in.n_isotopes, // Total number of isotopes in simulation */
/* in.n_gridpoints, // Number of gridpoints per isotope in simulation */
/* GSD.num_nucs, // 1-D array with number of nuclides per material */
/* GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material */
/* GSD.unionized_energy_array, // 1-D Unionized energy array */
/* GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level */
/* GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation */
/* GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material */
/* macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) */
/* in.grid_type, // Lookup type (nuclide, hash, or unionized) */
/* in.hash_bins, // Number of hash bins used (if using hash lookup type) */
/* GSD.max_num_nucs // Maximum number of nuclides present in any material */
/* ); */
/* // For verification, and to prevent the compiler from optimizing */
/* // all work out, we interrogate the returned macro_xs_vector array */
/* // to find its maximum value index, then increment the verification */
/* // value by that index. In this implementation, we have each thread */
/* // write to its thread_id index in an array, which we will reduce */
/* // with a thrust reduction kernel after the main simulation kernel. */
/* double max = -1.0; */
/* int max_idx = 0; */
/* for(int j = 0; j < 5; j++ ) */
/* { */
/* if( macro_xs_vector[j] > max ) */
/* { */
/* max = macro_xs_vector[j]; */
/* max_idx = j; */
/* } */
/* } */
/* GSD.verification[i] = max_idx+1; */
/* } */
////////////////////////////////////////////////////////////////////////////////////
// Optimization 6 -- Kernel Splitting + All Material Lookups + Full Sort
// + Energy Sort
////////////////////////////////////////////////////////////////////////////////////
// This optimization builds on optimization 4, adding in a second sort by energy.
// It is extremely fast, as now most of the threads within a warp will be hitting
// the same indices in the lookup grids. This greatly reduces thread divergence and
// greatly improves cache efficiency and re-use.
//
// However, it is unlikely that this exact optimization would be possible in a real
// application like OpenMC. One major difference is that particle objects are quite
// large, often having 50+ variable fields, such that sorting them in memory becomes
// rather expensive. Instead, the best possible option would probably be to create
// intermediate indexing (per Hamilton et. al 2019), and run the kernels indirectly.
////////////////////////////////////////////////////////////////////////////////////
// Optimization 6 driver: samples all lookups, sorts them first by material
// and then by energy within each material (so a warp's threads hit nearby
// grid indices), then launches one material-specific lookup kernel per
// material and reduces the per-lookup verification values to a scalar.
//
// Parameters:
//   in   - simulation inputs (lookup count, grid type, hash bins, ...)
//   GSD  - device-resident simulation data; sample buffers allocated here
//   mype - rank id; rank 0 performs console logging
// Returns: reduced verification checksum.
unsigned long long run_event_based_simulation_optimization_6(Inputs in, SimulationData GSD, int mype)
{
const char * optimization_name = "Optimization 6 - Material & Energy Sorts + Material-specific Kernels";
if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);
////////////////////////////////////////////////////////////////////////////////
// Allocate Additional Data Structures Needed by Optimized Kernel
////////////////////////////////////////////////////////////////////////////////
if( mype == 0) printf("Allocating additional device data required by kernel...\n");
size_t sz;
size_t total_sz = 0;
// Sampled neutron energies, one double per lookup.
sz = in.lookups * sizeof(double);
gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) );
total_sz += sz;
GSD.length_p_energy_samples = in.lookups;
// Sampled material ids, one int per lookup.
sz = in.lookups * sizeof(int);
gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) );
total_sz += sz;
GSD.length_mat_samples = in.lookups;
// NOTE(review): these allocations live in the by-value copy of GSD and are
// not freed before return -- presumably fine for a one-shot benchmark;
// confirm if this function is ever called repeatedly.
if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);
////////////////////////////////////////////////////////////////////////////////
// Configure & Launch Simulation Kernel
////////////////////////////////////////////////////////////////////////////////
if( mype == 0) printf("Beginning optimized simulation...\n");
int nthreads = 32;
int nblocks = ceil( (double) in.lookups / 32.0);
// Fill p_energy_samples / mat_samples with random lookups.
sampling_kernel<<<nblocks, nthreads>>>( in, GSD );
/* gpuErrchk( cudaPeekAtLastError() ); */
gpuErrchk( cudaDeviceSynchronize() );
// Count the number of lookups per material (12 materials; fuel id = 0),
// one thrust::count pass per material id.
int n_lookups_per_material[12];
for( int m = 0; m < 12; m++ )
n_lookups_per_material[m] = thrust::count(GSD.mat_samples, GSD.mat_samples + in.lookups, m);
// Sort by material first
thrust::sort_by_key(GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples);
// Now, sort each material's slice by energy (keys: energies; values: the
// material ids, which are constant within the slice).
int offset = 0;
for( int m = 0; m < 12; m++ )
{
thrust::sort_by_key(GSD.p_energy_samples + offset, GSD.p_energy_samples + offset + n_lookups_per_material[m], GSD.mat_samples + offset);
offset += n_lookups_per_material[m];
}
// Launch all material kernels individually, each covering its contiguous
// slice [offset, offset + n_lookups_per_material[m]).
offset = 0;
for( int m = 0; m < 12; m++ )
{
nthreads = 32;
nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads);
xs_lookup_kernel_optimization_4<<<nblocks, nthreads>>>( in, GSD, m, n_lookups_per_material[m], offset );
offset += n_lookups_per_material[m];
}
/* gpuErrchk( cudaPeekAtLastError() ); */
gpuErrchk( cudaDeviceSynchronize() );
////////////////////////////////////////////////////////////////////////////////
// Reduce Verification Results
////////////////////////////////////////////////////////////////////////////////
if( mype == 0) printf("Reducing verification results...\n");
// NOTE(review): the init value 0 is an int literal, so thrust::reduce
// accumulates in int before widening -- verify this cannot overflow for
// large lookup counts.
unsigned long verification_scalar = thrust::reduce(GSD.verification, GSD.verification + in.lookups, 0);
/* gpuErrchk( cudaPeekAtLastError() ); */
gpuErrchk( cudaDeviceSynchronize() );
return verification_scalar;
}
|
bc5e8e94dfbfaa3817e8707054707d06bc3c0297.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "forward_maxpool_layer_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
int in_h = 1;
int in_w = 1;
int in_c = 1;
int stride_x = 2;
int stride_y = 2;
int size = XSIZE*YSIZE;
int pad = 2;
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int *indexes = NULL;
hipMalloc(&indexes, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
forward_maxpool_layer_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,in_h,in_w,in_c,stride_x,stride_y,size,pad,input,output,indexes);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
forward_maxpool_layer_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,in_h,in_w,in_c,stride_x,stride_y,size,pad,input,output,indexes);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
forward_maxpool_layer_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,in_h,in_w,in_c,stride_x,stride_y,size,pad,input,output,indexes);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bc5e8e94dfbfaa3817e8707054707d06bc3c0297.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "forward_maxpool_layer_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: for each requested matrix size (argv[1] = number of
// rows of matrices_ to sweep) and each of the 20 block shapes in blocks_,
// time 1000 launches of forward_maxpool_layer_kernel after 1 init + 10
// warmup launches, printing [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
int main(int argc, char **argv) {
	cudaSetDevice(0);
	char *p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			int n = XSIZE * YSIZE;
			int in_h = 1;
			int in_w = 1;
			int in_c = 1;
			int stride_x = 2;
			int stride_y = 2;
			int size = XSIZE * YSIZE;
			int pad = 2;
			// BUG FIX: the generated code passed XSIZE*YSIZE as a *byte*
			// count, under-allocating buffers that are indexed as
			// XSIZE*YSIZE *elements*.  Scale by the element size.
			float *input = NULL;
			cudaMalloc(&input, sizeof(float) * XSIZE * YSIZE);
			float *output = NULL;
			cudaMalloc(&output, sizeof(float) * XSIZE * YSIZE);
			int *indexes = NULL;
			cudaMalloc(&indexes, sizeof(int) * XSIZE * YSIZE);
			// Round the launch extents up to multiples of the block shape.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) {
				iXSIZE++;
			}
			while (iYSIZE % BLOCKY != 0) {
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0);  // force lazy context creation before timing
			forward_maxpool_layer_kernel<<<gridBlock, threadBlock>>>(n, in_h, in_w, in_c, stride_x, stride_y, size, pad, input, output, indexes);
			cudaDeviceSynchronize();
			// Warmup launches (not timed).
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				forward_maxpool_layer_kernel<<<gridBlock, threadBlock>>>(n, in_h, in_w, in_c, stride_x, stride_y, size, pad, input, output, indexes);
			}
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				forward_maxpool_layer_kernel<<<gridBlock, threadBlock>>>(n, in_h, in_w, in_c, stride_x, stride_y, size, pad, input, output, indexes);
			}
			// BUG FIX: launches are asynchronous; without this sync the timed
			// interval measures enqueue cost only, not kernel execution.
			cudaDeviceSynchronize();
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
			// BUG FIX: buffers were leaked on every sweep iteration.
			cudaFree(input);
			cudaFree(output);
			cudaFree(indexes);
		}
	}
}
050efbff4d9cfed7eee74bcfa4f1fd6e46efa48d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlarfgx-v2.cu normal z -> d, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#include "commonblas_d.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_d
//==============================================================================
// Builds the Householder reflector H = I - tau*v*v^T that reduces the
// vector [dx0; dx] of length n to [beta; 0]: writes tau to dtau, beta to
// dA, and scales dx (the tail of v) by 1/(alpha - beta).  The norm (hence
// beta) is precomputed by the caller and passed in dxnorm.  Also shifts the
// 'it' elements preceding dx0 into dA's panel and zeroes them.
// Launch: 1-D grid, BLOCK_SIZE threads per block, one thread per element.
__global__
void magma_dlarfgx_gpu_kernel( int n, double* dx0, double* dx,
double *dtau, double *dxnorm,
double *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
// Block-shared broadcast values, written only by thread 0 of each block.
__shared__ double scale;
__shared__ double xnorm;
double dxi;
if ( j < n-1 )
dxi = dx[j];
// Thread 0 of every block recomputes tau/beta/scale (redundant per block,
// but avoids an extra kernel or global sync).
if ( i == 0 ) {
xnorm = *dxnorm;
#if (defined(PRECISION_s) || defined(PRECISION_d))
double alpha = *dx0;
double alphai = MAGMA_D_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_D_ZERO ) || n == 1 )
#else
double alpha = *dx0;
double alphar = MAGMA_D_REAL(alpha), alphai = MAGMA_D_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_D_ZERO ) || n == 0 )
#endif
{
// Degenerate case: H is the identity (tau = 0, beta = alpha).
*dtau = MAGMA_D_ZERO;
*dA = *dx0;
}
else {
#if (defined(PRECISION_s) || defined(PRECISION_d))
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = (beta - alpha) / beta;
//*dx0 = 1.; //cannot be done here because raise condition all threadblock need to read it for alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = MAGMA_D_MAKE((beta - alphar)/beta, -alphai/beta);
//*dx0 = MAGMA_D_MAKE( 1., 0.); //cannot be done here because raise condition all threadblock need to read it for alpha
*dA = MAGMA_D_MAKE(beta, 0.);
}
alpha = MAGMA_D_MAKE( MAGMA_D_REAL(alpha) - beta, MAGMA_D_IMAG(alpha));
scale = MAGMA_D_DIV( MAGMA_D_ONE, alpha);
#endif
}
}
// scale x
// Barrier: all threads must observe the shared scale/xnorm written above.
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_D_MUL(dxi, scale);
// Shift the 'it' elements preceding dx0 into the dA panel and zero them.
if (j<it){
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_D_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = norm( [dx0, dx] ) = dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's dlarfg is that the norm of dx, and hance beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
// Host wrapper: launches magma_dlarfgx_gpu_kernel on magma_stream to build
// the Householder reflector H = I - tau v v^T reducing [dx0; dx] to
// [beta; 0].  The norm (and hence beta) is precomputed by the caller and
// passed in dxnorm; dx is overwritten with v's tail and beta is stored in dA.
extern "C" void
magma_dlarfgx_gpu(
    magma_int_t n,
    magmaDouble_ptr dx0,
    magmaDouble_ptr dx,
    magmaDouble_ptr dtau,
    magmaDouble_ptr dxnorm,
    magmaDouble_ptr dA, magma_int_t iter)
{
    // Ceiling division: one thread per vector element, BLOCK_SIZE per block.
    const int num_blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 grid( num_blocks );
    dim3 block( BLOCK_SIZE );
    hipLaunchKernelGGL(( magma_dlarfgx_gpu_kernel), dim3(grid), dim3(block), 0, magma_stream , n, dx0, dx, dtau, dxnorm, dA, iter);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = norm( [dx0, dx] ) = dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's dlarfg is that the norm of dx, and hance beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
// Generates the iter-th Householder reflector (via magma_dlarfgx_gpu) and
// then updates the iter-th column of the triangular factor T used in
// blocked QR.  V (leading dimension ldv) holds previously generated
// reflectors; dwork is device scratch space.
extern "C" void
magma_dlarfgtx_gpu(
magma_int_t n,
magmaDouble_ptr dx0,
magmaDouble_ptr dx,
magmaDouble_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDouble_ptr dA, magma_int_t iter,
magmaDouble_ptr V, magma_int_t ldv,
magmaDouble_ptr T, magma_int_t ldt,
magmaDouble_ptr dwork)
{
/* Generate the elementary reflector H(iter) */
magma_dlarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter);
if (iter==0) {
// First column: T(0,0) = tau, and the pivot element of the reflector
// (stored at dx0) is set to 1.
double tt = MAGMA_D_ONE;
magmablas_dlacpy(MagmaUpperLower, 1, 1, dtau, 1, T+iter+iter*ldt, 1);
magma_dsetmatrix(1,1, &tt,1, dx0,1);
}
else {
/* Compute the iter-th column of T */
// NOTE(review): presumably dgemv_kernel3 forms V^T*v scaled by tau into
// dwork and dtrmv_kernel2 applies the triangular update into T(:,iter);
// confirm against the kernel definitions.
hipLaunchKernelGGL(( magma_dgemv_kernel3), dim3(iter), dim3(BLOCK_SIZE), 0, magma_stream , n, V, ldv, dx0, dwork, dtau );
hipLaunchKernelGGL(( magma_dtrmv_kernel2), dim3(iter), dim3(iter), 0, magma_stream , T, ldt, dwork, T+iter*ldt, dtau );
}
}
//==============================================================================
| 050efbff4d9cfed7eee74bcfa4f1fd6e46efa48d.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlarfgx-v2.cu normal z -> d, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#include "commonblas_d.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_d
//==============================================================================
// Builds the Householder reflector H = I - tau*v*v^T that reduces the
// vector [dx0; dx] of length n to [beta; 0]: writes tau to dtau, beta to
// dA, and scales dx (the tail of v) by 1/(alpha - beta).  The norm (hence
// beta) is precomputed by the caller and passed in dxnorm.  Also shifts the
// 'it' elements preceding dx0 into dA's panel and zeroes them.
// Launch: 1-D grid, BLOCK_SIZE threads per block, one thread per element.
__global__
void magma_dlarfgx_gpu_kernel( int n, double* dx0, double* dx,
double *dtau, double *dxnorm,
double *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
// Block-shared broadcast values, written only by thread 0 of each block.
__shared__ double scale;
__shared__ double xnorm;
double dxi;
if ( j < n-1 )
dxi = dx[j];
// Thread 0 of every block recomputes tau/beta/scale (redundant per block,
// but avoids an extra kernel or global sync).
if ( i == 0 ) {
xnorm = *dxnorm;
#if (defined(PRECISION_s) || defined(PRECISION_d))
double alpha = *dx0;
double alphai = MAGMA_D_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_D_ZERO ) || n == 1 )
#else
double alpha = *dx0;
double alphar = MAGMA_D_REAL(alpha), alphai = MAGMA_D_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_D_ZERO ) || n == 0 )
#endif
{
// Degenerate case: H is the identity (tau = 0, beta = alpha).
*dtau = MAGMA_D_ZERO;
*dA = *dx0;
}
else {
#if (defined(PRECISION_s) || defined(PRECISION_d))
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = (beta - alpha) / beta;
//*dx0 = 1.; //cannot be done here because raise condition all threadblock need to read it for alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
double beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = MAGMA_D_MAKE((beta - alphar)/beta, -alphai/beta);
//*dx0 = MAGMA_D_MAKE( 1., 0.); //cannot be done here because raise condition all threadblock need to read it for alpha
*dA = MAGMA_D_MAKE(beta, 0.);
}
alpha = MAGMA_D_MAKE( MAGMA_D_REAL(alpha) - beta, MAGMA_D_IMAG(alpha));
scale = MAGMA_D_DIV( MAGMA_D_ONE, alpha);
#endif
}
}
// scale x
// Barrier: all threads must observe the shared scale/xnorm written above.
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_D_MUL(dxi, scale);
// Shift the 'it' elements preceding dx0 into the dA panel and zero them.
if (j<it){
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_D_MAKE(0., 0.);
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's dlarfg is that the norm of dx, and hance beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
// Host wrapper: launches magma_dlarfgx_gpu_kernel on magma_stream to build
// the Householder reflector H = I - tau v v^T reducing [dx0; dx] to
// [beta; 0].  The norm (and hence beta) is precomputed by the caller and
// passed in dxnorm; dx is overwritten with v's tail and beta is stored in dA.
extern "C" void
magma_dlarfgx_gpu(
    magma_int_t n,
    magmaDouble_ptr dx0,
    magmaDouble_ptr dx,
    magmaDouble_ptr dtau,
    magmaDouble_ptr dxnorm,
    magmaDouble_ptr dA, magma_int_t iter)
{
    // Ceiling division: one thread per vector element, BLOCK_SIZE per block.
    const int num_blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 grid( num_blocks );
    dim3 block( BLOCK_SIZE );
    magma_dlarfgx_gpu_kernel<<< grid, block, 0, magma_stream >>>( n, dx0, dx, dtau, dxnorm, dA, iter);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's dlarfg is that the norm of dx, and hance beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
// Generates the iter-th Householder reflector (via magma_dlarfgx_gpu) and
// then updates the iter-th column of the triangular factor T used in
// blocked QR.  V (leading dimension ldv) holds previously generated
// reflectors; dwork is device scratch space.
extern "C" void
magma_dlarfgtx_gpu(
magma_int_t n,
magmaDouble_ptr dx0,
magmaDouble_ptr dx,
magmaDouble_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDouble_ptr dA, magma_int_t iter,
magmaDouble_ptr V, magma_int_t ldv,
magmaDouble_ptr T, magma_int_t ldt,
magmaDouble_ptr dwork)
{
/* Generate the elementary reflector H(iter) */
magma_dlarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter);
if (iter==0) {
// First column: T(0,0) = tau, and the pivot element of the reflector
// (stored at dx0) is set to 1.
double tt = MAGMA_D_ONE;
magmablas_dlacpy(MagmaUpperLower, 1, 1, dtau, 1, T+iter+iter*ldt, 1);
magma_dsetmatrix(1,1, &tt,1, dx0,1);
}
else {
/* Compute the iter-th column of T */
// NOTE(review): presumably dgemv_kernel3 forms V^T*v scaled by tau into
// dwork and dtrmv_kernel2 applies the triangular update into T(:,iter);
// confirm against the kernel definitions.
magma_dgemv_kernel3<<< iter, BLOCK_SIZE, 0, magma_stream >>>( n, V, ldv, dx0, dwork, dtau );
magma_dtrmv_kernel2<<< iter, iter, 0, magma_stream >>>( T, ldt, dwork, T+iter*ldt, dtau );
}
}
//==============================================================================
|
8fdbb69a9b721ebdee0d9ab1bedacc03d57651dd.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <thrust/random.h>
#include "cupoch/geometry/bruteforce_nn.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/registration/fast_global_registration.h"
#include "cupoch/registration/registration.h"
#include "cupoch/utility/platform.h"
namespace cupoch {
namespace registration {
namespace {
// Device functor for the tuple-constraint stage of Fast Global Registration:
// each invocation draws three random cross-checked correspondences and keeps
// the triple only if the three pairwise edge lengths agree between the two
// point clouds up to the given scale factor.  Rejected triples are written
// as (-1, -1) sentinels and compacted away by the caller.
struct compute_tuple_constraint_functor {
compute_tuple_constraint_functor(
size_t ncorr,
const thrust::tuple<int, int>* corres_cross,
const Eigen::Vector3f* point_cloud_vec_fi_points,
const Eigen::Vector3f* point_cloud_vec_fj_points,
thrust::tuple<int, int>* corres_tuple,
float scale)
: ncorr_(ncorr),
corres_cross_(corres_cross),
point_cloud_vec_fi_points_(point_cloud_vec_fi_points),
point_cloud_vec_fj_points_(point_cloud_vec_fj_points),
corres_tuple_(corres_tuple),
scale_(scale){};
const int ncorr_;                                   // number of cross-checked correspondences
const thrust::tuple<int, int>* corres_cross_;       // (i, j) correspondence index pairs
const Eigen::Vector3f* point_cloud_vec_fi_points_;  // points of fragment i
const Eigen::Vector3f* point_cloud_vec_fj_points_;  // points of fragment j
thrust::tuple<int, int>* corres_tuple_;             // output: 3 entries written per trial
const float scale_;                                 // length-ratio tolerance (tuple_scale)
__device__ void operator()(size_t idx) {
int rand0, rand1, rand2;
int idi0, idi1, idi2, idj0, idj1, idj2;
// Per-trial RNG: discard(idx) advances the stream so trials differ.
// NOTE(review): consecutive idx values draw from overlapping engine
// states, so neighboring trials may be correlated -- confirm acceptable.
thrust::default_random_engine eng;
thrust::uniform_int_distribution<int> dist(0, ncorr_ - 1);
eng.discard(idx);
rand0 = dist(eng);
rand1 = dist(eng);
rand2 = dist(eng);
idi0 = thrust::get<0>(corres_cross_[rand0]);
idj0 = thrust::get<1>(corres_cross_[rand0]);
idi1 = thrust::get<0>(corres_cross_[rand1]);
idj1 = thrust::get<1>(corres_cross_[rand1]);
idi2 = thrust::get<0>(corres_cross_[rand2]);
idj2 = thrust::get<1>(corres_cross_[rand2]);
// collect 3 points from i-th fragment
Eigen::Vector3f pti0 = point_cloud_vec_fi_points_[idi0];
Eigen::Vector3f pti1 = point_cloud_vec_fi_points_[idi1];
Eigen::Vector3f pti2 = point_cloud_vec_fi_points_[idi2];
float li0 = (pti0 - pti1).norm();
float li1 = (pti1 - pti2).norm();
float li2 = (pti2 - pti0).norm();
// collect 3 points from j-th fragment
Eigen::Vector3f ptj0 = point_cloud_vec_fj_points_[idj0];
Eigen::Vector3f ptj1 = point_cloud_vec_fj_points_[idj1];
Eigen::Vector3f ptj2 = point_cloud_vec_fj_points_[idj2];
float lj0 = (ptj0 - ptj1).norm();
float lj1 = (ptj1 - ptj2).norm();
float lj2 = (ptj2 - ptj0).norm();
// check tuple constraint: each edge length must satisfy
// scale < lj/li < 1/scale for all three edges simultaneously.
bool cond = (li0 * scale_ < lj0) && (lj0 < li0 / scale_) &&
(li1 * scale_ < lj1) && (lj1 < li1 / scale_) &&
(li2 * scale_ < lj2) && (lj2 < li2 / scale_);
thrust::tuple<int, int> invalid_idx = thrust::make_tuple(-1, -1);
// Write all three pairs (or three sentinels) for this trial.
corres_tuple_[3 * idx] =
(cond) ? thrust::make_tuple(idi0, idj0) : invalid_idx;
corres_tuple_[3 * idx + 1] =
(cond) ? thrust::make_tuple(idi1, idj1) : invalid_idx;
corres_tuple_[3 * idx + 2] =
(cond) ? thrust::make_tuple(idi2, idj2) : invalid_idx;
}
};
// Builds a robust correspondence set between two point clouds for Fast
// Global Registration:
//   STEP 0) swap so fragment fi is the larger cloud,
//   STEP 1) nearest-neighbor matching in Dim-dimensional feature space,
//           run in both directions,
//   STEP 2) cross check: keep only pairs found by both directions,
//   STEP 3) tuple constraint: keep random triples whose pairwise edge
//           lengths are consistent between the two clouds.
// Returns (index-in-cloud-0, index-in-cloud-1) pairs (swapped back if
// STEP 0 swapped the clouds).
template <int Dim>
utility::device_vector<thrust::tuple<int, int>> AdvancedMatching(
const std::vector<geometry::PointCloud>& point_cloud_vec,
const std::vector<Feature<Dim>>& features_vec,
const FastGlobalRegistrationOption& option) {
// STEP 0) Swap source and target if necessary
int fi = 0, fj = 1;
utility::LogDebug("Advanced matching : [{:d} - {:d}]", fi, fj);
bool swapped = false;
if (point_cloud_vec[fj].points_.size() >
point_cloud_vec[fi].points_.size()) {
int temp = fi;
fi = fj;
fj = temp;
swapped = true;
}
// STEP 1) Initial matching
int nPti = int(point_cloud_vec[fi].points_.size());
int nPtj = int(point_cloud_vec[fj].points_.size());
utility::device_vector<int> corresK;
utility::device_vector<float> dis;
utility::device_vector<thrust::tuple<int, int>> corres;
corres.resize(nPti + nPtj);
// fi -> fj direction: pair each query index with its NN index.
geometry::BruteForceNN<Dim>(features_vec[fi].data_, features_vec[fj].data_,
corresK, dis);
thrust::copy(make_tuple_iterator(corresK.begin(),
thrust::make_counting_iterator<int>(0)),
make_tuple_iterator(
corresK.end(),
thrust::make_counting_iterator<int>(corresK.size())),
corres.begin());
// fj -> fi direction, appended after the first block.
geometry::BruteForceNN<Dim>(features_vec[fj].data_, features_vec[fi].data_,
corresK, dis);
thrust::copy(make_tuple_iterator(thrust::make_counting_iterator<int>(0),
corresK.begin()),
make_tuple_iterator(
thrust::make_counting_iterator<int>(corresK.size()),
corresK.end()),
corres.begin() + nPtj);
// Sort so mutual matches become adjacent duplicates.
thrust::sort(utility::exec_policy(0)->on(0),
corres.begin(), corres.end());
utility::LogDebug("points are remained : {:d}", corres.size());
// STEP 2) CROSS CHECK
// A pair found by both directions appears exactly twice after the sort;
// reduce_by_key counts multiplicities, remove_if drops singletons.
utility::LogDebug("\t[cross check] ");
utility::device_vector<thrust::tuple<int, int>> corres_cross(corres.size());
utility::device_vector<int> counts(corres.size());
auto end1 = thrust::reduce_by_key(corres.begin(), corres.end(),
thrust::make_constant_iterator<int>(1),
corres_cross.begin(), counts.begin());
auto end2 =
thrust::remove_if(corres_cross.begin(), end1.first, counts.begin(),
[] __device__(int cnt) { return cnt < 2; });
corres_cross.resize(thrust::distance(corres_cross.begin(), end2));
utility::LogDebug("points are remained : {:d}", corres_cross.size())
;
// STEP 3) TUPLE CONSTRAINT
// 100 random trials per correspondence; each trial emits 3 pairs or 3
// (-1,-1) sentinels, which are compacted away below.
utility::LogDebug("\t[tuple constraint] ");
float scale = option.tuple_scale_;
size_t ncorr = corres_cross.size();
size_t number_of_trial = ncorr * 100;
utility::device_vector<thrust::tuple<int, int>> corres_tuple(
3 * number_of_trial);
compute_tuple_constraint_functor func(
ncorr, thrust::raw_pointer_cast(corres_cross.data()),
thrust::raw_pointer_cast(point_cloud_vec[fi].points_.data()),
thrust::raw_pointer_cast(point_cloud_vec[fj].points_.data()),
thrust::raw_pointer_cast(corres_tuple.data()), scale);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(number_of_trial), func);
auto end3 = thrust::remove_if(
corres_tuple.begin(), corres_tuple.end(),
[] __device__(const thrust::tuple<int, int>& corr) {
return thrust::get<0>(corr) < 0;
});
size_t n_res = thrust::distance(corres_tuple.begin(), end3);
// Cap the result at the configured maximum tuple count.
corres_tuple.resize(::min((int)n_res, option.maximum_tuple_count_));
utility::LogDebug("{:d} tuples ({:d} trial, {:d} actual).",
corres_tuple.size(), number_of_trial, n_res);
// Undo the STEP 0 swap so pairs are (cloud 0 index, cloud 1 index).
if (swapped) {
thrust::for_each(corres_tuple.begin(), corres_tuple.end(),
[] __device__(thrust::tuple<int, int> & corr) {
thrust::swap(thrust::get<0>(corr),
thrust::get<1>(corr));
});
}
utility::LogDebug("\t[final] matches {:d}.", (int)corres_tuple.size());
return corres_tuple;
}
// Normalize scale of points. X' = (X-\mu)/scale
// De-means both clouds in place and divides them by a common global scale.
// Returns (per-cloud means, the scale actually applied, the starting scale
// used to seed graduated non-convexity in the optimizer).
std::tuple<std::vector<Eigen::Vector3f>, float, float> NormalizePointCloud(
        std::vector<geometry::PointCloud>& point_cloud_vec,
        const FastGlobalRegistrationOption& option) {
    int num = 2;
    float scale = 0;
    std::vector<Eigen::Vector3f> pcd_mean_vec;
    float scale_global, scale_start;
    for (int i = 0; i < num; ++i) {
        Eigen::Vector3f mean =
                thrust::reduce(point_cloud_vec[i].points_.begin(),
                               point_cloud_vec[i].points_.end(),
                               Eigen::Vector3f(0.0, 0.0, 0.0),
                               thrust::plus<Eigen::Vector3f>());
        mean = mean / point_cloud_vec[i].points_.size();
        pcd_mean_vec.push_back(mean);
        utility::LogDebug("normalize points :: mean = [{:f} {:f} {:f}]",
                          mean(0), mean(1), mean(2));
        // Shift the cloud so its centroid sits at the origin.
        thrust::for_each(
                point_cloud_vec[i].points_.begin(),
                point_cloud_vec[i].points_.end(),
                [mean] __device__(Eigen::Vector3f & pt) { pt -= mean; });
        // Passing the running 'scale' as the init value makes this the max
        // point norm over BOTH clouds, not just the current one.
        scale = thrust::transform_reduce(
                point_cloud_vec[i].points_.begin(),
                point_cloud_vec[i].points_.end(),
                [] __device__(const Eigen::Vector3f& pt) { return pt.norm(); },
                scale, thrust::maximum<float>());
    }
    // With use_absolute_scale_ the geometry is left at its original scale
    // and the measured extent only seeds the optimizer (scale_start).
    if (option.use_absolute_scale_) {
        scale_global = 1.0;
        scale_start = scale;
    } else {
        scale_global = scale;
        scale_start = 1.0;
    }
    utility::LogDebug("normalize points :: global scale : {:f}", scale_global);
    for (int i = 0; i < num; ++i) {
        thrust::for_each(point_cloud_vec[i].points_.begin(),
                         point_cloud_vec[i].points_.end(),
                         [scale_global] __device__(Eigen::Vector3f & pt) {
                             pt /= scale_global;
                         });
    }
    return std::make_tuple(pcd_mean_vec, scale_global, scale_start);
}
// Per-correspondence Gauss-Newton contribution (JTJ, JTr) for one pair
// (p, q).  Each of the three components of the residual rpq = p - q adds
// J*J^T and J*r, weighted by the scaled Geman-McClure factor
// s = (par / (||rpq||^2 + par))^2 used by FGR.  The 6-vector layout is
// [rotation (3) | translation (3)].
struct compute_jacobian_functor {
    compute_jacobian_functor(float par) : par_(par){};
    // Current graduated non-convexity parameter (mu).
    const float par_;
    __device__ thrust::tuple<Eigen::Matrix6f, Eigen::Vector6f> operator()(
            const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f>& x) const {
        Eigen::Vector3f p, q;
        p = thrust::get<0>(x);
        q = thrust::get<1>(x);
        Eigen::Vector3f rpq = p - q;
        // Robust weight: approaches 1 for small residuals, 0 for outliers.
        float temp = par_ / (rpq.dot(rpq) + par_);
        float s = temp * temp;
        float r = 0;
        Eigen::Matrix6f JTJ = Eigen::Matrix6f::Zero();
        Eigen::Vector6f JTr = Eigen::Vector6f::Zero();
        Eigen::Vector6f J = Eigen::Vector6f::Zero();
        // Jacobian row for residual component rpq(0):
        // rotation part drawn from the cross-product matrix of q,
        // translation part -1 on the x slot.
        J(1) = -q(2);
        J(2) = q(1);
        J(3) = -1;
        r = rpq(0);
        JTJ += J * J.transpose() * s;
        JTr += J * r * s;
        J.setZero();
        // Jacobian row for residual component rpq(1).
        J(2) = -q(0);
        J(0) = q(2);
        J(4) = -1;
        r = rpq(1);
        JTJ += J * J.transpose() * s;
        JTr += J * r * s;
        J.setZero();
        // Jacobian row for residual component rpq(2).
        J(0) = -q(1);
        J(1) = q(0);
        J(5) = -1;
        r = rpq(2);
        JTJ += J * J.transpose() * s;
        JTr += J * r * s;
        return thrust::make_tuple(JTJ, JTr);
    }
};
// Inner FGR optimization: iteratively re-weighted Gauss-Newton over a 6-DoF
// pose using the matched correspondences.  A local copy of cloud j is moved
// each iteration; the robust parameter 'par' (mu) is decreased every 4
// iterations (graduated non-convexity).  Returns the accumulated transform
// mapping cloud j onto cloud i in the normalized frame.
Eigen::Matrix4f OptimizePairwiseRegistration(
        const std::vector<geometry::PointCloud>& point_cloud_vec,
        const utility::device_vector<thrust::tuple<int, int>>& corres,
        float scale_start,
        const FastGlobalRegistrationOption& option) {
    utility::LogDebug("Pairwise rigid pose optimization");
    float par = scale_start;
    int numIter = option.iteration_number_;
    int i = 0, j = 1;
    geometry::PointCloud point_cloud_copy_j = point_cloud_vec[j];
    // Too few correspondences to estimate a pose reliably.
    if (corres.size() < 10) return Eigen::Matrix4f::Identity();
    Eigen::Matrix4f trans = Eigen::Matrix4f::Identity();
    for (int itr = 0; itr < numIter; itr++) {
        Eigen::Matrix6f JTJ = Eigen::Matrix6f::Zero();
        Eigen::Vector6f JTr = Eigen::Vector6f::Zero();
        compute_jacobian_functor func(par);
        // Gather (p, q) pairs: cloud i indexed by the first element of each
        // correspondence, the moving copy of cloud j by the second, then
        // sum the per-pair (JTJ, JTr) contributions in one pass.
        thrust::tie(JTJ, JTr) = thrust::transform_reduce(
                make_tuple_iterator(
                        thrust::make_permutation_iterator(
                                point_cloud_vec[i].points_.begin(),
                                thrust::make_transform_iterator(
                                        corres.begin(),
                                        tuple_get_functor<0, int, int, int>())),
                        thrust::make_permutation_iterator(
                                point_cloud_copy_j.points_.begin(),
                                thrust::make_transform_iterator(
                                        corres.begin(),
                                        tuple_get_functor<1, int, int,
                                                          int>()))),
                make_tuple_iterator(
                        thrust::make_permutation_iterator(
                                point_cloud_vec[i].points_.begin(),
                                thrust::make_transform_iterator(
                                        corres.end(),
                                        tuple_get_functor<0, int, int, int>())),
                        thrust::make_permutation_iterator(
                                point_cloud_copy_j.points_.begin(),
                                thrust::make_transform_iterator(
                                        corres.end(),
                                        tuple_get_functor<1, int, int,
                                                          int>()))),
                func, thrust::make_tuple(JTJ, JTr),
                add_tuple_functor<Eigen::Matrix6f, Eigen::Vector6f>());
        bool success;
        Eigen::Vector6f result;
        // Solves (-JTJ) x = JTr for the update step.
        // NOTE(review): 'success' is never checked -- a singular system
        // silently yields whatever the solver returns; confirm acceptable.
        thrust::tie(success, result) =
                utility::SolveLinearSystemPSD<6>(-JTJ, JTr);
        Eigen::Matrix4f delta = utility::TransformVector6fToMatrix4f(result);
        trans = delta * trans;
        point_cloud_copy_j.Transform(delta);
        // graduated non-convexity.
        if (option.decrease_mu_) {
            if (itr % 4 == 0 && par > option.maximum_correspondence_distance_) {
                par /= option.division_factor_;
            }
        }
    }
    return trans;
}
// Below line indicates how the transformation matrix aligns two point clouds
// e.g. T * point_cloud_vec[1] is aligned with point_cloud_vec[0].
Eigen::Matrix4f GetInvTransformationOriginalScale(
        const Eigen::Matrix4f& transformation,
        const std::vector<Eigen::Vector3f>& pcd_mean_vec,
        float scale_global) {
    // Undo the normalization (mean shift + global scaling) and invert the
    // rigid transform, producing the pose in original coordinates.
    const Eigen::Matrix3f rotation = transformation.block<3, 3>(0, 0);
    const Eigen::Vector3f translation = transformation.block<3, 1>(0, 3);
    const Eigen::Matrix3f rotation_inv = rotation.transpose();
    Eigen::Matrix4f inv_trans = Eigen::Matrix4f::Identity();
    inv_trans.block<3, 3>(0, 0) = rotation_inv;
    inv_trans.block<3, 1>(0, 3) =
            -rotation_inv * (-rotation * pcd_mean_vec[1] +
                             translation * scale_global + pcd_mean_vec[0]);
    return inv_trans;
}
} // namespace
// Fast Global Registration entry point (Zhou et al., ECCV 2016 style):
// normalizes copies of both clouds, matches Dim-dimensional features,
// optimizes the pose, then evaluates the result against the original,
// un-normalized inputs.
template<int Dim>
RegistrationResult FastGlobalRegistration(
        const geometry::PointCloud& source,
        const geometry::PointCloud& target,
        const Feature<Dim>& source_feature,
        const Feature<Dim>& target_feature,
        const FastGlobalRegistrationOption& option /* =
        FastGlobalRegistrationOption()*/) {
    if (!source.HasPoints() || !target.HasPoints() ||
        source_feature.IsEmpty() || target_feature.IsEmpty()) {
        utility::LogError("Invalid source or target pointcloud.");
        // NOTE(review): if LogError throws, this return is unreachable --
        // confirm LogError semantics.
        return RegistrationResult();
    }
    // Keep unmodified copies: NormalizePointCloud mutates the vec entries.
    std::vector<geometry::PointCloud> point_cloud_vec;
    geometry::PointCloud source_orig = source;
    geometry::PointCloud target_orig = target;
    point_cloud_vec.push_back(source);
    point_cloud_vec.push_back(target);
    std::vector<Feature<Dim>> features_vec;
    features_vec.push_back(source_feature);
    features_vec.push_back(target_feature);
    float scale_global, scale_start;
    std::vector<Eigen::Vector3f> pcd_mean_vec;
    std::tie(pcd_mean_vec, scale_global, scale_start) =
            NormalizePointCloud(point_cloud_vec, option);
    utility::device_vector<thrust::tuple<int, int>> corres;
    corres = AdvancedMatching<Dim>(point_cloud_vec, features_vec, option);
    Eigen::Matrix4f transformation;
    // NOTE(review): the third parameter of OptimizePairwiseRegistration is
    // named scale_start but receives scale_global here; this matches the
    // sibling implementation -- confirm intent.
    transformation = OptimizePairwiseRegistration(point_cloud_vec, corres,
                                                  scale_global, option);
    // as the original code T * point_cloud_vec[1] is aligned with
    // point_cloud_vec[0] matrix inverse is applied here.
    return EvaluateRegistration(
            source_orig, target_orig, option.maximum_correspondence_distance_,
            GetInvTransformationOriginalScale(transformation, pcd_mean_vec,
                                              scale_global));
}
template RegistrationResult FastGlobalRegistration<33>(
const geometry::PointCloud& source,
const geometry::PointCloud& target,
const Feature<33>& source_feature,
const Feature<33>& target_feature,
const FastGlobalRegistrationOption& option);
} // namespace registration
} // namespace cupoch | 8fdbb69a9b721ebdee0d9ab1bedacc03d57651dd.cu | /**
* Copyright (c) 2020 Neka-Nat
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
**/
#include <thrust/random.h>
#include "cupoch/geometry/bruteforce_nn.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/registration/fast_global_registration.h"
#include "cupoch/registration/registration.h"
#include "cupoch/utility/platform.h"
namespace cupoch {
namespace registration {
namespace {
// One invocation per random trial: draws three correspondences, compares the
// triangle side lengths they induce in each cloud, and writes the three
// pairs to corres_tuple_ when every length ratio lies in [scale_, 1/scale_].
// Failed trials are marked with (-1, -1) so the caller can compact them.
struct compute_tuple_constraint_functor {
    compute_tuple_constraint_functor(
            size_t ncorr,
            const thrust::tuple<int, int>* corres_cross,
            const Eigen::Vector3f* point_cloud_vec_fi_points,
            const Eigen::Vector3f* point_cloud_vec_fj_points,
            thrust::tuple<int, int>* corres_tuple,
            float scale)
        : ncorr_(ncorr),
          corres_cross_(corres_cross),
          point_cloud_vec_fi_points_(point_cloud_vec_fi_points),
          point_cloud_vec_fj_points_(point_cloud_vec_fj_points),
          corres_tuple_(corres_tuple),
          scale_(scale){};
    // NOTE(review): stored as int though the constructor takes size_t --
    // narrows for very large correspondence sets; confirm acceptable.
    const int ncorr_;
    const thrust::tuple<int, int>* corres_cross_;
    const Eigen::Vector3f* point_cloud_vec_fi_points_;
    const Eigen::Vector3f* point_cloud_vec_fj_points_;
    thrust::tuple<int, int>* corres_tuple_;  // output, 3 slots per trial
    const float scale_;  // tuple_scale_ from the options
    __device__ void operator()(size_t idx) {
        int rand0, rand1, rand2;
        int idi0, idi1, idi2, idj0, idj1, idj2;
        // Fixed-seed engine advanced by idx per trial.
        // NOTE(review): discard(idx) makes consecutive trials draw
        // overlapping subsequences of the same stream -- confirm this
        // sampling is intentional.
        thrust::default_random_engine eng;
        thrust::uniform_int_distribution<int> dist(0, ncorr_ - 1);
        eng.discard(idx);
        rand0 = dist(eng);
        rand1 = dist(eng);
        rand2 = dist(eng);
        idi0 = thrust::get<0>(corres_cross_[rand0]);
        idj0 = thrust::get<1>(corres_cross_[rand0]);
        idi1 = thrust::get<0>(corres_cross_[rand1]);
        idj1 = thrust::get<1>(corres_cross_[rand1]);
        idi2 = thrust::get<0>(corres_cross_[rand2]);
        idj2 = thrust::get<1>(corres_cross_[rand2]);
        // collect 3 points from i-th fragment
        Eigen::Vector3f pti0 = point_cloud_vec_fi_points_[idi0];
        Eigen::Vector3f pti1 = point_cloud_vec_fi_points_[idi1];
        Eigen::Vector3f pti2 = point_cloud_vec_fi_points_[idi2];
        float li0 = (pti0 - pti1).norm();
        float li1 = (pti1 - pti2).norm();
        float li2 = (pti2 - pti0).norm();
        // collect 3 points from j-th fragment
        Eigen::Vector3f ptj0 = point_cloud_vec_fj_points_[idj0];
        Eigen::Vector3f ptj1 = point_cloud_vec_fj_points_[idj1];
        Eigen::Vector3f ptj2 = point_cloud_vec_fj_points_[idj2];
        float lj0 = (ptj0 - ptj1).norm();
        float lj1 = (ptj1 - ptj2).norm();
        float lj2 = (ptj2 - ptj0).norm();
        // check tuple constraint
        // All three side-length ratios must be within [scale_, 1/scale_].
        bool cond = (li0 * scale_ < lj0) && (lj0 < li0 / scale_) &&
                    (li1 * scale_ < lj1) && (lj1 < li1 / scale_) &&
                    (li2 * scale_ < lj2) && (lj2 < li2 / scale_);
        thrust::tuple<int, int> invalid_idx = thrust::make_tuple(-1, -1);
        corres_tuple_[3 * idx] =
                (cond) ? thrust::make_tuple(idi0, idj0) : invalid_idx;
        corres_tuple_[3 * idx + 1] =
                (cond) ? thrust::make_tuple(idi1, idj1) : invalid_idx;
        corres_tuple_[3 * idx + 2] =
                (cond) ? thrust::make_tuple(idi2, idj2) : invalid_idx;
    }
};
// Builds putative correspondences between the two clouds for Fast Global
// Registration: reciprocal nearest-neighbour matching in Dim-dimensional
// feature space, followed by the FGR tuple (edge-length-ratio) test.
// Returns (index into point_cloud_vec[0], index into point_cloud_vec[1])
// pairs; a swap applied for matching efficiency is undone before returning.
template <int Dim>
utility::device_vector<thrust::tuple<int, int>> AdvancedMatching(
        const std::vector<geometry::PointCloud>& point_cloud_vec,
        const std::vector<Feature<Dim>>& features_vec,
        const FastGlobalRegistrationOption& option) {
    // STEP 0) Swap source and target if necessary
    // Make fi the larger cloud so the smaller one is the query set.
    int fi = 0, fj = 1;
    utility::LogDebug("Advanced matching : [{:d} - {:d}]", fi, fj);
    bool swapped = false;
    if (point_cloud_vec[fj].points_.size() >
        point_cloud_vec[fi].points_.size()) {
        int temp = fi;
        fi = fj;
        fj = temp;
        swapped = true;
    }
    // STEP 1) Initial matching
    int nPti = int(point_cloud_vec[fi].points_.size());
    int nPtj = int(point_cloud_vec[fj].points_.size());
    utility::device_vector<int> corresK;
    utility::device_vector<float> dis;
    utility::device_vector<thrust::tuple<int, int>> corres;
    corres.resize(nPti + nPtj);
    // Forward pass: for each point of fj find its NN in fi, stored as
    // (nn_in_fi, j) pairs.  Assumes BruteForceNN(ref, query, ...) yields one
    // result per query point -- confirm against its declaration.
    geometry::BruteForceNN<Dim>(features_vec[fi].data_, features_vec[fj].data_,
                                corresK, dis);
    thrust::copy(make_tuple_iterator(corresK.begin(),
                                     thrust::make_counting_iterator<int>(0)),
                 make_tuple_iterator(
                         corresK.end(),
                         thrust::make_counting_iterator<int>(corresK.size())),
                 corres.begin());
    // Backward pass: for each point of fi find its NN in fj, stored as
    // (i, nn_in_fj) pairs after the first nPtj entries.
    geometry::BruteForceNN<Dim>(features_vec[fj].data_, features_vec[fi].data_,
                                corresK, dis);
    thrust::copy(make_tuple_iterator(thrust::make_counting_iterator<int>(0),
                                     corresK.begin()),
                 make_tuple_iterator(
                         thrust::make_counting_iterator<int>(corresK.size()),
                         corresK.end()),
                 corres.begin() + nPtj);
    thrust::sort(utility::exec_policy(0)->on(0),
                 corres.begin(), corres.end());
    utility::LogDebug("points are remained : {:d}", corres.size());
    // STEP 2) CROSS CHECK
    // After sorting, a pair found in both passes appears twice; reduce_by_key
    // counts duplicates and remove_if keeps only the mutual matches.
    utility::LogDebug("\t[cross check] ");
    utility::device_vector<thrust::tuple<int, int>> corres_cross(corres.size());
    utility::device_vector<int> counts(corres.size());
    auto end1 = thrust::reduce_by_key(corres.begin(), corres.end(),
                                      thrust::make_constant_iterator<int>(1),
                                      corres_cross.begin(), counts.begin());
    auto end2 =
            thrust::remove_if(corres_cross.begin(), end1.first, counts.begin(),
                              [] __device__(int cnt) { return cnt < 2; });
    corres_cross.resize(thrust::distance(corres_cross.begin(), end2));
    utility::LogDebug("points are remained : {:d}", corres_cross.size());
    // STEP 3) TUPLE CONSTRAINT
    // Randomly sample correspondence triplets; keep those whose pairwise
    // distances agree between the two clouds within tuple_scale_.
    utility::LogDebug("\t[tuple constraint] ");
    float scale = option.tuple_scale_;
    size_t ncorr = corres_cross.size();
    size_t number_of_trial = ncorr * 100;
    utility::device_vector<thrust::tuple<int, int>> corres_tuple(
            3 * number_of_trial);
    compute_tuple_constraint_functor func(
            ncorr, thrust::raw_pointer_cast(corres_cross.data()),
            thrust::raw_pointer_cast(point_cloud_vec[fi].points_.data()),
            thrust::raw_pointer_cast(point_cloud_vec[fj].points_.data()),
            thrust::raw_pointer_cast(corres_tuple.data()), scale);
    thrust::for_each(thrust::make_counting_iterator<size_t>(0),
                     thrust::make_counting_iterator(number_of_trial), func);
    // Failed trials were marked with (-1, -1); compact them away.
    auto end3 = thrust::remove_if(
            corres_tuple.begin(), corres_tuple.end(),
            [] __device__(const thrust::tuple<int, int>& corr) {
                return thrust::get<0>(corr) < 0;
            });
    size_t n_res = thrust::distance(corres_tuple.begin(), end3);
    corres_tuple.resize(std::min((int)n_res, option.maximum_tuple_count_));
    utility::LogDebug("{:d} tuples ({:d} trial, {:d} actual).",
                      corres_tuple.size(), number_of_trial, n_res);
    if (swapped) {
        // Restore (source index, target index) order undone in STEP 0.
        thrust::for_each(corres_tuple.begin(), corres_tuple.end(),
                         [] __device__(thrust::tuple<int, int> & corr) {
                             thrust::swap(thrust::get<0>(corr),
                                          thrust::get<1>(corr));
                         });
    }
    utility::LogDebug("\t[final] matches {:d}.", (int)corres_tuple.size());
    return corres_tuple;
}
// Normalize scale of points. X' = (X-\mu)/scale
// De-means both clouds in place and divides them by a common global scale.
// Returns (per-cloud means, the scale actually applied, the starting scale
// used to seed graduated non-convexity in the optimizer).
std::tuple<std::vector<Eigen::Vector3f>, float, float> NormalizePointCloud(
        std::vector<geometry::PointCloud>& point_cloud_vec,
        const FastGlobalRegistrationOption& option) {
    int num = 2;
    float scale = 0;
    std::vector<Eigen::Vector3f> pcd_mean_vec;
    float scale_global, scale_start;
    for (int i = 0; i < num; ++i) {
        Eigen::Vector3f mean =
                thrust::reduce(point_cloud_vec[i].points_.begin(),
                               point_cloud_vec[i].points_.end(),
                               Eigen::Vector3f(0.0, 0.0, 0.0),
                               thrust::plus<Eigen::Vector3f>());
        mean = mean / point_cloud_vec[i].points_.size();
        pcd_mean_vec.push_back(mean);
        utility::LogDebug("normalize points :: mean = [{:f} {:f} {:f}]",
                          mean(0), mean(1), mean(2));
        // Shift the cloud so its centroid sits at the origin.
        thrust::for_each(
                point_cloud_vec[i].points_.begin(),
                point_cloud_vec[i].points_.end(),
                [mean] __device__(Eigen::Vector3f & pt) { pt -= mean; });
        // Passing the running 'scale' as the init value makes this the max
        // point norm over BOTH clouds, not just the current one.
        scale = thrust::transform_reduce(
                point_cloud_vec[i].points_.begin(),
                point_cloud_vec[i].points_.end(),
                [] __device__(const Eigen::Vector3f& pt) { return pt.norm(); },
                scale, thrust::maximum<float>());
    }
    // With use_absolute_scale_ the geometry is left at its original scale
    // and the measured extent only seeds the optimizer (scale_start).
    if (option.use_absolute_scale_) {
        scale_global = 1.0;
        scale_start = scale;
    } else {
        scale_global = scale;
        scale_start = 1.0;
    }
    utility::LogDebug("normalize points :: global scale : {:f}", scale_global);
    for (int i = 0; i < num; ++i) {
        thrust::for_each(point_cloud_vec[i].points_.begin(),
                         point_cloud_vec[i].points_.end(),
                         [scale_global] __device__(Eigen::Vector3f & pt) {
                             pt /= scale_global;
                         });
    }
    return std::make_tuple(pcd_mean_vec, scale_global, scale_start);
}
// Per-correspondence Gauss-Newton contribution (JTJ, JTr) for one pair
// (p, q).  Each of the three components of the residual rpq = p - q adds
// J*J^T and J*r, weighted by the scaled Geman-McClure factor
// s = (par / (||rpq||^2 + par))^2 used by FGR.  The 6-vector layout is
// [rotation (3) | translation (3)].
struct compute_jacobian_functor {
    compute_jacobian_functor(float par) : par_(par){};
    // Current graduated non-convexity parameter (mu).
    const float par_;
    __device__ thrust::tuple<Eigen::Matrix6f, Eigen::Vector6f> operator()(
            const thrust::tuple<Eigen::Vector3f, Eigen::Vector3f>& x) const {
        Eigen::Vector3f p, q;
        p = thrust::get<0>(x);
        q = thrust::get<1>(x);
        Eigen::Vector3f rpq = p - q;
        // Robust weight: approaches 1 for small residuals, 0 for outliers.
        float temp = par_ / (rpq.dot(rpq) + par_);
        float s = temp * temp;
        float r = 0;
        Eigen::Matrix6f JTJ = Eigen::Matrix6f::Zero();
        Eigen::Vector6f JTr = Eigen::Vector6f::Zero();
        Eigen::Vector6f J = Eigen::Vector6f::Zero();
        // Jacobian row for residual component rpq(0):
        // rotation part drawn from the cross-product matrix of q,
        // translation part -1 on the x slot.
        J(1) = -q(2);
        J(2) = q(1);
        J(3) = -1;
        r = rpq(0);
        JTJ += J * J.transpose() * s;
        JTr += J * r * s;
        J.setZero();
        // Jacobian row for residual component rpq(1).
        J(2) = -q(0);
        J(0) = q(2);
        J(4) = -1;
        r = rpq(1);
        JTJ += J * J.transpose() * s;
        JTr += J * r * s;
        J.setZero();
        // Jacobian row for residual component rpq(2).
        J(0) = -q(1);
        J(1) = q(0);
        J(5) = -1;
        r = rpq(2);
        JTJ += J * J.transpose() * s;
        JTr += J * r * s;
        return thrust::make_tuple(JTJ, JTr);
    }
};
// Inner FGR optimization: iteratively re-weighted Gauss-Newton over a 6-DoF
// pose using the matched correspondences.  A local copy of cloud j is moved
// each iteration; the robust parameter 'par' (mu) is decreased every 4
// iterations (graduated non-convexity).  Returns the accumulated transform
// mapping cloud j onto cloud i in the normalized frame.
Eigen::Matrix4f OptimizePairwiseRegistration(
        const std::vector<geometry::PointCloud>& point_cloud_vec,
        const utility::device_vector<thrust::tuple<int, int>>& corres,
        float scale_start,
        const FastGlobalRegistrationOption& option) {
    utility::LogDebug("Pairwise rigid pose optimization");
    float par = scale_start;
    int numIter = option.iteration_number_;
    int i = 0, j = 1;
    geometry::PointCloud point_cloud_copy_j = point_cloud_vec[j];
    // Too few correspondences to estimate a pose reliably.
    if (corres.size() < 10) return Eigen::Matrix4f::Identity();
    Eigen::Matrix4f trans = Eigen::Matrix4f::Identity();
    for (int itr = 0; itr < numIter; itr++) {
        Eigen::Matrix6f JTJ = Eigen::Matrix6f::Zero();
        Eigen::Vector6f JTr = Eigen::Vector6f::Zero();
        compute_jacobian_functor func(par);
        // Gather (p, q) pairs: cloud i indexed by the first element of each
        // correspondence, the moving copy of cloud j by the second, then
        // sum the per-pair (JTJ, JTr) contributions in one pass.
        thrust::tie(JTJ, JTr) = thrust::transform_reduce(
                make_tuple_iterator(
                        thrust::make_permutation_iterator(
                                point_cloud_vec[i].points_.begin(),
                                thrust::make_transform_iterator(
                                        corres.begin(),
                                        tuple_get_functor<0, int, int, int>())),
                        thrust::make_permutation_iterator(
                                point_cloud_copy_j.points_.begin(),
                                thrust::make_transform_iterator(
                                        corres.begin(),
                                        tuple_get_functor<1, int, int,
                                                          int>()))),
                make_tuple_iterator(
                        thrust::make_permutation_iterator(
                                point_cloud_vec[i].points_.begin(),
                                thrust::make_transform_iterator(
                                        corres.end(),
                                        tuple_get_functor<0, int, int, int>())),
                        thrust::make_permutation_iterator(
                                point_cloud_copy_j.points_.begin(),
                                thrust::make_transform_iterator(
                                        corres.end(),
                                        tuple_get_functor<1, int, int,
                                                          int>()))),
                func, thrust::make_tuple(JTJ, JTr),
                add_tuple_functor<Eigen::Matrix6f, Eigen::Vector6f>());
        bool success;
        Eigen::Vector6f result;
        // Solves (-JTJ) x = JTr for the update step.
        // NOTE(review): 'success' is never checked -- a singular system
        // silently yields whatever the solver returns; confirm acceptable.
        thrust::tie(success, result) =
                utility::SolveLinearSystemPSD<6>(-JTJ, JTr);
        Eigen::Matrix4f delta = utility::TransformVector6fToMatrix4f(result);
        trans = delta * trans;
        point_cloud_copy_j.Transform(delta);
        // graduated non-convexity.
        if (option.decrease_mu_) {
            if (itr % 4 == 0 && par > option.maximum_correspondence_distance_) {
                par /= option.division_factor_;
            }
        }
    }
    return trans;
}
// Below line indicates how the transformation matrix aligns two point clouds
// e.g. T * point_cloud_vec[1] is aligned with point_cloud_vec[0].
Eigen::Matrix4f GetInvTransformationOriginalScale(
        const Eigen::Matrix4f& transformation,
        const std::vector<Eigen::Vector3f>& pcd_mean_vec,
        float scale_global) {
    // Undo the normalization (mean shift + global scaling) and invert the
    // rigid transform, producing the pose in original coordinates.
    const Eigen::Matrix3f rotation = transformation.block<3, 3>(0, 0);
    const Eigen::Vector3f translation = transformation.block<3, 1>(0, 3);
    const Eigen::Matrix3f rotation_inv = rotation.transpose();
    Eigen::Matrix4f inv_trans = Eigen::Matrix4f::Identity();
    inv_trans.block<3, 3>(0, 0) = rotation_inv;
    inv_trans.block<3, 1>(0, 3) =
            -rotation_inv * (-rotation * pcd_mean_vec[1] +
                             translation * scale_global + pcd_mean_vec[0]);
    return inv_trans;
}
} // namespace
// Fast Global Registration entry point (Zhou et al., ECCV 2016 style):
// normalizes copies of both clouds, matches Dim-dimensional features,
// optimizes the pose, then evaluates the result against the original,
// un-normalized inputs.
template<int Dim>
RegistrationResult FastGlobalRegistration(
        const geometry::PointCloud& source,
        const geometry::PointCloud& target,
        const Feature<Dim>& source_feature,
        const Feature<Dim>& target_feature,
        const FastGlobalRegistrationOption& option /* =
        FastGlobalRegistrationOption()*/) {
    if (!source.HasPoints() || !target.HasPoints() ||
        source_feature.IsEmpty() || target_feature.IsEmpty()) {
        utility::LogError("Invalid source or target pointcloud.");
        // NOTE(review): if LogError throws, this return is unreachable --
        // confirm LogError semantics.
        return RegistrationResult();
    }
    // Keep unmodified copies: NormalizePointCloud mutates the vec entries.
    std::vector<geometry::PointCloud> point_cloud_vec;
    geometry::PointCloud source_orig = source;
    geometry::PointCloud target_orig = target;
    point_cloud_vec.push_back(source);
    point_cloud_vec.push_back(target);
    std::vector<Feature<Dim>> features_vec;
    features_vec.push_back(source_feature);
    features_vec.push_back(target_feature);
    float scale_global, scale_start;
    std::vector<Eigen::Vector3f> pcd_mean_vec;
    std::tie(pcd_mean_vec, scale_global, scale_start) =
            NormalizePointCloud(point_cloud_vec, option);
    utility::device_vector<thrust::tuple<int, int>> corres;
    corres = AdvancedMatching<Dim>(point_cloud_vec, features_vec, option);
    Eigen::Matrix4f transformation;
    // NOTE(review): the third parameter of OptimizePairwiseRegistration is
    // named scale_start but receives scale_global here; this matches the
    // sibling implementation -- confirm intent.
    transformation = OptimizePairwiseRegistration(point_cloud_vec, corres,
                                                  scale_global, option);
    // as the original code T * point_cloud_vec[1] is aligned with
    // point_cloud_vec[0] matrix inverse is applied here.
    return EvaluateRegistration(
            source_orig, target_orig, option.maximum_correspondence_distance_,
            GetInvTransformationOriginalScale(transformation, pcd_mean_vec,
                                              scale_global));
}
template RegistrationResult FastGlobalRegistration<33>(
const geometry::PointCloud& source,
const geometry::PointCloud& target,
const Feature<33>& source_feature,
const Feature<33>& target_feature,
const FastGlobalRegistrationOption& option);
} // namespace registration
} // namespace cupoch |
6c170f9aa46ef3b91665369b09807e9323f943cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "utils.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <string>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include "loadSaveImage.h"
#include <stdio.h>
// simple cross correlation kernel copied from Mike's IPython Notebook
// Naive normalized cross-correlation: for each pixel, correlates the
// template (centered on the pixel, clamp-to-edge addressing) with the image
// channel and writes the NCC response to d_response.
//
// Launch: 2-D grid of 2-D blocks covering num_pixels_x x num_pixels_y, one
// thread per pixel.  template_mean must be the precomputed mean of
// d_template; template_size is its pixel count.
//
// Fixes vs. the original version:
//  * the numerator now uses the mean-subtracted image value (image_diff)
//    instead of the raw pixel value, as the NCC definition requires
//    (image_diff was previously computed but unused in the product);
//  * sqrtf replaces sqrt to avoid a double-precision round trip.
__global__ void naive_normalized_cross_correlation(
    float *d_response, unsigned char *d_original, unsigned char *d_template,
    int num_pixels_y, int num_pixels_x, int template_half_height,
    int template_height, int template_half_width, int template_width,
    int template_size, float template_mean) {
  int ny = num_pixels_y;
  int nx = num_pixels_x;
  int knx = template_width;
  int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x,
                                  (blockIdx.y * blockDim.y) + threadIdx.y);
  int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;
  if (image_index_2d.x < nx && image_index_2d.y < ny) {
    //
    // compute the mean of the image patch under the template
    //
    float image_sum = 0.0f;
    for (int y = -template_half_height; y <= template_half_height; y++) {
      for (int x = -template_half_width; x <= template_half_width; x++) {
        int2 image_offset_index_2d =
            make_int2(image_index_2d.x + x, image_index_2d.y + y);
        // clamp-to-edge addressing at the image borders
        int2 image_offset_index_2d_clamped =
            make_int2(min(nx - 1, max(0, image_offset_index_2d.x)),
                      min(ny - 1, max(0, image_offset_index_2d.y)));
        int image_offset_index_1d_clamped =
            (nx * image_offset_index_2d_clamped.y) +
            image_offset_index_2d_clamped.x;
        unsigned char image_offset_value =
            d_original[image_offset_index_1d_clamped];
        image_sum += (float)image_offset_value;
      }
    }
    float image_mean = image_sum / (float)template_size;
    //
    // accumulate the NCC sums over the patch
    //
    float sum_of_image_template_diff_products = 0.0f;
    float sum_of_squared_image_diffs = 0.0f;
    float sum_of_squared_template_diffs = 0.0f;
    for (int y = -template_half_height; y <= template_half_height; y++) {
      for (int x = -template_half_width; x <= template_half_width; x++) {
        int2 image_offset_index_2d =
            make_int2(image_index_2d.x + x, image_index_2d.y + y);
        int2 image_offset_index_2d_clamped =
            make_int2(min(nx - 1, max(0, image_offset_index_2d.x)),
                      min(ny - 1, max(0, image_offset_index_2d.y)));
        int image_offset_index_1d_clamped =
            (nx * image_offset_index_2d_clamped.y) +
            image_offset_index_2d_clamped.x;
        unsigned char image_offset_value =
            d_original[image_offset_index_1d_clamped];
        float image_diff = (float)image_offset_value - image_mean;
        int2 template_index_2d =
            make_int2(x + template_half_width, y + template_half_height);
        int template_index_1d =
            (knx * template_index_2d.y) + template_index_2d.x;
        unsigned char template_value = d_template[template_index_1d];
        float template_diff = template_value - template_mean;
        // numerator term: (I - I_mean) * (T - T_mean)
        float image_template_diff_product = image_diff * template_diff;
        float squared_image_diff = image_diff * image_diff;
        float squared_template_diff = template_diff * template_diff;
        sum_of_image_template_diff_products += image_template_diff_product;
        sum_of_squared_image_diffs += squared_image_diff;
        sum_of_squared_template_diffs += squared_template_diff;
      }
    }
    //
    // compute final result
    //
    // NCC = num / sqrt(var_I * var_T); zero when either variance vanishes.
    float result_value = 0.0f;
    if (sum_of_squared_image_diffs != 0 && sum_of_squared_template_diffs != 0) {
      result_value =
          sum_of_image_template_diff_products /
          sqrtf(sum_of_squared_image_diffs * sum_of_squared_template_diffs);
    }
    d_response[image_index_1d] = result_value;
  }
}
// Replaces the red channel with the green/blue average inside a template-
// sized window around each selected coordinate.  Coordinates are read from
// the END of d_coordinates (the array holds imgSize entries sorted by
// response, so the last num_coordinates entries are the strongest matches
// -- presumably ascending sort; confirm against the caller).
// Launch: 1-D grid, one thread per selected coordinate.
// NOTE(review): the d_r input channel is never read -- confirm intended.
__global__ void remove_redness_from_coordinates(
    const unsigned int *d_coordinates, unsigned char *d_r, unsigned char *d_b,
    unsigned char *d_g, unsigned char *d_r_output, int num_coordinates,
    int num_pixels_y, int num_pixels_x, int template_half_height,
    int template_half_width) {
  int ny = num_pixels_y;
  int nx = num_pixels_x;
  int global_index_1d = (blockIdx.x * blockDim.x) + threadIdx.x;
  int imgSize = num_pixels_x * num_pixels_y;
  if (global_index_1d < num_coordinates) {
    // Take the global_index_1d-th highest-ranked pixel position.
    unsigned int image_index_1d = d_coordinates[imgSize - global_index_1d - 1];
    ushort2 image_index_2d = make_ushort2(image_index_1d % num_pixels_x,
                                          image_index_1d / num_pixels_x);
    // Sweep the template window centered on the coordinate.
    for (int y = image_index_2d.y - template_half_height;
         y <= image_index_2d.y + template_half_height; y++) {
      for (int x = image_index_2d.x - template_half_width;
           x <= image_index_2d.x + template_half_width; x++) {
        int2 image_offset_index_2d = make_int2(x, y);
        // clamp-to-edge addressing at the image borders
        int2 image_offset_index_2d_clamped =
            make_int2(min(nx - 1, max(0, image_offset_index_2d.x)),
                      min(ny - 1, max(0, image_offset_index_2d.y)));
        int image_offset_index_1d_clamped =
            (nx * image_offset_index_2d_clamped.y) +
            image_offset_index_2d_clamped.x;
        unsigned char g_value = d_g[image_offset_index_1d_clamped];
        unsigned char b_value = d_b[image_offset_index_1d_clamped];
        // Desaturate red: replace it by the mean of green and blue.
        unsigned int gb_average = (g_value + b_value) / 2;
        d_r_output[image_offset_index_1d_clamped] = (unsigned char)gb_average;
      }
    }
  }
}
// Splits a packed RGBA pixel into its first three channel bytes (alpha is
// dropped).
// NOTE(review): at the call sites the outputs are zipped into vectors named
// (red, blue, green) in that order, i.e. pixel.y lands in "d_blue" -- the
// naming looks swapped but is used consistently; confirm before relying on
// the channel labels.
struct splitChannels
    : thrust::unary_function<
          uchar4, thrust::tuple<unsigned char, unsigned char, unsigned char>> {
  __host__ __device__ thrust::tuple<unsigned char, unsigned char, unsigned char>
  operator()(uchar4 pixel) {
    return thrust::make_tuple(pixel.x, pixel.y, pixel.z);
  }
};
// Packs three 8-bit channel values back into a single RGBA pixel with a
// fully opaque alpha.
struct combineChannels
    : thrust::unary_function<
          thrust::tuple<unsigned char, unsigned char, unsigned char>, uchar4> {
  __host__ __device__ uchar4
  operator()(thrust::tuple<unsigned char, unsigned char, unsigned char> t) {
    const unsigned char first = thrust::get<0>(t);
    const unsigned char second = thrust::get<1>(t);
    const unsigned char third = thrust::get<2>(t);
    // Alpha is forced to 255 (fully opaque).
    return make_uchar4(first, second, third, 255);
  }
};
// Multiplies the three per-channel cross-correlation responses into one
// combined score (large only where all channels respond strongly).
// Fix: the thrust::unary_function template arguments were swapped
// (<result, argument> instead of <argument, result>); the base class only
// supplies argument_type/result_type typedefs, but they were wrong.
struct combineResponses
    : thrust::unary_function<thrust::tuple<float, float, float>, float> {
  __host__ __device__ float operator()(thrust::tuple<float, float, float> t) {
    return thrust::get<0>(t) * thrust::get<1>(t) * thrust::get<2>(t);
  }
};
// we need to save the input so we can remove the redeye for the output
// File-scope state written by preProcess and consumed by the later stages:
// the split input channels plus the image dimensions and template
// half-extents.
static thrust::device_vector<unsigned char> d_red;
static thrust::device_vector<unsigned char> d_blue;
static thrust::device_vector<unsigned char> d_green;
static size_t numRowsImg;
static size_t numColsImg;
static size_t templateHalfWidth;
static size_t templateHalfHeight;
// return types are void since any internal error will be handled by quitting
// no point in returning error codes...
void preProcess(unsigned int **inputVals, unsigned int **inputPos,
unsigned int **outputVals, unsigned int **outputPos,
size_t &numElem, const std::string &filename,
const std::string &templateFilename) {
// make sure the context initializes ok
checkCudaErrors(hipFree(0));
uchar4 *inImg;
uchar4 *eyeTemplate;
size_t numRowsTemplate, numColsTemplate;
loadImageRGBA(filename, &inImg, &numRowsImg, &numColsImg);
loadImageRGBA(templateFilename, &eyeTemplate, &numRowsTemplate,
&numColsTemplate);
templateHalfWidth = (numColsTemplate - 1) / 2;
templateHalfHeight = (numRowsTemplate - 1) / 2;
// we need to split each image into its separate channels
// use thrust to demonstrate basic uses
numElem = numRowsImg * numColsImg;
size_t templateSize = numRowsTemplate * numColsTemplate;
thrust::device_vector<uchar4> d_Img(inImg, inImg + numRowsImg * numColsImg);
thrust::device_vector<uchar4> d_Template(
eyeTemplate, eyeTemplate + numRowsTemplate * numColsTemplate);
d_red.resize(numElem);
d_blue.resize(numElem);
d_green.resize(numElem);
thrust::device_vector<unsigned char> d_red_template(templateSize);
thrust::device_vector<unsigned char> d_blue_template(templateSize);
thrust::device_vector<unsigned char> d_green_template(templateSize);
// split the image
thrust::transform(d_Img.begin(), d_Img.end(),
thrust::make_zip_iterator(thrust::make_tuple(
d_red.begin(), d_blue.begin(), d_green.begin())),
splitChannels());
// split the template
thrust::transform(d_Template.begin(), d_Template.end(),
thrust::make_zip_iterator(thrust::make_tuple(
d_red_template.begin(), d_blue_template.begin(),
d_green_template.begin())),
splitChannels());
thrust::device_vector<float> d_red_response(numElem);
thrust::device_vector<float> d_blue_response(numElem);
thrust::device_vector<float> d_green_response(numElem);
// need to compute the mean for each template channel
unsigned int r_sum =
thrust::reduce(d_red_template.begin(), d_red_template.end(), 0);
unsigned int b_sum =
thrust::reduce(d_blue_template.begin(), d_blue_template.end(), 0);
unsigned int g_sum =
thrust::reduce(d_green_template.begin(), d_green_template.end(), 0);
float r_mean = (double)r_sum / templateSize;
float b_mean = (double)b_sum / templateSize;
float g_mean = (double)g_sum / templateSize;
const dim3 blockSize(32, 8, 1);
const dim3 gridSize((numColsImg + blockSize.x - 1) / blockSize.x,
(numRowsImg + blockSize.y - 1) / blockSize.y, 1);
// now compute the cross-correlations for each channel
hipLaunchKernelGGL(( naive_normalized_cross_correlation), dim3(gridSize), dim3(blockSize), 0, 0,
thrust::raw_pointer_cast(d_red_response.data()),
thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_red_template.data()), numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, r_mean);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( naive_normalized_cross_correlation), dim3(gridSize), dim3(blockSize), 0, 0,
thrust::raw_pointer_cast(d_blue_response.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_blue_template.data()), numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, b_mean);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( naive_normalized_cross_correlation), dim3(gridSize), dim3(blockSize), 0, 0,
thrust::raw_pointer_cast(d_green_response.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_green_template.data()), numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, g_mean);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// generate combined response - multiply all channels together
thrust::device_vector<float> d_combined_response(numElem);
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(d_red_response.begin(),
d_blue_response.begin(),
d_green_response.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_red_response.end(), d_blue_response.end(), d_green_response.end())),
d_combined_response.begin(), combineResponses());
// find max/min of response
typedef thrust::device_vector<float>::iterator floatIt;
thrust::pair<floatIt, floatIt> minmax = thrust::minmax_element(
d_combined_response.begin(), d_combined_response.end());
float bias = *minmax.first;
// we need to make all the numbers positive so that the students can sort them
// without any bit twiddling
thrust::transform(d_combined_response.begin(), d_combined_response.end(),
thrust::make_constant_iterator(-bias),
d_combined_response.begin(), thrust::plus<float>());
// now we need to create the 1-D coordinates that will be attached to the keys
thrust::device_vector<unsigned int> coords(numElem);
thrust::sequence(coords.begin(), coords.end()); //[0, ..., numElem - 1]
// allocate memory for output and copy since our device vectors will go out of
// scope and be deleted
checkCudaErrors(hipMalloc(inputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMalloc(inputPos, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMalloc(outputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMalloc(outputPos, sizeof(unsigned int) * numElem));
hipMemcpy(*inputVals, thrust::raw_pointer_cast(d_combined_response.data()),
sizeof(unsigned int) * numElem, hipMemcpyDeviceToDevice);
hipMemcpy(*inputPos, thrust::raw_pointer_cast(coords.data()),
sizeof(unsigned int) * numElem, hipMemcpyDeviceToDevice);
checkCudaErrors(hipMemset(*outputVals, 0, sizeof(unsigned int) * numElem));
checkCudaErrors(hipMemset(*outputPos, 0, sizeof(unsigned int) * numElem));
}
void postProcess(const unsigned int *const outputVals,
const unsigned int *const outputPos, const size_t numElems,
const std::string &output_file) {
thrust::device_vector<unsigned char> d_output_red = d_red;
const dim3 blockSize(256, 1, 1);
const dim3 gridSize((40 + blockSize.x - 1) / blockSize.x, 1, 1);
hipLaunchKernelGGL(( remove_redness_from_coordinates), dim3(gridSize), dim3(blockSize), 0, 0,
outputPos, thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_output_red.data()), 40, numRowsImg, numColsImg,
9, 9);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
// combine the new red channel with original blue and green for output
thrust::device_vector<uchar4> d_outputImg(numElems);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(
d_output_red.begin(), d_blue.begin(), d_green.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_output_red.end(), d_blue.end(), d_green.end())),
d_outputImg.begin(), combineChannels());
thrust::host_vector<uchar4> h_Img = d_outputImg;
saveImageRGBA(&h_Img[0], numRowsImg, numColsImg, output_file);
// Clear the global vectors otherwise something goes wrong trying to free them
d_red.clear();
d_red.shrink_to_fit();
d_blue.clear();
d_blue.shrink_to_fit();
d_green.clear();
d_green.shrink_to_fit();
}
| 6c170f9aa46ef3b91665369b09807e9323f943cb.cu | #include "utils.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <string>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/extrema.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include "loadSaveImage.h"
#include <stdio.h>
// simple cross correlation kernel copied from Mike's IPython Notebook
__global__ void naive_normalized_cross_correlation(
float *d_response, unsigned char *d_original, unsigned char *d_template,
int num_pixels_y, int num_pixels_x, int template_half_height,
int template_height, int template_half_width, int template_width,
int template_size, float template_mean) {
int ny = num_pixels_y;
int nx = num_pixels_x;
int knx = template_width;
int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x,
(blockIdx.y * blockDim.y) + threadIdx.y);
int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;
if (image_index_2d.x < nx && image_index_2d.y < ny) {
//
// compute image mean
//
float image_sum = 0.0f;
for (int y = -template_half_height; y <= template_half_height; y++) {
for (int x = -template_half_width; x <= template_half_width; x++) {
int2 image_offset_index_2d =
make_int2(image_index_2d.x + x, image_index_2d.y + y);
int2 image_offset_index_2d_clamped =
make_int2(min(nx - 1, max(0, image_offset_index_2d.x)),
min(ny - 1, max(0, image_offset_index_2d.y)));
int image_offset_index_1d_clamped =
(nx * image_offset_index_2d_clamped.y) +
image_offset_index_2d_clamped.x;
unsigned char image_offset_value =
d_original[image_offset_index_1d_clamped];
image_sum += (float)image_offset_value;
}
}
float image_mean = image_sum / (float)template_size;
//
// compute sums
//
float sum_of_image_template_diff_products = 0.0f;
float sum_of_squared_image_diffs = 0.0f;
float sum_of_squared_template_diffs = 0.0f;
for (int y = -template_half_height; y <= template_half_height; y++) {
for (int x = -template_half_width; x <= template_half_width; x++) {
int2 image_offset_index_2d =
make_int2(image_index_2d.x + x, image_index_2d.y + y);
int2 image_offset_index_2d_clamped =
make_int2(min(nx - 1, max(0, image_offset_index_2d.x)),
min(ny - 1, max(0, image_offset_index_2d.y)));
int image_offset_index_1d_clamped =
(nx * image_offset_index_2d_clamped.y) +
image_offset_index_2d_clamped.x;
unsigned char image_offset_value =
d_original[image_offset_index_1d_clamped];
float image_diff = (float)image_offset_value - image_mean;
int2 template_index_2d =
make_int2(x + template_half_width, y + template_half_height);
int template_index_1d =
(knx * template_index_2d.y) + template_index_2d.x;
unsigned char template_value = d_template[template_index_1d];
float template_diff = template_value - template_mean;
float image_template_diff_product = image_offset_value * template_diff;
float squared_image_diff = image_diff * image_diff;
float squared_template_diff = template_diff * template_diff;
sum_of_image_template_diff_products += image_template_diff_product;
sum_of_squared_image_diffs += squared_image_diff;
sum_of_squared_template_diffs += squared_template_diff;
}
}
//
// compute final result
//
float result_value = 0.0f;
if (sum_of_squared_image_diffs != 0 && sum_of_squared_template_diffs != 0) {
result_value =
sum_of_image_template_diff_products /
sqrt(sum_of_squared_image_diffs * sum_of_squared_template_diffs);
}
d_response[image_index_1d] = result_value;
}
}
__global__ void remove_redness_from_coordinates(
const unsigned int *d_coordinates, unsigned char *d_r, unsigned char *d_b,
unsigned char *d_g, unsigned char *d_r_output, int num_coordinates,
int num_pixels_y, int num_pixels_x, int template_half_height,
int template_half_width) {
int ny = num_pixels_y;
int nx = num_pixels_x;
int global_index_1d = (blockIdx.x * blockDim.x) + threadIdx.x;
int imgSize = num_pixels_x * num_pixels_y;
if (global_index_1d < num_coordinates) {
unsigned int image_index_1d = d_coordinates[imgSize - global_index_1d - 1];
ushort2 image_index_2d = make_ushort2(image_index_1d % num_pixels_x,
image_index_1d / num_pixels_x);
for (int y = image_index_2d.y - template_half_height;
y <= image_index_2d.y + template_half_height; y++) {
for (int x = image_index_2d.x - template_half_width;
x <= image_index_2d.x + template_half_width; x++) {
int2 image_offset_index_2d = make_int2(x, y);
int2 image_offset_index_2d_clamped =
make_int2(min(nx - 1, max(0, image_offset_index_2d.x)),
min(ny - 1, max(0, image_offset_index_2d.y)));
int image_offset_index_1d_clamped =
(nx * image_offset_index_2d_clamped.y) +
image_offset_index_2d_clamped.x;
unsigned char g_value = d_g[image_offset_index_1d_clamped];
unsigned char b_value = d_b[image_offset_index_1d_clamped];
unsigned int gb_average = (g_value + b_value) / 2;
d_r_output[image_offset_index_1d_clamped] = (unsigned char)gb_average;
}
}
}
}
struct splitChannels
: thrust::unary_function<
uchar4, thrust::tuple<unsigned char, unsigned char, unsigned char>> {
__host__ __device__ thrust::tuple<unsigned char, unsigned char, unsigned char>
operator()(uchar4 pixel) {
return thrust::make_tuple(pixel.x, pixel.y, pixel.z);
}
};
struct combineChannels
: thrust::unary_function<
thrust::tuple<unsigned char, unsigned char, unsigned char>, uchar4> {
__host__ __device__ uchar4
operator()(thrust::tuple<unsigned char, unsigned char, unsigned char> t) {
return make_uchar4(thrust::get<0>(t), thrust::get<1>(t), thrust::get<2>(t),
255);
}
};
struct combineResponses
: thrust::unary_function<float, thrust::tuple<float, float, float>> {
__host__ __device__ float operator()(thrust::tuple<float, float, float> t) {
return thrust::get<0>(t) * thrust::get<1>(t) * thrust::get<2>(t);
}
};
// we need to save the input so we can remove the redeye for the output
static thrust::device_vector<unsigned char> d_red;
static thrust::device_vector<unsigned char> d_blue;
static thrust::device_vector<unsigned char> d_green;
static size_t numRowsImg;
static size_t numColsImg;
static size_t templateHalfWidth;
static size_t templateHalfHeight;
// return types are void since any internal error will be handled by quitting
// no point in returning error codes...
void preProcess(unsigned int **inputVals, unsigned int **inputPos,
unsigned int **outputVals, unsigned int **outputPos,
size_t &numElem, const std::string &filename,
const std::string &templateFilename) {
// make sure the context initializes ok
checkCudaErrors(cudaFree(0));
uchar4 *inImg;
uchar4 *eyeTemplate;
size_t numRowsTemplate, numColsTemplate;
loadImageRGBA(filename, &inImg, &numRowsImg, &numColsImg);
loadImageRGBA(templateFilename, &eyeTemplate, &numRowsTemplate,
&numColsTemplate);
templateHalfWidth = (numColsTemplate - 1) / 2;
templateHalfHeight = (numRowsTemplate - 1) / 2;
// we need to split each image into its separate channels
// use thrust to demonstrate basic uses
numElem = numRowsImg * numColsImg;
size_t templateSize = numRowsTemplate * numColsTemplate;
thrust::device_vector<uchar4> d_Img(inImg, inImg + numRowsImg * numColsImg);
thrust::device_vector<uchar4> d_Template(
eyeTemplate, eyeTemplate + numRowsTemplate * numColsTemplate);
d_red.resize(numElem);
d_blue.resize(numElem);
d_green.resize(numElem);
thrust::device_vector<unsigned char> d_red_template(templateSize);
thrust::device_vector<unsigned char> d_blue_template(templateSize);
thrust::device_vector<unsigned char> d_green_template(templateSize);
// split the image
thrust::transform(d_Img.begin(), d_Img.end(),
thrust::make_zip_iterator(thrust::make_tuple(
d_red.begin(), d_blue.begin(), d_green.begin())),
splitChannels());
// split the template
thrust::transform(d_Template.begin(), d_Template.end(),
thrust::make_zip_iterator(thrust::make_tuple(
d_red_template.begin(), d_blue_template.begin(),
d_green_template.begin())),
splitChannels());
thrust::device_vector<float> d_red_response(numElem);
thrust::device_vector<float> d_blue_response(numElem);
thrust::device_vector<float> d_green_response(numElem);
// need to compute the mean for each template channel
unsigned int r_sum =
thrust::reduce(d_red_template.begin(), d_red_template.end(), 0);
unsigned int b_sum =
thrust::reduce(d_blue_template.begin(), d_blue_template.end(), 0);
unsigned int g_sum =
thrust::reduce(d_green_template.begin(), d_green_template.end(), 0);
float r_mean = (double)r_sum / templateSize;
float b_mean = (double)b_sum / templateSize;
float g_mean = (double)g_sum / templateSize;
const dim3 blockSize(32, 8, 1);
const dim3 gridSize((numColsImg + blockSize.x - 1) / blockSize.x,
(numRowsImg + blockSize.y - 1) / blockSize.y, 1);
// now compute the cross-correlations for each channel
naive_normalized_cross_correlation<<<gridSize, blockSize>>>(
thrust::raw_pointer_cast(d_red_response.data()),
thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_red_template.data()), numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, r_mean);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
naive_normalized_cross_correlation<<<gridSize, blockSize>>>(
thrust::raw_pointer_cast(d_blue_response.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_blue_template.data()), numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, b_mean);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
naive_normalized_cross_correlation<<<gridSize, blockSize>>>(
thrust::raw_pointer_cast(d_green_response.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_green_template.data()), numRowsImg, numColsImg,
templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate,
numRowsTemplate * numColsTemplate, g_mean);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// generate combined response - multiply all channels together
thrust::device_vector<float> d_combined_response(numElem);
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(d_red_response.begin(),
d_blue_response.begin(),
d_green_response.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_red_response.end(), d_blue_response.end(), d_green_response.end())),
d_combined_response.begin(), combineResponses());
// find max/min of response
typedef thrust::device_vector<float>::iterator floatIt;
thrust::pair<floatIt, floatIt> minmax = thrust::minmax_element(
d_combined_response.begin(), d_combined_response.end());
float bias = *minmax.first;
// we need to make all the numbers positive so that the students can sort them
// without any bit twiddling
thrust::transform(d_combined_response.begin(), d_combined_response.end(),
thrust::make_constant_iterator(-bias),
d_combined_response.begin(), thrust::plus<float>());
// now we need to create the 1-D coordinates that will be attached to the keys
thrust::device_vector<unsigned int> coords(numElem);
thrust::sequence(coords.begin(), coords.end()); //[0, ..., numElem - 1]
// allocate memory for output and copy since our device vectors will go out of
// scope and be deleted
checkCudaErrors(cudaMalloc(inputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMalloc(inputPos, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMalloc(outputVals, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMalloc(outputPos, sizeof(unsigned int) * numElem));
cudaMemcpy(*inputVals, thrust::raw_pointer_cast(d_combined_response.data()),
sizeof(unsigned int) * numElem, cudaMemcpyDeviceToDevice);
cudaMemcpy(*inputPos, thrust::raw_pointer_cast(coords.data()),
sizeof(unsigned int) * numElem, cudaMemcpyDeviceToDevice);
checkCudaErrors(cudaMemset(*outputVals, 0, sizeof(unsigned int) * numElem));
checkCudaErrors(cudaMemset(*outputPos, 0, sizeof(unsigned int) * numElem));
}
void postProcess(const unsigned int *const outputVals,
const unsigned int *const outputPos, const size_t numElems,
const std::string &output_file) {
thrust::device_vector<unsigned char> d_output_red = d_red;
const dim3 blockSize(256, 1, 1);
const dim3 gridSize((40 + blockSize.x - 1) / blockSize.x, 1, 1);
remove_redness_from_coordinates<<<gridSize, blockSize>>>(
outputPos, thrust::raw_pointer_cast(d_red.data()),
thrust::raw_pointer_cast(d_blue.data()),
thrust::raw_pointer_cast(d_green.data()),
thrust::raw_pointer_cast(d_output_red.data()), 40, numRowsImg, numColsImg,
9, 9);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
// combine the new red channel with original blue and green for output
thrust::device_vector<uchar4> d_outputImg(numElems);
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(
d_output_red.begin(), d_blue.begin(), d_green.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
d_output_red.end(), d_blue.end(), d_green.end())),
d_outputImg.begin(), combineChannels());
thrust::host_vector<uchar4> h_Img = d_outputImg;
saveImageRGBA(&h_Img[0], numRowsImg, numColsImg, output_file);
// Clear the global vectors otherwise something goes wrong trying to free them
d_red.clear();
d_red.shrink_to_fit();
d_blue.clear();
d_blue.shrink_to_fit();
d_green.clear();
d_green.shrink_to_fit();
}
|
fba0456d72e24e9e5e5e4a32b636d0c84f0f5da7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Implements a threadsafe binary heap for use on a GPU
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
// includes, project
#include "cutil.h"
#define BASETABLE_SIZE_MAX 1000000
#define MAX_ENTRIES 65536
__device__ __noinline__ void __tbegin() { }
__device__ __noinline__ void __tcommit() { }
typedef int key_t;
typedef int value_t;
struct TableEntry {
key_t mKey;
value_t mValue;
unsigned mNext;
};
typedef struct TableEntry tTableEntry;
struct BaseEntry {
unsigned mIndex;
int mLock;
};
struct HashTable {
BaseEntry mValues[BASETABLE_SIZE_MAX];
};
typedef struct HashTable tHashTable;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, const char** argv);
__global__ void kernel_buildtable_tm( tHashTable* g_hashtable, tTableEntry* g_entrypool, unsigned* g_keys, int g_baseTableSize );
__global__ void kernel_buildtable_atomic( tHashTable* g_hashtable, tTableEntry* g_entrypool, unsigned* g_keys, int g_baseTableSize );
extern "C"
int computeGold( int* gpuData, const int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, const char** argv)
{
runTest( argc, argv);
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest( int argc, const char **argv)
{
hipDeviceProp_t deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev;
CUT_DEVICE_INIT(argc, argv);
CUDA_SAFE_CALL(hipChooseDevice(&dev, &deviceProp));
CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev));
if(deviceProp.major > 1 || deviceProp.minor > 0)
{
printf("Using Device %d: \"%s\"\n", dev, deviceProp.name);
CUDA_SAFE_CALL(hipSetDevice(dev));
}
else
{
printf("There is no device supporting CUDA compute capability 1.1. Hopefully using emu\n");
//CUT_EXIT(argc, argv);
}
bool useTM = false;
useTM = cutCheckCmdLineFlag(argc, argv, "tm");
unsigned int timer = 0;
CUT_SAFE_CALL( cutCreateTimer( &timer));
CUT_SAFE_CALL( cutStartTimer( timer));
srand(2011); // set seed for rand()
int numThreads = 192;
int numBlocks = 120;
int baseTableSize = 8192;
cutGetCmdLineArgumenti(argc, argv, "numThreads", &numThreads);
cutGetCmdLineArgumenti(argc, argv, "numBlocks", &numBlocks);
cutGetCmdLineArgumenti(argc, argv, "hashEntries", &baseTableSize);
assert(numThreads >= 0);
assert(numBlocks >= 0);
assert(MAX_ENTRIES > (numThreads * numBlocks + 1));
assert(baseTableSize >= 0 && baseTableSize <= BASETABLE_SIZE_MAX);
printf("Number of hash entries = %u\n", baseTableSize);
printf("Number of threads = %u\n", numThreads*numBlocks);
// allocate host copy:
tHashTable* h_hashtable = (tHashTable*)(calloc(1, sizeof(tHashTable)));
tTableEntry* h_entries = (tTableEntry*)(calloc(MAX_ENTRIES, sizeof(tTableEntry)));
unsigned* h_keys = (unsigned*)(calloc(MAX_ENTRIES, sizeof(unsigned)));
// and device copy
tHashTable* d_hashtable;
tTableEntry* d_entries;
unsigned* d_keys;
// Build keys
for(unsigned i=0; i<MAX_ENTRIES; i++) {
h_keys[i] = rand();
}
CUDA_SAFE_CALL( hipMalloc( (void**) &d_hashtable, sizeof(tHashTable)));
CUDA_SAFE_CALL( hipMemcpy( d_hashtable, h_hashtable, sizeof(tHashTable), hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMalloc( (void**) &d_entries, sizeof(tTableEntry)*MAX_ENTRIES ) );
CUDA_SAFE_CALL( hipMemcpy( d_entries, h_entries, sizeof(tTableEntry)*MAX_ENTRIES, hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMalloc( (void**) &d_keys, sizeof(unsigned)*MAX_ENTRIES ) );
CUDA_SAFE_CALL( hipMemcpy( d_keys, h_keys, sizeof(unsigned)*MAX_ENTRIES, hipMemcpyHostToDevice) );
// execute the first kernel, this throws some data into the kernel for testing...
if(useTM) {
hipLaunchKernelGGL(( kernel_buildtable_tm), dim3(numBlocks), dim3(numThreads), 0, 0, d_hashtable, d_entries, d_keys, baseTableSize);
} else {
hipLaunchKernelGGL(( kernel_buildtable_atomic), dim3(numBlocks), dim3(numThreads), 0, 0, d_hashtable, d_entries, d_keys, baseTableSize);
}
CUT_CHECK_ERROR("Kernel execution failed");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
//Copy result from device to host
CUDA_SAFE_CALL( hipMemcpy( h_hashtable, d_hashtable, sizeof(tHashTable),
hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipMemcpy( h_entries, d_entries,
sizeof(tTableEntry)*MAX_ENTRIES, hipMemcpyDeviceToHost) );
#ifdef DEBUG
for( int i = 0; i < MAX_ENTRIES; ++i ) {
if( h_entries[i].mValue )
printf(" %u : %u -> %u\n", i, h_entries[i].mKey, h_entries[i].mValue );
}
#endif
#define DEBUG
// error checking
int nInsertedEntries = 0;
for (int h = 0; h < baseTableSize; h++) {
unsigned entry_id = h_hashtable->mValues[h].mIndex;
while (entry_id != 0) {
tTableEntry& tentry = h_entries[entry_id];
#ifdef DEBUG
if ( (tentry.mKey != h_keys[tentry.mValue]) || (tentry.mKey % baseTableSize != h)) {
printf(" table[%d] -> %u : %u -> %u\n", h, entry_id, tentry.mKey, tentry.mValue);
}
#else
assert(tentry.mKey == h_keys[tentry.mValue]); // key-value consistency
assert((tentry.mKey % baseTableSize) == h); // key-hash consistency
#endif
entry_id = tentry.mNext;
nInsertedEntries += 1;
}
}
printf("nInsertedEntries = %d\n", nInsertedEntries);
assert(nInsertedEntries == (numThreads * numBlocks));
CUT_SAFE_CALL( cutStopTimer( timer));
printf( "Processing time: %f (ms)\n", cutGetTimerValue( timer));
CUT_SAFE_CALL( cutDeleteTimer( timer));
// cleanup memory
free(h_hashtable);
CUDA_SAFE_CALL(hipFree(d_hashtable));
printf("TEST PASSED\n");
}
__device__ void add_to_hash_tm( tHashTable *g_hashtable, tTableEntry* g_entrypool, unsigned key, unsigned value, int g_baseTableSize )
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned hash = key % g_baseTableSize;
unsigned pool_slot = tid+1; // reserve zero for null
BaseEntry *base = &g_hashtable->mValues[hash];
tTableEntry *ent = &g_entrypool[pool_slot];
// need something fancier if each thread can have more than one entry
// TODO: implement something like Hoard (ASPLOS 2000) for CUDA
ent->mKey = key;
ent->mValue = value;
ent->mNext = base->mIndex;
g_hashtable->mValues[hash].mIndex = pool_slot;
}
__device__ void add_to_hash_atomic( tHashTable *g_hashtable, tTableEntry* g_entrypool, unsigned key, unsigned value, int g_baseTableSize )
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned hash = key % g_baseTableSize;
unsigned pool_slot = tid+1; // reserve zero for null
BaseEntry *base = &g_hashtable->mValues[hash];
tTableEntry *ent = &g_entrypool[pool_slot];
// need something fancier if each thread can have more than one entry
// TODO: implement something like Hoard (ASPLOS 2000) for CUDA
unsigned done = 0;
while ( !done ) {
if(atomicCAS(&base->mLock, 0, 1) == 0) {
ent->mKey = key;
ent->mValue = value;
ent->mNext = base->mIndex;
g_hashtable->mValues[hash].mIndex = pool_slot;
base->mLock = 0;
done = 1;
}
}
}
__global__ void kernel_buildtable_tm( tHashTable* g_hashtable, tTableEntry* g_entrypool, unsigned* g_keys, int g_baseTableSize )
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned key, value;
__tbegin();
key = g_keys[tid+1];
value = tid+1;
add_to_hash_tm(g_hashtable,g_entrypool,key,value,g_baseTableSize);
__tcommit();
}
__global__ void kernel_buildtable_atomic( tHashTable* g_hashtable, tTableEntry* g_entrypool, unsigned* g_keys, int g_baseTableSize )
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned key, value;
key = g_keys[tid+1];
value = tid+1;
add_to_hash_atomic(g_hashtable,g_entrypool,key,value,g_baseTableSize);
}
| fba0456d72e24e9e5e5e4a32b636d0c84f0f5da7.cu | /* Implements a threadsafe binary heap for use on a GPU
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
// includes, project
#include "cutil.h"
#define BASETABLE_SIZE_MAX 1000000
#define MAX_ENTRIES 65536
__device__ __noinline__ void __tbegin() { }
__device__ __noinline__ void __tcommit() { }
typedef int key_t;
typedef int value_t;
struct TableEntry {
key_t mKey;
value_t mValue;
unsigned mNext;
};
typedef struct TableEntry tTableEntry;
struct BaseEntry {
unsigned mIndex;
int mLock;
};
struct HashTable {
BaseEntry mValues[BASETABLE_SIZE_MAX];
};
typedef struct HashTable tHashTable;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, const char** argv);
__global__ void kernel_buildtable_tm( tHashTable* g_hashtable, tTableEntry* g_entrypool, unsigned* g_keys, int g_baseTableSize );
__global__ void kernel_buildtable_atomic( tHashTable* g_hashtable, tTableEntry* g_entrypool, unsigned* g_keys, int g_baseTableSize );
extern "C"
int computeGold( int* gpuData, const int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, const char** argv)
{
runTest( argc, argv);
CUT_EXIT(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest( int argc, const char **argv)
{
cudaDeviceProp deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev;
CUT_DEVICE_INIT(argc, argv);
CUDA_SAFE_CALL(cudaChooseDevice(&dev, &deviceProp));
CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev));
if(deviceProp.major > 1 || deviceProp.minor > 0)
{
printf("Using Device %d: \"%s\"\n", dev, deviceProp.name);
CUDA_SAFE_CALL(cudaSetDevice(dev));
}
else
{
printf("There is no device supporting CUDA compute capability 1.1. Hopefully using emu\n");
//CUT_EXIT(argc, argv);
}
bool useTM = false;
useTM = cutCheckCmdLineFlag(argc, argv, "tm");
unsigned int timer = 0;
CUT_SAFE_CALL( cutCreateTimer( &timer));
CUT_SAFE_CALL( cutStartTimer( timer));
srand(2011); // set seed for rand()
int numThreads = 192;
int numBlocks = 120;
int baseTableSize = 8192;
cutGetCmdLineArgumenti(argc, argv, "numThreads", &numThreads);
cutGetCmdLineArgumenti(argc, argv, "numBlocks", &numBlocks);
cutGetCmdLineArgumenti(argc, argv, "hashEntries", &baseTableSize);
assert(numThreads >= 0);
assert(numBlocks >= 0);
assert(MAX_ENTRIES > (numThreads * numBlocks + 1));
assert(baseTableSize >= 0 && baseTableSize <= BASETABLE_SIZE_MAX);
printf("Number of hash entries = %u\n", baseTableSize);
printf("Number of threads = %u\n", numThreads*numBlocks);
// allocate host copy:
tHashTable* h_hashtable = (tHashTable*)(calloc(1, sizeof(tHashTable)));
tTableEntry* h_entries = (tTableEntry*)(calloc(MAX_ENTRIES, sizeof(tTableEntry)));
unsigned* h_keys = (unsigned*)(calloc(MAX_ENTRIES, sizeof(unsigned)));
// and device copy
tHashTable* d_hashtable;
tTableEntry* d_entries;
unsigned* d_keys;
// Build keys
for(unsigned i=0; i<MAX_ENTRIES; i++) {
h_keys[i] = rand();
}
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_hashtable, sizeof(tHashTable)));
CUDA_SAFE_CALL( cudaMemcpy( d_hashtable, h_hashtable, sizeof(tHashTable), cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_entries, sizeof(tTableEntry)*MAX_ENTRIES ) );
CUDA_SAFE_CALL( cudaMemcpy( d_entries, h_entries, sizeof(tTableEntry)*MAX_ENTRIES, cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_keys, sizeof(unsigned)*MAX_ENTRIES ) );
CUDA_SAFE_CALL( cudaMemcpy( d_keys, h_keys, sizeof(unsigned)*MAX_ENTRIES, cudaMemcpyHostToDevice) );
// execute the first kernel, this throws some data into the kernel for testing...
if(useTM) {
kernel_buildtable_tm<<<numBlocks, numThreads>>>(d_hashtable, d_entries, d_keys, baseTableSize);
} else {
kernel_buildtable_atomic<<<numBlocks, numThreads>>>(d_hashtable, d_entries, d_keys, baseTableSize);
}
CUT_CHECK_ERROR("Kernel execution failed");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
//Copy result from device to host
CUDA_SAFE_CALL( cudaMemcpy( h_hashtable, d_hashtable, sizeof(tHashTable),
cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaMemcpy( h_entries, d_entries,
sizeof(tTableEntry)*MAX_ENTRIES, cudaMemcpyDeviceToHost) );
#ifdef DEBUG
for( int i = 0; i < MAX_ENTRIES; ++i ) {
if( h_entries[i].mValue )
printf(" %u : %u -> %u\n", i, h_entries[i].mKey, h_entries[i].mValue );
}
#endif
#define DEBUG
// error checking
int nInsertedEntries = 0;
for (int h = 0; h < baseTableSize; h++) {
unsigned entry_id = h_hashtable->mValues[h].mIndex;
while (entry_id != 0) {
tTableEntry& tentry = h_entries[entry_id];
#ifdef DEBUG
if ( (tentry.mKey != h_keys[tentry.mValue]) || (tentry.mKey % baseTableSize != h)) {
printf(" table[%d] -> %u : %u -> %u\n", h, entry_id, tentry.mKey, tentry.mValue);
}
#else
assert(tentry.mKey == h_keys[tentry.mValue]); // key-value consistency
assert((tentry.mKey % baseTableSize) == h); // key-hash consistency
#endif
entry_id = tentry.mNext;
nInsertedEntries += 1;
}
}
printf("nInsertedEntries = %d\n", nInsertedEntries);
assert(nInsertedEntries == (numThreads * numBlocks));
CUT_SAFE_CALL( cutStopTimer( timer));
printf( "Processing time: %f (ms)\n", cutGetTimerValue( timer));
CUT_SAFE_CALL( cutDeleteTimer( timer));
// cleanup memory
free(h_hashtable);
CUDA_SAFE_CALL(cudaFree(d_hashtable));
printf("TEST PASSED\n");
}
__device__ void add_to_hash_tm( tHashTable *g_hashtable, tTableEntry* g_entrypool, unsigned key, unsigned value, int g_baseTableSize )
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned hash = key % g_baseTableSize;
unsigned pool_slot = tid+1; // reserve zero for null
BaseEntry *base = &g_hashtable->mValues[hash];
tTableEntry *ent = &g_entrypool[pool_slot];
// need something fancier if each thread can have more than one entry
// TODO: implement something like Hoard (ASPLOS 2000) for CUDA
ent->mKey = key;
ent->mValue = value;
ent->mNext = base->mIndex;
g_hashtable->mValues[hash].mIndex = pool_slot;
}
// Inserts (key, value) into the chained hash table using a per-bucket spin
// lock (BaseEntry::mLock, acquired via atomicCAS). Each thread owns exactly
// one pre-assigned entry-pool slot: its global thread id + 1 (slot 0 is the
// null terminator).
__device__ void add_to_hash_atomic( tHashTable *g_hashtable, tTableEntry* g_entrypool, unsigned key, unsigned value, int g_baseTableSize )
{
    const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned hash = key % g_baseTableSize;
    unsigned pool_slot = tid+1; // reserve zero for null
    BaseEntry *base = &g_hashtable->mValues[hash];
    tTableEntry *ent = &g_entrypool[pool_slot];
    // need something fancier if each thread can have more than one entry
    // TODO: implement something like Hoard (ASPLOS 2000) for CUDA
    unsigned done = 0;
    while ( !done ) {
        // Only the thread that wins the CAS links its entry this iteration;
        // the others spin and retry.
        if(atomicCAS(&base->mLock, 0, 1) == 0) {
            ent->mKey = key;
            ent->mValue = value;
            ent->mNext = base->mIndex;
            g_hashtable->mValues[hash].mIndex = pool_slot;
            // FIX: make the entry and head-pointer stores globally visible
            // *before* the lock is observed as free, and release the lock with
            // an atomic so the store cannot be reordered or kept in a cache.
            // The original plain store (base->mLock = 0) with no fence let the
            // next lock holder read a stale chain head.
            __threadfence();
            atomicExch(&base->mLock, 0);
            done = 1;
        }
    }
}
// One thread per key: thread i inserts g_keys[i+1] with value i+1 into the
// hash table, with the whole insertion wrapped in a transaction
// (__tbegin/__tcommit) for conflict resolution.
__global__ void kernel_buildtable_tm( tHashTable* g_hashtable, tTableEntry* g_entrypool, unsigned* g_keys, int g_baseTableSize )
{
    const unsigned slot = blockIdx.x * blockDim.x + threadIdx.x + 1; // slot 0 reserved
    __tbegin();
    add_to_hash_tm(g_hashtable, g_entrypool, g_keys[slot], slot, g_baseTableSize);
    __tcommit();
}
// One thread per key: thread i inserts g_keys[i+1] with value i+1 into the
// hash table via the per-bucket spin-lock insertion path.
__global__ void kernel_buildtable_atomic( tHashTable* g_hashtable, tTableEntry* g_entrypool, unsigned* g_keys, int g_baseTableSize )
{
    const unsigned slot = blockIdx.x * blockDim.x + threadIdx.x + 1; // slot 0 reserved
    add_to_hash_atomic(g_hashtable, g_entrypool, g_keys[slot], slot, g_baseTableSize);
}
|
39d03a9ca8df9d9285fb871dcbdd9117b7e27c0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* helloCUDA.cu */
/****************************************************************************/
/* */
/* (C) 2010 Texas Advanced Computing Center. */
/* */
/* For information, contact Frank Willmore: willmore@tacc.utexas.edu */
/* */
/* Shareable in accordance with TACC and University of Texas policies. */
/* */
/****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#define BLOCKS_PER_GRID 16
#define THREADS_PER_BLOCK 16
#define N_POPULATION 10000
__device__ char d_data_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK][N_POPULATION];
__device__ int d_sum_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
__device__ float d_mean_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
__device__ float d_std_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
// reduce over N_POPULATION
// One thread per sample row: adds up the N_POPULATION bytes of its row and
// writes the raw sum and the mean to the global d_sum_array / d_mean_array.
// Expected launch: 1D grid of BLOCKS_PER_GRID blocks x THREADS_PER_BLOCK threads.
__global__ void calculateMean()
{
    // Flat sample index of this thread (grid is 1D by construction).
    const int sample = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    int total = 0;
    for (int k = 0; k < N_POPULATION; ++k)
        total += d_data_array[sample][k];
    // Persist both the integer sum and the floating-point mean.
    d_sum_array[sample]  = total;
    d_mean_array[sample] = (float)total / N_POPULATION;
}
// use persistent data (sum) to calculate variance
// One thread per sample row: computes the population standard deviation of
// its row, reusing the mean written by calculateMean() (which must have run
// and completed first).
// Expected launch: 1D grid of BLOCKS_PER_GRID blocks x THREADS_PER_BLOCK threads.
__global__ void calculateStandardDeviation()
{
    int sample_number;
    float v_sum = 0.0f;
    float delta;
    float variance;
    int index;
    // get the sample number for this thread
    sample_number = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    // accumulate the sum of squared deviations from the mean
    for (index=0; index<N_POPULATION; index++)
    {
        delta = (float)d_data_array[sample_number][index] - d_mean_array[sample_number];
        v_sum += delta * delta;
    }
    variance = v_sum / N_POPULATION;
    // FIX: use the single-precision sqrtf; sqrt() is the double overload and
    // forces a silent float->double->float round trip on the device.
    d_std_array[sample_number] = sqrtf(variance);
}
// Host driver: fills the sample matrix with random bytes, uploads it, runs
// the mean and standard-deviation kernels, downloads the results, and prints
// sum/mean/stddev per sample row.
int main(int argc, char* argv[])
{
    int i;
    FILE *fptr;
    char h_data_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK][N_POPULATION];
    int h_sum_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
    float h_std_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
    float h_mean_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
    size_t size = sizeof(h_data_array);
    // Fill every sample row with random bytes from the OS entropy pool.
    // FIX: check that the file opened and that the read actually succeeded.
    fptr = fopen("/dev/urandom", "r");
    assert(fptr != NULL);
    size_t nItems = fread(h_data_array, size, 1, fptr);
    fclose(fptr);
    assert(nItems == 1);
    dim3 grid_dimension(BLOCKS_PER_GRID);
    dim3 block_dimension(THREADS_PER_BLOCK);
    /////////////////// sum / mean ///////////////////////////////
    // hipMemcpyToSymbol is synchronous here; no extra sync needed before the check.
    hipError_t r = hipMemcpyToSymbol(d_data_array, h_data_array, sizeof(h_data_array), 0, hipMemcpyHostToDevice);
    assert(r == hipSuccess);
    // FIX: the kernel was accidentally launched twice; launch it once.
    hipLaunchKernelGGL(( calculateMean), dim3(grid_dimension), dim3(block_dimension) , 0, 0, );
    // FIX: 'r' was never reassigned after the launches, so the asserts were
    // checking a stale value. Capture launch errors and execution errors.
    r = hipGetLastError();
    assert(r == hipSuccess);
    r = hipDeviceSynchronize();     // block until the device has completed
    assert(r == hipSuccess);
    r = hipMemcpyFromSymbol(h_sum_array, d_sum_array, sizeof(h_sum_array), 0, hipMemcpyDeviceToHost);
    assert(r == hipSuccess);
    /////////////////// standard deviation //////////////////////
    hipLaunchKernelGGL(( calculateStandardDeviation), dim3(grid_dimension), dim3(block_dimension) , 0, 0, );
    r = hipGetLastError();
    assert(r == hipSuccess);
    r = hipDeviceSynchronize();     // block until the device has completed
    assert(r == hipSuccess);
    r = hipMemcpyFromSymbol(h_mean_array, d_mean_array, sizeof(h_mean_array), 0, hipMemcpyDeviceToHost);
    assert(r == hipSuccess);
    r = hipMemcpyFromSymbol(h_std_array, d_std_array, sizeof(h_std_array), 0, hipMemcpyDeviceToHost);
    assert(r == hipSuccess);
    for (i=0; i< BLOCKS_PER_GRID * THREADS_PER_BLOCK; i++) printf("[%2d] = \t%d\t%f\t%f\n", i, h_sum_array[i], h_mean_array[i], h_std_array[i]);
    return 0;
}
| 39d03a9ca8df9d9285fb871dcbdd9117b7e27c0a.cu | /* helloCUDA.cu */
/****************************************************************************/
/* */
/* (C) 2010 Texas Advanced Computing Center. */
/* */
/* For information, contact Frank Willmore: willmore@tacc.utexas.edu */
/* */
/* Shareable in accordance with TACC and University of Texas policies. */
/* */
/****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#define BLOCKS_PER_GRID 16
#define THREADS_PER_BLOCK 16
#define N_POPULATION 10000
__device__ char d_data_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK][N_POPULATION];
__device__ int d_sum_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
__device__ float d_mean_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
__device__ float d_std_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
// reduce over N_POPULATION
// One thread per sample row: adds up the N_POPULATION bytes of its row and
// writes the raw sum and the mean to the global d_sum_array / d_mean_array.
// Expected launch: 1D grid of BLOCKS_PER_GRID blocks x THREADS_PER_BLOCK threads.
__global__ void calculateMean()
{
    // Flat sample index of this thread (grid is 1D by construction).
    const int sample = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    int total = 0;
    for (int k = 0; k < N_POPULATION; ++k)
        total += d_data_array[sample][k];
    // Persist both the integer sum and the floating-point mean.
    d_sum_array[sample]  = total;
    d_mean_array[sample] = (float)total / N_POPULATION;
}
// use persistent data (sum) to calculate variance
// One thread per sample row: computes the population standard deviation of
// its row, reusing the mean written by calculateMean() (which must have run
// and completed first).
// Expected launch: 1D grid of BLOCKS_PER_GRID blocks x THREADS_PER_BLOCK threads.
__global__ void calculateStandardDeviation()
{
    int sample_number;
    float v_sum = 0.0f;
    float delta;
    float variance;
    int index;
    // get the sample number for this thread
    sample_number = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    // accumulate the sum of squared deviations from the mean
    for (index=0; index<N_POPULATION; index++)
    {
        delta = (float)d_data_array[sample_number][index] - d_mean_array[sample_number];
        v_sum += delta * delta;
    }
    variance = v_sum / N_POPULATION;
    // FIX: use the single-precision sqrtf; sqrt() is the double overload and
    // forces a silent float->double->float round trip on the device.
    d_std_array[sample_number] = sqrtf(variance);
}
// Host driver: fills the sample matrix with random bytes, uploads it, runs
// the mean and standard-deviation kernels, downloads the results, and prints
// sum/mean/stddev per sample row.
int main(int argc, char* argv[])
{
    int i;
    FILE *fptr;
    char h_data_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK][N_POPULATION];
    int h_sum_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
    float h_std_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
    float h_mean_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
    size_t size = sizeof(h_data_array);
    // Fill every sample row with random bytes from the OS entropy pool.
    // FIX: check that the file opened and that the read actually succeeded.
    fptr = fopen("/dev/urandom", "r");
    assert(fptr != NULL);
    size_t nItems = fread(h_data_array, size, 1, fptr);
    fclose(fptr);
    assert(nItems == 1);
    dim3 grid_dimension(BLOCKS_PER_GRID);
    dim3 block_dimension(THREADS_PER_BLOCK);
    /////////////////// sum / mean ///////////////////////////////
    // cudaMemcpyToSymbol is synchronous here; no extra sync needed before the check.
    cudaError_t r = cudaMemcpyToSymbol(d_data_array, h_data_array, sizeof(h_data_array), 0, cudaMemcpyHostToDevice);
    assert(r == cudaSuccess);
    // FIX: the kernel was accidentally launched twice; launch it once.
    calculateMean<<< grid_dimension, block_dimension >>>();
    // FIX: 'r' was never reassigned after the launches, so the asserts were
    // checking a stale value. Capture launch errors and execution errors.
    // Also: cudaThreadSynchronize is deprecated; use cudaDeviceSynchronize.
    r = cudaGetLastError();
    assert(r == cudaSuccess);
    r = cudaDeviceSynchronize();    // block until the device has completed
    assert(r == cudaSuccess);
    r = cudaMemcpyFromSymbol(h_sum_array, d_sum_array, sizeof(h_sum_array), 0, cudaMemcpyDeviceToHost);
    assert(r == cudaSuccess);
    /////////////////// standard deviation //////////////////////
    calculateStandardDeviation<<< grid_dimension, block_dimension >>>();
    r = cudaGetLastError();
    assert(r == cudaSuccess);
    r = cudaDeviceSynchronize();    // block until the device has completed
    assert(r == cudaSuccess);
    r = cudaMemcpyFromSymbol(h_mean_array, d_mean_array, sizeof(h_mean_array), 0, cudaMemcpyDeviceToHost);
    assert(r == cudaSuccess);
    r = cudaMemcpyFromSymbol(h_std_array, d_std_array, sizeof(h_std_array), 0, cudaMemcpyDeviceToHost);
    assert(r == cudaSuccess);
    for (i=0; i< BLOCKS_PER_GRID * THREADS_PER_BLOCK; i++) printf("[%2d] = \t%d\t%f\t%f\n", i, h_sum_array[i], h_mean_array[i], h_std_array[i]);
    return 0;
}
|
8315a733936e4953034dc37e5ae284de475a2b29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
// MNRT License
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2010 Mathias Neumann, www.maneumann.com.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// 3. Neither the name Mathias Neumann, nor the names of contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \file GPU\photon_build.cu
///
/// \brief Kernels for photon map construction, specifically photon tracing.
///
/// \author Mathias Neumann
/// \date 09.04.2010
/// \ingroup globalillum
////////////////////////////////////////////////////////////////////////////////////////////////////
#include "KernelDefs.h"
#include "photon_dev.h"
#include "sample_dev.h"
/// Light data constant memory variable.
__constant__ LightData c_Lights;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \KERNELS
////////////////////////////////////////////////////////////////////////////////////////////////////
//@{
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_SpawnLightPhotons(uint photonOffset, uint numToSpawn,
/// float3 worldCenter, float worldRadius, PhotonData outPhotonSpawn)
///
/// \brief Spawns photons from the given light source.
///
/// The number of photons is given fixed and controls the number of threads to spawn, as
/// each thread handles a photon.
///
/// \author Mathias Neumann
/// \date 09.04.2010
///
/// \param photonOffset The photon offset (depends on how many photons spawned already). Is
/// used to compute distinct members of the halton sequence for all
/// spawned photons.
/// \param numToSpawn Number of photons to spawn.
/// \param worldCenter The world center. Used for directional lights.
/// \param worldRadius The world radius. Used for directional lights.
/// \param outPhotonSpawn Will contain the spawned photons. All previous contents are overwritten.
/// Remember to set the new photon count after kernel execution.
////////////////////////////////////////////////////////////////////////////////////////////////////
// One thread per photon: samples an origin, direction and flux for photon
// 'photonOffset + tid' from the light source in constant memory (c_Lights)
// and stores it into outPhotonSpawn at index tid.
__global__ void kernel_SpawnLightPhotons(uint photonOffset, uint numToSpawn,
                                         float3 worldCenter, float worldRadius,
                                         PhotonData outPhotonSpawn)
{
    uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid < numToSpawn)
    {
        // FIX: initialize defensively. The original left these uninitialized,
        // so an unhandled light type stored a photon built from garbage
        // registers. A zero-flux photon with a unit direction is harmless.
        float3 myPos  = make_float3(0.f, 0.f, 0.f);
        float3 myFlux = make_float3(0.f, 0.f, 0.f);
        float3 myDir  = make_float3(0.f, 0.f, 1.f);
        // Distinct Halton-sequence members for each photon ever spawned
        // (photonOffset accounts for photons from earlier spawn rounds).
        float rnd1 = dev_RadicalInverse(photonOffset + tid+1, 2);
        float rnd2 = dev_RadicalInverse(photonOffset + tid+1, 3);
        float3 ptL = c_Lights.position;
        LightType type = c_Lights.type;
        if(type == Light_Point)
        {
            myPos = ptL;
            // Note that point lights store intensity instead of emitted radiance.
            float3 intensity = c_Lights.L_emit;
            // Power (flux) emitted by a point light is found by integrating intensity over the
            // entire sphere of directions, see PBR page 603. This is exactly the same as we would
            // get when dividing the intensity by the PDF of sampling the point light source,
            // 1.f / (4.f * MN_PI).
            myFlux = intensity * 4.f * MN_PI;
            // Generate photons direction using QRNG. We use the 3D Halton sequence we can
            // generate from the radical inverse function. As proposed in "Physically based
            // rendering", we use the first 3 prime numbers as bases.
            myDir.x = 2.f * rnd1 - 1.f;
            myDir.y = 2.f * rnd2 - 1.f;
            myDir.z = 2.f * dev_RadicalInverse(photonOffset + tid+1, 5) - 1.f;
            // Avoid myDir = 0.
            if(myDir.x == myDir.y && myDir.y == myDir.z && myDir.z == 0.f)
                myDir.x = 1.f;
            // Normalize direction.
            myDir = normalize(myDir);
        }
        else if(type == Light_AreaDisc)
        {
            float3 discNormal = c_Lights.direction;
            float3 L_emit = c_Lights.L_emit;
            float discRadius = c_Lights.areaRadius;
            // Sample position on disc.
            myPos = dev_SampleGeneralDisc(ptL, discNormal, discRadius, rnd1, rnd2);
            // Cosine-sample direction.
            float rnd3 = dev_RadicalInverse(photonOffset + tid+1, 5);
            float rnd4 = dev_RadicalInverse(photonOffset + tid+1, 7);
            float pdfCosine;
            myDir = dev_SampleHemisphereCosine(discNormal, rnd3, rnd4, &pdfCosine);
            // Compute PDF for sampling directions from the area light. That is the product
            // of the PDF for sampling the ray origin myPos with respect to the surface area
            // with the PDF of sampling the direction.
            // See PBR, p704. Note that the PDF for sampling a point on the surface area is
            // just 1 / area.
            float surfaceArea = MN_PI * discRadius * discRadius;
            float pdf = pdfCosine / surfaceArea;
            // No need to check whether we are emiting to the wrong side.
            if(pdf != 0.f)
                myFlux = fabsf(dot(myDir, discNormal)) * L_emit / pdf;
            else
                myFlux = make_float3(0.f);
        }
        else if(type == Light_AreaRect)
        {
            float3 rectNormal = c_Lights.direction;
            float3 L_emit = c_Lights.L_emit;
            // Sample position on rect (ptL is a corner; v1/v2 span the rectangle).
            float3 v1 = c_Lights.areaV1;
            float3 v2 = c_Lights.areaV2;
            myPos = ptL + rnd1*v1 + rnd2*v2;
            // Cosine-sample direction.
            float rnd3 = dev_RadicalInverse(photonOffset + tid+1, 5);
            float rnd4 = dev_RadicalInverse(photonOffset + tid+1, 7);
            float pdfCosine;
            myDir = dev_SampleHemisphereCosine(rectNormal, rnd3, rnd4, &pdfCosine);
            // Compute PDF for sampling directions from the area light. That is the product
            // of the PDF for sampling the ray origin myPos with respect to the surface area
            // with the PDF of sampling the direction.
            // See PBR, p704. Note that the PDF for sampling a point on the surface area is
            // just 1 / area.
            float surfaceArea = length(v1) * length(v2);
            float pdf = pdfCosine / surfaceArea;
            // No need to check whether we are emiting to the wrong side.
            if(pdf != 0.f)
                myFlux = fabsf(dot(myDir, rectNormal)) * L_emit / pdf;
            else
                myFlux = make_float3(0.f);
        }
        else if(type == Light_Directional)
        {
            float3 lightDir = c_Lights.direction;
            float3 L_emit = c_Lights.L_emit;
            // Sample a point on a world-sized disc perpendicular to the light
            // direction, then offset it back along -lightDir so every photon
            // starts outside the scene bounds.
            float3 ptDisk = dev_SampleGeneralDisc(worldCenter, lightDir, worldRadius,
                rnd1, rnd2);
            float pdf = MN_INV_PI / (worldRadius*worldRadius);
            // Now set photon properties.
            myPos = ptDisk - worldRadius * lightDir; // Offset point
            myDir = lightDir;
            myFlux = L_emit / pdf;
        }
        // Store the photon in the spawn list.
        dev_PhotonStore(outPhotonSpawn, tid, myPos, myDir, myFlux);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_RussianRoulette(float* d_randoms, float contProbability,
/// float invContProbability, PhotonData ioPhotons, uint* d_ioIsValid)
///
/// \brief Marks photons for elimination using russian roulette.
///
/// Due to parallel exectution, this kernel does not perform the actual elimination. Only
/// a valid flag array is updated. However, photon powers are scaled according to russian
/// roulette.
///
/// \author Mathias Neumann
/// \date 23.06.2010
///
/// \param [in] d_randoms Uniform random numbers, one for each photon.
/// \param contProbability The continue probability.
/// \param invContProbability The inverse continue probability.
/// \param ioPhotons Photon data to consider. All photon powers are scaled by the inverse
/// continue probability according to the russian roulette scheme.
/// \param [in,out] d_ioIsValid Pass in the old valid flags (binary 0/1 array). For each eliminated
/// photon its flag is forced to 0.
////////////////////////////////////////////////////////////////////////////////////////////////////
// One thread per photon. Russian-roulette termination: photon i survives
// iff its random number is <= contProbability AND it was still valid.
// Whether or not it survives, its flux is scaled by 1/contProbability so the
// estimator stays unbiased (missing contributions of terminated paths,
// PBR p. 781).
__global__ void kernel_RussianRoulette(float* d_randoms, float contProbability,
                                       float invContProbability,
                                       PhotonData ioPhotons, uint* d_ioIsValid)
{
    const uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid >= ioPhotons.numPhotons)
        return;
    // Reading unconditionally is fine even for already-invalid photons.
    const bool survives = (d_randoms[tid] <= contProbability) && (d_ioIsValid[tid] != 0);
    d_ioIsValid[tid] = survives ? 1 : 0;
    // Scale the RGB flux only; the w component carries the spherical polar
    // coordinate of the photon direction and must stay untouched.
    float4 power = ioPhotons.d_powers[tid];
    power.x *= invContProbability;
    power.y *= invContProbability;
    power.z *= invContProbability;
    ioPhotons.d_powers[tid] = power;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_SpawnScatteredPhotons(PhotonData ioPhotons, float4* d_hitDiffClrs,
/// float4* d_hitSpecClrs, float4* d_normalG, float4* d_normalS, float* d_randoms1,
/// float* d_randoms2, float* d_randoms3, uint* d_outIsLastSpecular, uint* d_outIsValid)
///
/// \brief Spawns scattered photons for given photon data (inplace).
///
/// Each thread works on a single photon and generates a new, scattered photon. Up to
/// three random numbers are required for a single photon (BSDF selection, new direction
/// selection).
///
/// Right now I disabled BSDF selection and handle diffuse BRDFs only. Specular surfaces
/// were added, but never fully implemented, so that parts of MNRT are not ready for them.
///
/// \author Mathias Neumann
/// \date 12.04.2010
///
/// \param ioPhotons The photon data to update inplace.
/// \param [in] d_hitDiffClrs Diffuse color of the surface hit by each photon. Color in
/// \c xyz and transparency alpha in \c w.
/// \param [in] d_hitSpecClrs Specular color of the surface hit by each photon. Color
/// in \c xyz and index of refraction in \c w.
/// \param [in] d_normalG Geometric normal at photon intersection for each photon.
/// \param [in] d_normalS Shading normal at photon intersection for each photon.
/// \param [in] d_randoms1 First uniform random number array. One random number for
/// each photon.
/// \param [in] d_randoms2 Second uniform random number array. One random number for
/// each photon.
/// \param [in] d_randoms3 Third uniform random number array. One random number for
/// each photon.
/// \param [out] d_outIsLastSpecular Binary 0/1 array. Will contain 1 for photons that underwent
/// a specular reflection/transmission, else 0.
/// \param [out] d_outIsValid Binary 0/1 array. Will contain 1 for valid and 0 for
/// invalid photons.
////////////////////////////////////////////////////////////////////////////////////////////////////
// One thread per photon; scatters each photon at its current hit point
// in place: a new direction is sampled from the surface BSDF and the
// photon's flux is reweighted accordingly. Photons whose resulting flux
// is zero are flagged invalid for later compaction.
__global__ void kernel_SpawnScatteredPhotons(PhotonData ioPhotons,
                                             float4* d_hitDiffClrs, float4* d_hitSpecClrs,
                                             float4* d_normalG, float4* d_normalS,
                                             float* d_randoms1, float* d_randoms2, float* d_randoms3,
                                             uint* d_outIsLastSpecular, uint* d_outIsValid)
{
    uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid < ioPhotons.numPhotons)
    {
        // Read out source photon direction.
        // Directions are packed as spherical coordinates: azimuthal in
        // d_positions.w, polar in d_powers.w. w_o points back along the
        // incoming photon path (away from the surface).
        float azimuthal = ioPhotons.d_positions[tid].w;
        float polar = ioPhotons.d_powers[tid].w;
        float3 w_o = -dev_Spherical2Direction(azimuthal, polar);
        float4 clrDiffHit4 = d_hitDiffClrs[tid];
        float3 clrDiffHit = make_float3(clrDiffHit4.x, clrDiffHit4.y, clrDiffHit4.z);
        float4 clrSpecHit4 = d_hitSpecClrs[tid];
        float3 clrSpecHit = make_float3(clrSpecHit4.x, clrSpecHit4.y, clrSpecHit4.z);
        // Transparency alpha is packed into the w of the diffuse color.
        float transAlpha = clrDiffHit4.w;
        //float nTo = clrSpecHit4.w;
        float3 n_s = make_float3(d_normalS[tid]);
        // Calculate n_from/n_to in correct order depending on which direction the photon
        // is traveling.
        /*float indexRefrFromTo;
        if(dot(n_s, w_o) < 0.f)
            indexRefrFromTo = nTo;          // Swap: n_to/n_from (leaving medium).
        else
            indexRefrFromTo = 1.f / nTo;    // n_from = 1.f !*/
        // I use pseudo random numbers here because in PBR, page 781, "the advantages of
        // low-discrepancy points are mostly lost as more bounces occur".
        float rnd1 = d_randoms1[tid];
        float rnd2 = d_randoms2[tid];
        //float rnd3 = d_randoms3[tid];
        // Choose which BSDF to use. Currently I support:
        //
        // - Lambertian (perfect diffuse)
        // - Perfect specular
        //
        // However not all BSDFs have to be present.
        bool hasDiffuse = dot(clrDiffHit, clrDiffHit) > 0.f && (transAlpha > 0.f);
        bool hasSpecular = dot(clrSpecHit, clrSpecHit) > 0.f;
        bool hasTransmit = dot(clrDiffHit, clrDiffHit) > 0.f && (transAlpha < 1.f);
        float3 n_g = make_float3(d_normalG[tid]);
        // Adding in PDFs from other BxDFs not required as:
        // - For Lambertian, the other are specular and have PDF of zero.
        // - For Specular, adding in not useful (see PBR p. 693).
        // Adding in f() value from other BxDFs not required as:
        // - For Lambertian, the other are specular with F() = 0 w.p. 1 (See PBR p. 693 and p. 428).
        // - For Specular, adding in not useful (see PBR p. 693).
        float pdf = 0.f;
        float3 f, w_i;
        bool bIsSpecReflect = false, bIsSpecTransmit = false;
        // Temporary simplification: only the diffuse (Lambertian) path is
        // active; the multi-BSDF selection below is kept for reference but
        // disabled (hasSpecular/hasTransmit branches never fire).
        if(hasDiffuse)
        {
            // Lambertian only.
            // According to Veach, p. 154, particle directions w_i have to be sampled with respect to the
            // geometric normal density |w_i dot N_g|.
            f = dev_SampleDirectionLambertian(w_o, n_g,
                rnd1, rnd2, clrDiffHit, &w_i, &pdf);
        }
        /*if(!hasSpecular && !hasTransmit)
        {
            // Lambertian only.
            // According to Veach, p. 154, particle directions w_i have to be sampled with respect to the
            // geometric normal density |w_i dot N_g|.
            f = dev_SampleDirectionLambertian(w_o, n_g,
                rnd1, rnd2, clrDiffHit, &w_i, &pdf);
        }
        else if(!hasDiffuse && !hasTransmit)
        {
            // Specular only.
            f = dev_SampleDirectionSpecReflect(w_o, n_s,
                rnd1, rnd2, clrSpecHit, &w_i, &pdf);
            bIsSpecReflect = true;
        }
        else if(!hasDiffuse && !hasSpecular)
        {
            // Transmit only.
            f = dev_SampleDirectionSpecTransmit(w_o, n_s,
                rnd1, rnd2, clrDiffHit, indexRefrFromTo, true, &w_i, &pdf);
            bIsSpecTransmit = true;
        }
        else if(hasDiffuse && hasSpecular && !hasTransmit)
        {
            if(rnd3 < 0.5f)
                f = dev_SampleDirectionLambertian(w_o, n_g,
                    rnd1, rnd2, clrDiffHit, &w_i, &pdf);
            else
            {
                f = dev_SampleDirectionSpecReflect(w_o, n_s,
                    rnd1, rnd2, clrSpecHit, &w_i, &pdf);
                bIsSpecReflect = true;
            }
            pdf *= 0.5f;
        }
        else if(hasDiffuse && hasTransmit && !hasSpecular)
        {
            if(rnd3 < 0.5f)
                f = dev_SampleDirectionLambertian(w_o, n_g,
                    rnd1, rnd2, clrDiffHit * transAlpha, &w_i, &pdf);
            else
            {
                f = dev_SampleDirectionSpecTransmit(w_o, n_s,
                    rnd1, rnd2, clrDiffHit * (1.f - transAlpha), indexRefrFromTo, true, &w_i, &pdf);
                bIsSpecTransmit = true;
            }
            pdf *= 0.5f;
        }
        else if(hasDiffuse && hasSpecular && hasTransmit)
        {
            // NOTE: Determine how to handle internal specular reflections after and before
            //       specular transmissions (e.g. within a sphere). There would lead to an
            //       exorbitant PDF as such paths would only be taken by very few photons.
            //       They could lead to bright spots of different color and can be identified
            //       by visualizing the photons and scaling the gathering result appropriately.
            if(rnd3 < 0.33333333333f)
                f = dev_SampleDirectionLambertian(w_o, n_g,
                    rnd1, rnd2, clrDiffHit * transAlpha, &w_i, &pdf);
            else if(rnd3 < 0.66666666666f)
            {
                f = dev_SampleDirectionSpecReflect(w_o, n_s,
                    rnd1, rnd2, clrSpecHit, &w_i, &pdf);
                bIsSpecReflect = true;
            }
            else
            {
                f = dev_SampleDirectionSpecTransmit(w_o, n_s,
                    rnd1, rnd2, clrDiffHit * (1.f - transAlpha), indexRefrFromTo, true, &w_i, &pdf);
                bIsSpecTransmit = true;
            }
            pdf *= 0.33333333333f;
        }*/
        else
        {
            // Not supported / nothing to sample.
            pdf = 0.f;
            w_i = -w_o;
            f = make_float3(0.f, 0.f, 0.f);
        }
        // Store new photon direction.
        float2 sphericalNew = dev_Direction2Spherical(w_i);
        float4 oldPos = ioPhotons.d_positions[tid];
        ioPhotons.d_positions[tid] = make_float4(oldPos.x, oldPos.y, oldPos.z, sphericalNew.x);
        float polarNew = sphericalNew.y;
        // Avoid reflection in case w_i and w_o lie in different hemispheres
        // with respect to n_g. PBR p. 465 or VeachPhD, p. 153.
        if(!bIsSpecTransmit && dot(w_i, n_g) * dot(w_o, n_g) <= 0.f)
            pdf = 0.f;
        // Avoid transmission in case w_i and w_o lie in the same hemisphere.
        if(bIsSpecTransmit && dot(w_i, n_g) * dot(w_o, n_g) > 0.f)
            pdf = 0.f;
        // Store if this was a specular reflection.
        d_outIsLastSpecular[tid] = ((bIsSpecReflect || bIsSpecTransmit) ? 1 : 0);
        // Set flux to zero in case the PDF is zero. Those photons will be eliminated after the
        // next tracing step.
        float alpha = 0.f;
        // See Veach1997, page 154, where the problem using shading normals is described
        // and this weighting formula for particle tracing was developed.
        if(pdf != 0.f)
            alpha = fabsf(dot(w_o, n_s)) * fabsf(dot(w_i, n_g)) /
                        (pdf * fabsf(dot(w_o, n_g)));
        // Read out old flux.
        float3 phFlux = make_float3(ioPhotons.d_powers[tid]);
        float3 myFlux = phFlux * f * alpha;
        // Photon stays valid only if it retained some flux in any channel.
        d_outIsValid[tid] = myFlux.x > 0.f || myFlux.y > 0.f || myFlux.z > 0.f;
        // Update photon power. Leave position alone as it isn't changed.
        ioPhotons.d_powers[tid] = make_float4(myFlux.x, myFlux.y, myFlux.z, polarNew);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_ScaleFlux(PhotonData ioPhotons, float scale)
///
/// \brief Scales the flux component of each photon.
///
/// Note that this kernel is not replaceable with ::mncudaScaleVectorArray(). As the
/// spherical polar coordinate is stored in the w-component of the powers, using this
/// utility function will not work.
///
/// \author Mathias Neumann
/// \date August 2010
///
/// \param ioPhotons The photon data.
/// \param scale The power scale factor.
////////////////////////////////////////////////////////////////////////////////////////////////////
// One thread per photon: multiplies the RGB flux of each photon by 'scale'.
// The w component of d_powers carries the spherical polar coordinate of the
// photon direction, so it must be preserved unchanged (which is why a plain
// vector-scale utility cannot be used for this array).
__global__ void kernel_ScaleFlux(PhotonData ioPhotons, float scale)
{
    const uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx >= ioPhotons.numPhotons)
        return;
    float4 p = ioPhotons.d_powers[idx];
    // Scale x/y/z only; keep w (spherical polar) intact.
    ioPhotons.d_powers[idx] = make_float4(p.x * scale, p.y * scale, p.z * scale, p.w);
}
//@}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Moves light data to constant memory (c_Lights), read by the photon
/// spawning kernels in this file.
extern "C"
void PMUpdateBuildData(const LightData& lights)
{
    // FIX: pass the symbol itself instead of its name as a string. HIP's
    // hipMemcpyToSymbol does not support string-based symbol lookup (it was
    // likewise removed from the CUDA runtime in CUDA 5.0), so the literal
    // "c_Lights" would fail at runtime. HIP_SYMBOL wraps the symbol portably.
    mncudaSafeCallNoSync(hipMemcpyToSymbol(HIP_SYMBOL(c_Lights), &lights, sizeof(LightData)));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \WRAPPERS
////////////////////////////////////////////////////////////////////////////////////////////////////
//@{
/// Wraps kernel_SpawnLightPhotons(). Launches one thread per photon to spawn;
/// the light parameters themselves are read from constant memory (c_Lights),
/// so the 'type' argument is not forwarded to the kernel.
extern "C"
void KernelPMSpawnLightPhotons(LightType type, uint photonOffset, uint numToSpawn,
                               float3 worldCenter, float worldRadius,
                               PhotonData& outPhotonSpawn)
{
    // The spawn target must be empty; its count is set below after the launch.
    MNAssert(outPhotonSpawn.numPhotons == 0);
    const dim3 threads(256, 1, 1);
    const dim3 blocks(MNCUDA_DIVUP(numToSpawn, threads.x), 1, 1);
    hipLaunchKernelGGL(( kernel_SpawnLightPhotons), dim3(blocks), dim3(threads), 0, 0, photonOffset, numToSpawn,
        worldCenter, worldRadius, outPhotonSpawn);
    MNCUDA_CHECKERROR;
    outPhotonSpawn.numPhotons = numToSpawn;
}
/// Wraps kernel_RussianRoulette(). Launches one thread per photon; the
/// reciprocal of the continue probability is precomputed once on the host
/// rather than per photon on the device.
extern "C"
void KernelPMRussianRoulette(float* d_randoms, float contProbability,
                             PhotonData& ioPhotons, uint* d_ioIsValid)
{
    const dim3 threads(256, 1, 1);
    const dim3 blocks(MNCUDA_DIVUP(ioPhotons.numPhotons, threads.x), 1, 1);
    hipLaunchKernelGGL(( kernel_RussianRoulette), dim3(blocks), dim3(threads), 0, 0, d_randoms, contProbability,
        1.f / contProbability, ioPhotons, d_ioIsValid);
    MNCUDA_CHECKERROR;
}
/// Wraps kernel_SpawnScatteredPhotons(). Launches one thread per photon to
/// scatter the photon data in place. Note the wrapper's (normals, colors)
/// parameter order differs from the kernel's (colors, normals) order.
extern "C"
void KernelPMSpawnScatteredPhotons(PhotonData& ioPhotons,
                                   float4* d_normalsG, float4* d_normalsS,
                                   float4* d_hitDiffClrs, float4* d_hitSpecClrs,
                                   float* d_randoms1, float* d_randoms2, float* d_randoms3,
                                   uint* d_outIsLastSpecular, uint* d_outIsValid)
{
    const dim3 threads(256, 1, 1);
    const dim3 blocks(MNCUDA_DIVUP(ioPhotons.numPhotons, threads.x), 1, 1);
    hipLaunchKernelGGL(( kernel_SpawnScatteredPhotons), dim3(blocks), dim3(threads), 0, 0, ioPhotons,
        d_hitDiffClrs, d_hitSpecClrs, d_normalsG, d_normalsS,
        d_randoms1, d_randoms2, d_randoms3, d_outIsLastSpecular, d_outIsValid);
    MNCUDA_CHECKERROR;
}
/// Wraps kernel_ScaleFlux(). Launches one thread per photon to scale the RGB
/// flux of every photon by 'scale' (the direction stored in the w component
/// is left untouched by the kernel).
extern "C"
void KernelPMScaleFlux(PhotonData& ioPhotons, float scale)
{
    const dim3 threads(256, 1, 1);
    const dim3 blocks(MNCUDA_DIVUP(ioPhotons.numPhotons, threads.x), 1, 1);
    hipLaunchKernelGGL(( kernel_ScaleFlux), dim3(blocks), dim3(threads), 0, 0, ioPhotons, scale);
    MNCUDA_CHECKERROR;
}
//@}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////// | 8315a733936e4953034dc37e5ae284de475a2b29.cu | ////////////////////////////////////////////////////////////////////////////////////////////////////
// MNRT License
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2010 Mathias Neumann, www.maneumann.com.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// 3. Neither the name Mathias Neumann, nor the names of contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \file GPU\photon_build.cu
///
/// \brief Kernels for photon map construction, specifically photon tracing.
///
/// \author Mathias Neumann
/// \date 09.04.2010
/// \ingroup globalillum
////////////////////////////////////////////////////////////////////////////////////////////////////
#include "KernelDefs.h"
#include "photon_dev.h"
#include "sample_dev.h"
/// Light data constant memory variable.
__constant__ LightData c_Lights;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \KERNELS
////////////////////////////////////////////////////////////////////////////////////////////////////
//@{
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_SpawnLightPhotons(uint photonOffset, uint numToSpawn,
/// float3 worldCenter, float worldRadius, PhotonData outPhotonSpawn)
///
/// \brief Spawns photons from the given light source.
///
/// The number of photons is given fixed and controls the number of threads to spawn, as
/// each thread handles a photon.
///
/// \author Mathias Neumann
/// \date 09.04.2010
///
/// \param photonOffset The photon offset (depends on how many photons spawned already). Is
/// used to compute distinct members of the halton sequence for all
/// spawned photons.
/// \param numToSpawn Number of photons to spawn.
/// \param worldCenter The world center. Used for directional lights.
/// \param worldRadius The world radius. Used for directional lights.
/// \param outPhotonSpawn Will contain the spawned photons. All previous contents are overwritten.
/// Remember to set the new photon count after kernel execution.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_SpawnLightPhotons(uint photonOffset, uint numToSpawn,
                                         float3 worldCenter, float worldRadius,
                                         PhotonData outPhotonSpawn)
{
    uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid < numToSpawn)
    {
        float3 myPos, myFlux, myDir;

        // Quasi-random samples: radical inverse of the (photonOffset + tid + 1)-th
        // sequence index in bases 2 and 3 (Halton sequence); the offset lets
        // successive batches continue the same low-discrepancy sequence.
        float rnd1 = dev_RadicalInverse(photonOffset + tid+1, 2);
        float rnd2 = dev_RadicalInverse(photonOffset + tid+1, 3);

        float3 ptL = c_Lights.position;
        LightType type = c_Lights.type;
        if(type == Light_Point)
        {
            myPos = ptL;
            // Note that point lights store intensity instead of emitted radiance.
            float3 intensity = c_Lights.L_emit;
            // Power (flux) emitted by a point light is found by integrating intensity over the
            // entire sphere of directions, see PBR page 603. This is exactly the same as we would
            // get when dividing the intensity by the PDF of sampling the point light source,
            // 1.f / (4.f * MN_PI).
            myFlux = intensity * 4.f * MN_PI;

            // Generate photons direction using QRNG. We use the 3D Halton sequence we can
            // generate from the radical inverse function. As proposed in "Physically based
            // rendering", we use the first 3 prime numbers as bases.
            myDir.x = 2.f * rnd1 - 1.f;
            myDir.y = 2.f * rnd2 - 1.f;
            myDir.z = 2.f * dev_RadicalInverse(photonOffset + tid+1, 5) - 1.f;

            // Avoid myDir = 0.
            if(myDir.x == myDir.y && myDir.y == myDir.z && myDir.z == 0.f)
                myDir.x = 1.f;

            // Normalize direction.
            myDir = normalize(myDir);
        }
        else if(type == Light_AreaDisc)
        {
            float3 discNormal = c_Lights.direction;
            float3 L_emit = c_Lights.L_emit;
            float discRadius = c_Lights.areaRadius;

            // Sample position on disc.
            myPos = dev_SampleGeneralDisc(ptL, discNormal, discRadius, rnd1, rnd2);

            // Cosine-sample direction.
            float rnd3 = dev_RadicalInverse(photonOffset + tid+1, 5);
            float rnd4 = dev_RadicalInverse(photonOffset + tid+1, 7);
            float pdfCosine;
            myDir = dev_SampleHemisphereCosine(discNormal, rnd3, rnd4, &pdfCosine);

            // Compute PDF for sampling directions from the area light. That is the product
            // of the PDF for sampling the ray origin myPos with respect to the surface area
            // with the PDF of sampling the direction.
            // See PBR, p704. Note that the PDF for sampling a point on the surface area is
            // just 1 / area.
            float surfaceArea = MN_PI * discRadius * discRadius;
            float pdf = pdfCosine / surfaceArea;

            // No need to check whether we are emiting to the wrong side.
            if(pdf != 0.f)
                myFlux = fabsf(dot(myDir, discNormal)) * L_emit / pdf;
            else
                myFlux = make_float3(0.f);
        }
        else if(type == Light_AreaRect)
        {
            float3 rectNormal = c_Lights.direction;
            float3 L_emit = c_Lights.L_emit;

            // Sample position on rect (rnd1, rnd2 are barycentric factors along
            // the two edge vectors).
            float3 v1 = c_Lights.areaV1;
            float3 v2 = c_Lights.areaV2;
            myPos = ptL + rnd1*v1 + rnd2*v2;

            // Cosine-sample direction.
            float rnd3 = dev_RadicalInverse(photonOffset + tid+1, 5);
            float rnd4 = dev_RadicalInverse(photonOffset + tid+1, 7);
            float pdfCosine;
            myDir = dev_SampleHemisphereCosine(rectNormal, rnd3, rnd4, &pdfCosine);

            // Compute PDF for sampling directions from the area light. That is the product
            // of the PDF for sampling the ray origin myPos with respect to the surface area
            // with the PDF of sampling the direction.
            // See PBR, p704. Note that the PDF for sampling a point on the surface area is
            // just 1 / area.
            float surfaceArea = length(v1) * length(v2);
            float pdf = pdfCosine / surfaceArea;

            // No need to check whether we are emiting to the wrong side.
            if(pdf != 0.f)
                myFlux = fabsf(dot(myDir, rectNormal)) * L_emit / pdf;
            else
                myFlux = make_float3(0.f);
        }
        else if(type == Light_Directional)
        {
            float3 lightDir = c_Lights.direction;
            float3 L_emit = c_Lights.L_emit;
            // Sample a point on the disc bounding the scene perpendicular to the
            // light direction, then back the ray origin off by one world radius.
            float3 ptDisk = dev_SampleGeneralDisc(worldCenter, lightDir, worldRadius,
                rnd1, rnd2);
            float pdf = MN_INV_PI / (worldRadius*worldRadius);

            // Now set photon properties.
            myPos = ptDisk - worldRadius * lightDir; // Offset point
            myDir = lightDir;
            myFlux = L_emit / pdf;
        }
        // NOTE(review): for a light type not handled by any branch above,
        // myPos/myDir/myFlux are stored uninitialized here.
        // Store the photon in the spawn list.
        dev_PhotonStore(outPhotonSpawn, tid, myPos, myDir, myFlux);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_RussianRoulette(float* d_randoms, float contProbability,
/// float invContProbability, PhotonData ioPhotons, uint* d_ioIsValid)
///
/// \brief Marks photons for elimination using russian roulette.
///
/// Due to parallel exectution, this kernel does not perform the actual elimination. Only
/// a valid flag array is updated. However, photon powers are scaled according to russian
/// roulette.
///
/// \author Mathias Neumann
/// \date 23.06.2010
///
/// \param [in] d_randoms Uniform random numbers, one for each photon.
/// \param contProbability The continue probability.
/// \param invContProbability The inverse continue probability.
/// \param ioPhotons Photon data to consider. All photon powers are scaled by the inverse
/// continue probability according to the russian roulette scheme.
/// \param [in,out] d_ioIsValid Pass in the old valid flags (binary 0/1 array). For each eliminated
/// photon its flag is forced to 0.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_RussianRoulette(float* d_randoms, float contProbability,
                                       float invContProbability,
                                       PhotonData ioPhotons, uint* d_ioIsValid)
{
    uint idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= ioPhotons.numPhotons)
        return;

    // Survival test: a photon continues only if it was valid before and its
    // uniform sample falls within the continuation probability.
    float sample = d_randoms[idx];
    uint wasValid = d_ioIsValid[idx];
    d_ioIsValid[idx] = ((sample <= contProbability && wasValid) ? 1 : 0);

    // Compensate surviving paths for the terminated ones by dividing the power
    // by the continuation probability (PBR p. 781). The w component holds the
    // spherical polar coordinate of the direction and is left untouched.
    float4 power = ioPhotons.d_powers[idx];
    power.x *= invContProbability;
    power.y *= invContProbability;
    power.z *= invContProbability;
    ioPhotons.d_powers[idx] = power;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_SpawnScatteredPhotons(PhotonData ioPhotons, float4* d_hitDiffClrs,
/// float4* d_hitSpecClrs, float4* d_normalG, float4* d_normalS, float* d_randoms1,
/// float* d_randoms2, float* d_randoms3, uint* d_outIsLastSpecular, uint* d_outIsValid)
///
/// \brief Spawns scattered photons for given photon data (inplace).
///
/// Each thread works on a single photon and generates a new, scattered photon. Up to
/// three random numbers are required for a single photon (BSDF selection, new direction
/// selection).
///
/// Right now I disabled BSDF selection and handle diffuse BRDFs only. Specular surfaces
/// were added, but never fully implemented, so that parts of MNRT are not ready for them.
///
/// \author Mathias Neumann
/// \date 12.04.2010
///
/// \param ioPhotons The photon data to update inplace.
/// \param [in] d_hitDiffClrs Diffuse color of the surface hit by each photon. Color in
/// \c xyz and transparency alpha in \c w.
/// \param [in] d_hitSpecClrs Specular color of the surface hit by each photon. Color
/// in \c xyz and index of refraction in \c w.
/// \param [in] d_normalG Geometric normal at photon intersection for each photon.
/// \param [in] d_normalS Shading normal at photon intersection for each photon.
/// \param [in] d_randoms1 First uniform random number array. One random number for
/// each photon.
/// \param [in] d_randoms2 Second uniform random number array. One random number for
/// each photon.
/// \param [in] d_randoms3 Third uniform random number array. One random number for
/// each photon.
/// \param [out] d_outIsLastSpecular Binary 0/1 array. Will contain 1 for photons that underwent
/// a specular reflection/transmission, else 0.
/// \param [out] d_outIsValid Binary 0/1 array. Will contain 1 for valid and 0 for
/// invalid photons.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_SpawnScatteredPhotons(PhotonData ioPhotons,
                                             float4* d_hitDiffClrs, float4* d_hitSpecClrs,
                                             float4* d_normalG, float4* d_normalS,
                                             float* d_randoms1, float* d_randoms2, float* d_randoms3,
                                             uint* d_outIsLastSpecular, uint* d_outIsValid)
{
    uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid < ioPhotons.numPhotons)
    {
        // Read out source photon direction.
        // Directions are packed in spherical coordinates: the azimuthal angle
        // lives in d_positions[tid].w, the polar angle in d_powers[tid].w.
        float azimuthal = ioPhotons.d_positions[tid].w;
        float polar = ioPhotons.d_powers[tid].w;
        float3 w_o = -dev_Spherical2Direction(azimuthal, polar);

        float4 clrDiffHit4 = d_hitDiffClrs[tid];
        float3 clrDiffHit = make_float3(clrDiffHit4.x, clrDiffHit4.y, clrDiffHit4.z);
        float4 clrSpecHit4 = d_hitSpecClrs[tid];
        float3 clrSpecHit = make_float3(clrSpecHit4.x, clrSpecHit4.y, clrSpecHit4.z);
        float transAlpha = clrDiffHit4.w;
        //float nTo = clrSpecHit4.w;
        float3 n_s = make_float3(d_normalS[tid]);

        // Calculate n_from/n_to in correct order depending on which direction the photon
        // is traveling.
        /*float indexRefrFromTo;
        if(dot(n_s, w_o) < 0.f)
            indexRefrFromTo = nTo;          // Swap: n_to/n_from (leaving medium).
        else
            indexRefrFromTo = 1.f / nTo;    // n_from = 1.f !*/

        // I use pseudo random numbers here because in PBR, page 781, "the advantages of
        // low-discrepancy points are mostly lost as more bounces occur".
        float rnd1 = d_randoms1[tid];
        float rnd2 = d_randoms2[tid];
        //float rnd3 = d_randoms3[tid];

        // Choose which BSDF to use. Currently I support:
        //
        //  - Lambertian (perfect diffuse)
        //  - Perfect specular
        //
        // However not all BSDFs have to be present.
        bool hasDiffuse = dot(clrDiffHit, clrDiffHit) > 0.f && (transAlpha > 0.f);
        bool hasSpecular = dot(clrSpecHit, clrSpecHit) > 0.f;
        bool hasTransmit = dot(clrDiffHit, clrDiffHit) > 0.f && (transAlpha < 1.f);

        float3 n_g = make_float3(d_normalG[tid]);

        // Adding in PDFs from other BxDFs not required as:
        //  - For Lambertian, the other are specular and have PDF of zero.
        //  - For Specular, adding in not useful (see PBR p. 693).
        // Adding in f() value from other BxDFs not required as:
        //  - For Lambertian, the other are specular with F() = 0 w.p. 1 (See PBR p. 693 and p. 428).
        //  - For Specular, adding in not useful (see PBR p. 693).
        float pdf = 0.f;
        float3 f, w_i;
        bool bIsSpecReflect = false, bIsSpecTransmit = false;
        // Temporary simplification: only the diffuse (Lambertian) lobe is
        // sampled; the full BSDF selection below is disabled, which is why
        // rnd3/clrSpecHit/indexRefrFromTo are currently unused.
        if(hasDiffuse)
        {
            // Lambertian only.
            // According to Veach, p. 154, particle directions w_i have to be sampled with respect to the
            // geometric normal density |w_i dot N_g|.
            f = dev_SampleDirectionLambertian(w_o, n_g,
                rnd1, rnd2, clrDiffHit, &w_i, &pdf);
        }
        /*if(!hasSpecular && !hasTransmit)
        {
            // Lambertian only.
            // According to Veach, p. 154, particle directions w_i have to be sampled with respect to the
            // geometric normal density |w_i dot N_g|.
            f = dev_SampleDirectionLambertian(w_o, n_g,
                rnd1, rnd2, clrDiffHit, &w_i, &pdf);
        }
        else if(!hasDiffuse && !hasTransmit)
        {
            // Specular only.
            f = dev_SampleDirectionSpecReflect(w_o, n_s,
                rnd1, rnd2, clrSpecHit, &w_i, &pdf);
            bIsSpecReflect = true;
        }
        else if(!hasDiffuse && !hasSpecular)
        {
            // Transmit only.
            f = dev_SampleDirectionSpecTransmit(w_o, n_s,
                rnd1, rnd2, clrDiffHit, indexRefrFromTo, true, &w_i, &pdf);
            bIsSpecTransmit = true;
        }
        else if(hasDiffuse && hasSpecular && !hasTransmit)
        {
            if(rnd3 < 0.5f)
                f = dev_SampleDirectionLambertian(w_o, n_g,
                    rnd1, rnd2, clrDiffHit, &w_i, &pdf);
            else
            {
                f = dev_SampleDirectionSpecReflect(w_o, n_s,
                    rnd1, rnd2, clrSpecHit, &w_i, &pdf);
                bIsSpecReflect = true;
            }
            pdf *= 0.5f;
        }
        else if(hasDiffuse && hasTransmit && !hasSpecular)
        {
            if(rnd3 < 0.5f)
                f = dev_SampleDirectionLambertian(w_o, n_g,
                    rnd1, rnd2, clrDiffHit * transAlpha, &w_i, &pdf);
            else
            {
                f = dev_SampleDirectionSpecTransmit(w_o, n_s,
                    rnd1, rnd2, clrDiffHit * (1.f - transAlpha), indexRefrFromTo, true, &w_i, &pdf);
                bIsSpecTransmit = true;
            }
            pdf *= 0.5f;
        }
        else if(hasDiffuse && hasSpecular && hasTransmit)
        {
            // NOTE: Determine how to handle internal specular reflections after and before
            //       specular transmissions (e.g. within a sphere). There would lead to an
            //       exorbitant PDF as such paths would only be taken by very few photons.
            //       They could lead to bright spots of different color and can be identified
            //       by visualizing the photons and scaling the gathering result appropriately.
            if(rnd3 < 0.33333333333f)
                f = dev_SampleDirectionLambertian(w_o, n_g,
                    rnd1, rnd2, clrDiffHit * transAlpha, &w_i, &pdf);
            else if(rnd3 < 0.66666666666f)
            {
                f = dev_SampleDirectionSpecReflect(w_o, n_s,
                    rnd1, rnd2, clrSpecHit, &w_i, &pdf);
                bIsSpecReflect = true;
            }
            else
            {
                f = dev_SampleDirectionSpecTransmit(w_o, n_s,
                    rnd1, rnd2, clrDiffHit * (1.f - transAlpha), indexRefrFromTo, true, &w_i, &pdf);
                bIsSpecTransmit = true;
            }
            pdf *= 0.33333333333f;
        }*/
        else
        {
            // Not supported / nothing to sample.
            pdf = 0.f;
            w_i = -w_o;
            f = make_float3(0.f, 0.f, 0.f);
        }

        // Store new photon direction (azimuthal into positions.w; the polar
        // angle is written into powers.w further below).
        float2 sphericalNew = dev_Direction2Spherical(w_i);
        float4 oldPos = ioPhotons.d_positions[tid];
        ioPhotons.d_positions[tid] = make_float4(oldPos.x, oldPos.y, oldPos.z, sphericalNew.x);
        float polarNew = sphericalNew.y;

        // Avoid reflection in case w_i and w_o lie in different hemispheres
        // with respect to n_g. PBR p. 465 or VeachPhD, p. 153.
        if(!bIsSpecTransmit && dot(w_i, n_g) * dot(w_o, n_g) <= 0.f)
            pdf = 0.f;
        // Avoid transmission in case w_i and w_o lie in the same hemisphere.
        if(bIsSpecTransmit && dot(w_i, n_g) * dot(w_o, n_g) > 0.f)
            pdf = 0.f;

        // Store if this was a specular reflection.
        d_outIsLastSpecular[tid] = ((bIsSpecReflect || bIsSpecTransmit) ? 1 : 0);

        // Set flux to zero in case the PDF is zero. Those photons will be eliminated after the
        // next tracing step.
        float alpha = 0.f;
        // See Veach1997, page 154, where the problem using shading normals is described
        // and this weighting formula for particle tracing was developed.
        if(pdf != 0.f)
            alpha = fabsf(dot(w_o, n_s)) * fabsf(dot(w_i, n_g)) /
                (pdf * fabsf(dot(w_o, n_g)));

        // Read out old flux.
        float3 phFlux = make_float3(ioPhotons.d_powers[tid]);
        float3 myFlux = phFlux * f * alpha;

        // A photon stays valid only if it still carries some power.
        d_outIsValid[tid] = myFlux.x > 0.f || myFlux.y > 0.f || myFlux.z > 0.f;

        // Update photon power. Leave position alone as it isn't changed.
        ioPhotons.d_powers[tid] = make_float4(myFlux.x, myFlux.y, myFlux.z, polarNew);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_ScaleFlux(PhotonData ioPhotons, float scale)
///
/// \brief Scales the flux component of each photon.
///
/// Note that this kernel is not replaceable with ::mncudaScaleVectorArray(). As the
/// spherical polar coordinate is stored in the w-component of the powers, using this
/// utility function will not work.
///
/// \author Mathias Neumann
/// \date August 2010
///
/// \param ioPhotons The photon data.
/// \param scale The power scale factor.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_ScaleFlux(PhotonData ioPhotons, float scale)
{
    uint idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= ioPhotons.numPhotons)
        return;

    // Scale the RGB flux only; the w component carries the spherical polar
    // coordinate of the photon direction and must remain untouched.
    float4 power = ioPhotons.d_powers[idx];
    power.x *= scale;
    power.y *= scale;
    power.z *= scale;
    ioPhotons.d_powers[idx] = power;
}
//@}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// Moves light data to constant memory.
///
/// Copies \a lights into the c_Lights __constant__ variable read by the
/// photon spawning kernels. Must be called before launching them.
extern "C"
void PMUpdateBuildData(const LightData& lights)
{
	// Pass the symbol itself, not its name as a string: string-based symbol
	// lookup for cudaMemcpyToSymbol was removed in CUDA 5.0 and fails to
	// compile / returns cudaErrorInvalidSymbol on modern toolkits.
	mncudaSafeCallNoSync(cudaMemcpyToSymbol(c_Lights, &lights, sizeof(LightData)));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \WRAPPERS
////////////////////////////////////////////////////////////////////////////////////////////////////
//@{
/// Wraps kernel_SpawnLightPhotons() kernel call.
extern "C"
void KernelPMSpawnLightPhotons(LightType type, uint photonOffset, uint numToSpawn,
							   float3 worldCenter, float worldRadius,
							   PhotonData& outPhotonSpawn)
{
	// The target list must start out empty; the kernel fills slots [0, numToSpawn).
	// Note: the light type argument is not forwarded — kernel_SpawnLightPhotons
	// reads the type from the c_Lights constant instead.
	MNAssert(outPhotonSpawn.numPhotons == 0);

	const dim3 threads(256, 1, 1);
	const dim3 blocks(MNCUDA_DIVUP(numToSpawn, threads.x), 1, 1);
	kernel_SpawnLightPhotons<<<blocks, threads>>>(photonOffset, numToSpawn,
		worldCenter, worldRadius, outPhotonSpawn);
	MNCUDA_CHECKERROR;

	// Every requested photon was generated, so the count is known statically.
	outPhotonSpawn.numPhotons = numToSpawn;
}
/// Wraps kernel_RussianRoulette() kernel call.
extern "C"
void KernelPMRussianRoulette(float* d_randoms, float contProbability,
							 PhotonData& ioPhotons, uint* d_ioIsValid)
{
	// One thread per photon. The reciprocal of the continuation probability is
	// computed once here instead of per thread on the device.
	const dim3 threads(256, 1, 1);
	const dim3 blocks(MNCUDA_DIVUP(ioPhotons.numPhotons, threads.x), 1, 1);
	kernel_RussianRoulette<<<blocks, threads>>>(d_randoms, contProbability,
		1.f / contProbability, ioPhotons, d_ioIsValid);
	MNCUDA_CHECKERROR;
}
/// Wraps kernel_SpawnScatteredPhotons() kernel call.
extern "C"
void KernelPMSpawnScatteredPhotons(PhotonData& ioPhotons,
							   float4* d_normalsG, float4* d_normalsS,
							   float4* d_hitDiffClrs, float4* d_hitSpecClrs,
							   float* d_randoms1, float* d_randoms2, float* d_randoms3,
							   uint* d_outIsLastSpecular, uint* d_outIsValid)
{
	// One thread per photon, 256 threads per block.
	const dim3 threads(256, 1, 1);
	const dim3 blocks(MNCUDA_DIVUP(ioPhotons.numPhotons, threads.x), 1, 1);
	kernel_SpawnScatteredPhotons<<<blocks, threads>>>(ioPhotons,
		d_hitDiffClrs, d_hitSpecClrs, d_normalsG, d_normalsS,
		d_randoms1, d_randoms2, d_randoms3, d_outIsLastSpecular, d_outIsValid);
	MNCUDA_CHECKERROR;
}
/// Wraps kernel_ScaleFlux() kernel call.
extern "C"
void KernelPMScaleFlux(PhotonData& ioPhotons, float scale)
{
	// One thread per photon; kernel_ScaleFlux leaves the w component
	// (spherical polar coordinate) of each power entry untouched.
	const dim3 threads(256, 1, 1);
	const dim3 blocks(MNCUDA_DIVUP(ioPhotons.numPhotons, threads.x), 1, 1);
	kernel_ScaleFlux<<<blocks, threads>>>(ioPhotons, scale);
	MNCUDA_CHECKERROR;
}
//@}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////// |
7b3ec52078238fc280bf501123f68f899d3cbfa6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<sys/time.h>
typedef unsigned long long bignum;
// Trial-division primality test, usable from host and device.
// Returns 1 if x is prime, 0 otherwise.
__host__ __device__ int isPrime(bignum x)
{
	// 0 and 1 are not prime; without this guard they were reported as prime.
	if (x < 2)
		return 0;
	// Use double precision for the square root: a float root of a 64-bit value
	// can round low, truncate lim, and misclassify some composites as prime.
	bignum lim = (bignum) sqrt((double)x) + 1;
	for(bignum i = 2; i < lim; i++){
		if ( x % i == 0)
			return 0;
	}//end of for loop
	return 1;
}//end of isPrime
// Marks primes in arr (one int flag per number, indices [0, n)).
// Each thread tests one odd candidate: thread id -> 2*(id+1)-1 = 1, 3, 5, ...
__global__ void findPrime(int * arr, bignum n)
{
	bignum id = blockIdx.x * blockDim.x + threadIdx.x;
	bignum num = (2*(id + 1) -1);
	if(num < n)
	{
		arr[num] = isPrime(num);
	}//end of if
	// The odd-only mapping never writes index 2, so the only even prime was
	// left at 0 by the device memset; let the first thread record it.
	if(id == 0 && n > 2)
	{
		arr[2] = 1;
	}//end of if
}//end of kernel
// Usage: prog N blockSize — flags every prime in [0, N] on the GPU, prints
// each number's flag and a total count plus elapsed wall time.
int main(int argc, char ** argv)
{
	clock_t start = clock();
	// Two arguments are required; the original check (argc < 2) still
	// dereferenced argv[2] when only N was supplied.
	if(argc < 3)
	{
		perror("Not Enough Arguments");
		exit(-1);
	}//end of if
	// strtoull instead of atoi: atoi truncates values beyond the int range,
	// silently corrupting large N.
	bignum N = strtoull(argv[1], NULL, 10);
	int blockSize = atoi(argv[2]);
	// Validate before computing the grid size (blockSize == 0 would divide by zero).
	if(N <= 0 || blockSize <= 0)
	{
		printf("N isn't a valid number");
		exit(-99);
	}//end of if statment
	// One thread per odd candidate <= N, hence (N+1)/2 threads in total.
	bignum gridsize = (bignum)ceil((N+1)/2.0/blockSize);

	int * d_array;
	hipMalloc(&d_array, sizeof(int)*(N+1));
	hipMemset(d_array, 0, sizeof(int)*(N+1));
	hipLaunchKernelGGL(( findPrime), dim3(gridsize), dim3(blockSize), 0, 0, d_array, N+1);

	int * h_array = (int *)calloc(N+1, sizeof(int));
	// Blocking copy; implicitly synchronizes with the kernel launch above.
	hipMemcpy(h_array, d_array, (N+1)*sizeof(int), hipMemcpyDeviceToHost);

	int prime = 0;
	// bignum loop index: an int would overflow for very large N.
	for(bignum i = 0; i <= N; i++)
	{
		printf("%llu is %d\n", i, h_array[i]);
		if(h_array[i] == 1)
			prime++;
	}//end for loop
	free(h_array);
	hipFree(d_array);
	clock_t end = clock();
	printf("primes = %d, time = %f\n", prime,(float) (end-start)/ CLOCKS_PER_SEC);
	return 0;
}//end of main
| 7b3ec52078238fc280bf501123f68f899d3cbfa6.cu | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<sys/time.h>
typedef unsigned long long bignum;
// Trial-division primality test, usable from host and device.
// Returns 1 if x is prime, 0 otherwise.
__host__ __device__ int isPrime(bignum x)
{
	// 0 and 1 are not prime; without this guard they were reported as prime.
	if (x < 2)
		return 0;
	// Use double precision for the square root: a float root of a 64-bit value
	// can round low, truncate lim, and misclassify some composites as prime.
	bignum lim = (bignum) sqrt((double)x) + 1;
	for(bignum i = 2; i < lim; i++){
		if ( x % i == 0)
			return 0;
	}//end of for loop
	return 1;
}//end of isPrime
// Marks primes in arr (one int flag per number, indices [0, n)).
// Each thread tests one odd candidate: thread id -> 2*(id+1)-1 = 1, 3, 5, ...
__global__ void findPrime(int * arr, bignum n)
{
	bignum id = blockIdx.x * blockDim.x + threadIdx.x;
	bignum num = (2*(id + 1) -1);
	if(num < n)
	{
		arr[num] = isPrime(num);
	}//end of if
	// The odd-only mapping never writes index 2, so the only even prime was
	// left at 0 by the device memset; let the first thread record it.
	if(id == 0 && n > 2)
	{
		arr[2] = 1;
	}//end of if
}//end of kernel
// Usage: prog N blockSize — flags every prime in [0, N] on the GPU, prints
// each number's flag and a total count plus elapsed wall time.
int main(int argc, char ** argv)
{
	clock_t start = clock();
	// Two arguments are required; the original check (argc < 2) still
	// dereferenced argv[2] when only N was supplied.
	if(argc < 3)
	{
		perror("Not Enough Arguments");
		exit(-1);
	}//end of if
	// strtoull instead of atoi: atoi truncates values beyond the int range,
	// silently corrupting large N.
	bignum N = strtoull(argv[1], NULL, 10);
	int blockSize = atoi(argv[2]);
	// Validate before computing the grid size (blockSize == 0 would divide by zero).
	if(N <= 0 || blockSize <= 0)
	{
		printf("N isn't a valid number");
		exit(-99);
	}//end of if statment
	// One thread per odd candidate <= N, hence (N+1)/2 threads in total.
	bignum gridsize = (bignum)ceil((N+1)/2.0/blockSize);

	int * d_array;
	cudaMalloc(&d_array, sizeof(int)*(N+1));
	cudaMemset(d_array, 0, sizeof(int)*(N+1));
	findPrime<<<gridsize, blockSize>>>(d_array, N+1);

	int * h_array = (int *)calloc(N+1, sizeof(int));
	// Blocking copy; implicitly synchronizes with the kernel launch above.
	cudaMemcpy(h_array, d_array, (N+1)*sizeof(int), cudaMemcpyDeviceToHost);

	int prime = 0;
	// bignum loop index: an int would overflow for very large N.
	for(bignum i = 0; i <= N; i++)
	{
		printf("%llu is %d\n", i, h_array[i]);
		if(h_array[i] == 1)
			prime++;
	}//end for loop
	free(h_array);
	cudaFree(d_array);
	clock_t end = clock();
	printf("primes = %d, time = %f\n", prime,(float) (end-start)/ CLOCKS_PER_SEC);
	return 0;
}//end of main
|
729723ab442ef18b300e6f49caa50fe497bc85e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Forward max pooling: one thread computes one output element out of n total.
// size is the pooling window edge length, stride the window step.
__global__ void _bcnn_forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, float *input, float *output, int *indexes)
{
    // Output spatial extent for the given stride (ceil division of in_h/in_w).
    int h = (in_h-1)/stride + 1;
    int w = (in_w-1)/stride + 1;
    int c = in_c;

    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= n) {
        return;
    }

    // Decode the flat thread id into output coordinates:
    // j = column, i = row, k = channel, b = batch index.
    int j = id % w;
    id /= w;
    int i = id % h;
    id /= h;
    int k = id % c;
    id /= c;
    int b = id;

    int out_index = j + w*(i + h*(k + c*b));
    float max = -INFINITY;
    int max_i = -1;
    int l, m;
    // Scan the size x size input window; positions outside the input
    // contribute -INFINITY and therefore never win the comparison.
    for (l = 0; l < size; ++l) {
        for (m = 0; m < size; ++m) {
            int cur_h = i * stride + l;
            int cur_w = j * stride + m;
            int index = cur_w + in_w * (cur_h + in_h * (k + b * in_c));
            int valid = (cur_h >= 0 && cur_h < in_h &&
                    cur_w >= 0 && cur_w < in_w);
            float val = (valid != 0) ? input[index] : -INFINITY;
            max_i = (val > max) ? index : max_i;
            max = (val > max) ? val : max;
        }
    }
    // Store the winning value and its flat input index (-1 if the whole window
    // fell outside the input); presumably the index routes gradients in the
    // matching backward pass — confirm against the caller.
    output[out_index] = max;
    indexes[out_index] = max_i;
} | 729723ab442ef18b300e6f49caa50fe497bc85e7.cu | #include "includes.h"
// Forward max pooling: one thread computes one output element out of n total.
// size is the pooling window edge length, stride the window step.
__global__ void _bcnn_forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, float *input, float *output, int *indexes)
{
    // Output spatial extent for the given stride (ceil division of in_h/in_w).
    int h = (in_h-1)/stride + 1;
    int w = (in_w-1)/stride + 1;
    int c = in_c;

    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= n) {
        return;
    }

    // Decode the flat thread id into output coordinates:
    // j = column, i = row, k = channel, b = batch index.
    int j = id % w;
    id /= w;
    int i = id % h;
    id /= h;
    int k = id % c;
    id /= c;
    int b = id;

    int out_index = j + w*(i + h*(k + c*b));
    float max = -INFINITY;
    int max_i = -1;
    int l, m;
    // Scan the size x size input window; positions outside the input
    // contribute -INFINITY and therefore never win the comparison.
    for (l = 0; l < size; ++l) {
        for (m = 0; m < size; ++m) {
            int cur_h = i * stride + l;
            int cur_w = j * stride + m;
            int index = cur_w + in_w * (cur_h + in_h * (k + b * in_c));
            int valid = (cur_h >= 0 && cur_h < in_h &&
                    cur_w >= 0 && cur_w < in_w);
            float val = (valid != 0) ? input[index] : -INFINITY;
            max_i = (val > max) ? index : max_i;
            max = (val > max) ? val : max;
        }
    }
    // Store the winning value and its flat input index (-1 if the whole window
    // fell outside the input); presumably the index routes gradients in the
    // matching backward pass — confirm against the caller.
    output[out_index] = max;
    indexes[out_index] = max_i;
} |
b09ff77690c6d18a4cbf8f6d5f8034a64b13adf0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file sorting_gpu.cu
// @brief Sorting block implementation (GPU)
// @author Andrea Vedaldi
// @author Karel Lenc
//mod. by j.b.<2017>
/*
Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "sorting.hpp"
#include "../datacu.hpp"
#include "sharedmem.cuh"
#include <assert.h>
#include <float.h>
#include <sm_20_atomic_functions.h>
#include <hipcub/hipcub.hpp>
/* ---------------------------------------------------------------- */
/* sorting_max_forward */
/* ---------------------------------------------------------------- */
// Sorts one pooling window per thread block: each block gathers its
// windowWidth x windowHeight patch from the strided input layout into shared
// memory, radix-sorts it with CUB, and writes the sorted values contiguously
// into `sorted`. Requires BLOCK_THREADS * ITEMS_PER_THREAD == window size.
template<typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD> __global__ void
sorting_kernel
    (T* sorted,
     const T* data,
     const int Height,
     const int windowWidth,
     const int windowHeight,
     const int strideWidth,
     const int strideHeight,
     const int BoxesInHeight
    )
{
    const int numElemsPerArray = BLOCK_THREADS * ITEMS_PER_THREAD;
    // --- Shared memory allocation
    __shared__ T sharedMemoryValueArray[numElemsPerArray];
    // --- Specialize BlockStore and BlockRadixSort collective types
    // NOTE(review): the key type is hard-coded to `int` while the data arrays
    // are of type T; the casts in Sort() below only line up when T is int —
    // confirm, or make the typedef use T.
    typedef cub::BlockRadixSort <int , BLOCK_THREADS, ITEMS_PER_THREAD> BlockRadixSortT; // the last template argument would be for the rank payload
    // --- Allocate type-safe, repurposable shared memory for collectives
    __shared__ typename BlockRadixSortT::TempStorage temp_storage;
    // Map this block to its window: blockIdx.y selects the channel/slice,
    // blockIdx.x enumerates windows column-major (BoxesInHeight per column).
    int offsetk= blockIdx.y*gridDim.x*numElemsPerArray;
    int offsetj=blockIdx.x/BoxesInHeight;
    offsetj=offsetj*strideWidth;
    int offseti=blockIdx.x%BoxesInHeight;
    offseti=offseti*strideHeight;
    //
    int block_offset = numElemsPerArray * blockIdx.x+offsetk;
    int arrayAddress = 0;
    int windowoffsetj = 0;
    // --- Load data to shared memory (gather the window from column-major input)
    for (int k = 0; k < ITEMS_PER_THREAD; k++){
        arrayAddress = threadIdx.x * ITEMS_PER_THREAD + k;
        windowoffsetj=(arrayAddress/windowHeight + offsetj) * Height;
        sharedMemoryValueArray[arrayAddress] = data[arrayAddress%windowHeight+windowoffsetj+offseti+offsetk];//loads array
    }
    __syncthreads();
    // --- Collectively sort the keys
    BlockRadixSortT(temp_storage).Sort(*static_cast<T(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryValueArray + (threadIdx.x * ITEMS_PER_THREAD))));
    __syncthreads();
    // --- Write data from shared memory; the output is contiguous per block.
    // NOTE(review): windowoffsetj is recomputed here but never used in this loop.
    for (int k = 0; k < ITEMS_PER_THREAD; k++){
        arrayAddress = threadIdx.x * ITEMS_PER_THREAD + k;
        windowoffsetj=(arrayAddress/windowHeight + offsetj) * Height;
        sorted[block_offset + arrayAddress] = sharedMemoryValueArray[arrayAddress];
    }
}
/* ---------------------------------------------------------------- */
/* sorting_max_backward */
/* ---------------------------------------------------------------- */
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// an implementation of atomicAdd() for double (really slow) for older CC
// Double-precision atomicAdd emulation via compare-and-swap for devices
// without native support (pre-SM60). Returns the old value, like the builtin.
static __device__ double atomicAdd(double* address, double val)
{
    // Reinterpret the double as a 64-bit integer so atomicCAS can operate on it.
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        // Try to install assumed+val; atomicCAS returns the value actually
        // found, so the loop retries whenever another thread raced us.
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val +
                        __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}
#endif
// Backward pass of the window sort: re-sorts each window's (value, global
// input index) pairs exactly as the forward pass did, then scatters the
// gradient derSorted (laid out contiguously per window) back to the original
// input positions. atomicAdd is required because overlapping windows
// (stride < window size) accumulate into the same derData element.
template<typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD> __global__ void
sorting_max_backward_kernel
    (T* derData,
     const T* data,
     const T* derSorted,
     const int Height,
     const int windowWidth,
     const int windowHeight,
     const int strideWidth,
     const int strideHeight,
     const int BoxesInHeight
    )
{
    const int numElemsPerArray = BLOCK_THREADS * ITEMS_PER_THREAD;
    // --- Shared memory allocation
    __shared__ T sharedMemoryValueArray[numElemsPerArray];
    __shared__ int sharedMemoryRanks[numElemsPerArray];
    // --- Specialize BlockStore and BlockRadixSort collective types
    // NOTE(review): as in the forward kernel, the key type is hard-coded to
    // `int` while values are T — confirm T is int or parameterize on T.
    typedef cub::BlockRadixSort <int , BLOCK_THREADS, ITEMS_PER_THREAD, int> BlockRadixSortT; // the last template argument is the rank payload type
    // --- Allocate type-safe, repurposable shared memory for collectives
    __shared__ typename BlockRadixSortT::TempStorage temp_storage;
    // Map this block to its window (same decomposition as the forward kernel).
    int offsetk= blockIdx.y*gridDim.x*numElemsPerArray;
    int offsetj=blockIdx.x/BoxesInHeight;
    offsetj=offsetj*strideWidth;
    int offseti=blockIdx.x%BoxesInHeight;
    offseti=offseti*strideHeight;
    int block_offset = numElemsPerArray * blockIdx.x+offsetk;
    int arrayAddress = 0;
    int windowoffsetj = 0;
    int Index = 0;
    // --- Load data to shared memory, pairing each value with its flat global index
    for (int k = 0; k < ITEMS_PER_THREAD; k++){
        arrayAddress = threadIdx.x * ITEMS_PER_THREAD + k;
        windowoffsetj=(arrayAddress/windowHeight + offsetj) * Height;
        Index = arrayAddress%windowHeight+windowoffsetj+offseti+offsetk;
        sharedMemoryValueArray[arrayAddress] = data[Index];//loads array
        sharedMemoryRanks[arrayAddress] = Index;
    }
    __syncthreads();
    // --- Collectively sort the pairs (value,rank)_i
    BlockRadixSortT(temp_storage).Sort(*static_cast<T(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryValueArray + (threadIdx.x * ITEMS_PER_THREAD))),
                                       *static_cast<int(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryRanks + (threadIdx.x * ITEMS_PER_THREAD))));
    __syncthreads();
    // --- Scatter the per-window gradient back to the input positions that the
    // sorted values came from. NOTE(review): windowoffsetj is recomputed here
    // but unused in this loop.
    for (int k = 0; k < ITEMS_PER_THREAD; k++){
        arrayAddress = threadIdx.x * ITEMS_PER_THREAD + k;
        windowoffsetj=(arrayAddress/windowHeight + offsetj) * Height;
        Index = block_offset + arrayAddress;
        //d_out[Index] = sharedMemoryValueArray[arrayAddress];
        //d_r_out[Index] = sharedMemoryRanks[arrayAddress];
        atomicAdd(derData + sharedMemoryRanks[arrayAddress], derSorted[Index]) ;
    }
}
/* ---------------------------------------------------------------- */
/* Interface */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template <typename type>
struct sorting_max<vl::VLDT_GPU, type>
{
static vl::ErrorCode
forward(type* sorted,
type const* data,
size_t height, size_t width, size_t depth,
size_t windowHeight, size_t windowWidth,
size_t strideHeight, size_t strideWidth)
{
int BoxesInWidth = (width - windowWidth)/strideWidth + 1 ;
int BoxesInHeight = (height - windowHeight)/strideHeight + 1 ;
int numThreadPerArray = numElemsPerArray;//int numThreadPerArray = numElemsPerArray / numElemsPerThread;
/*
(T* sorted,
const T* data,
const int Height,
const int windowWidth,
const int windowHeight,
const int strideWidth,
const int strideHeight,
const int BoxesInHeight
)
*/
hipLaunchKernelGGL(( sorting_max_kernel<type, numThreadPerArray, numElemsPerThread>)
, dim3(divideAndRoundUp(sortedVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
sorted, data,
height,
windowWidth, windowHeight,
strideWidth, strideHeight,BoxesInHeight);
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
static vl::ErrorCode
backward(type* derData,
type const* data,
type const* derOutput,
size_t height, size_t width, size_t depth,
size_t windowHeight, size_t windowWidth,
size_t strideHeight, size_t strideWidth)
{
int boxesInWidth = (width - windowWidth)/strideWidth + 1 ;
int boxesInHeight = (height - windowHeight)/strideHeight + 1 ;
int boxesInPlane = boxesInWidth * boxesInHeight;
int numThreadPerArray = windowWidth * windowHeight ;
dim3 dimBlock(numThreadPerArray,1);
dim3 dimGrid(boxesInPlane,depth);
hipLaunchKernelGGL(( sorting_max_backward_kernel<type>)
, dim3(divideAndRoundUp(sortedVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
derData, data, derOutput,
windowHeight, windowWidth,
height, width,
windowHeight, windowWidth,
strideHeight, strideWidth,
padTop, padLeft);
/*
(T* derData,
const T* data,
const T* derSorted,
const int Height,
const int windowWidth,
const int windowHeight,
const int strideWidth,
const int strideHeight,
const int BoxesInHeight
)
*/
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // sorting_max
} } ; // namespace vl::impl
// Instantiations
template struct vl::impl::sorting_max<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::sorting_max<vl::VLDT_GPU, double> ;
#endif
| b09ff77690c6d18a4cbf8f6d5f8034a64b13adf0.cu | // @file sorting_gpu.cu
// @brief Sorting block implementation (GPU)
// @author Andrea Vedaldi
// @author Karel Lenc
//mod. by j.b.<2017>
/*
Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "sorting.hpp"
#include "../datacu.hpp"
#include "sharedmem.cuh"
#include <assert.h>
#include <float.h>
#include <sm_20_atomic_functions.h>
#include <cub/cub.cuh>
/* ---------------------------------------------------------------- */
/* sorting_max_forward */
/* ---------------------------------------------------------------- */
template<typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD> __global__ void
sorting_kernel
(T* sorted,
const T* data,
const int Height,
const int windowWidth,
const int windowHeight,
const int strideWidth,
const int strideHeight,
const int BoxesInHeight
)
{
const int numElemsPerArray = BLOCK_THREADS * ITEMS_PER_THREAD;
// --- Shared memory allocation
__shared__ T sharedMemoryValueArray[numElemsPerArray];
// --- Specialize BlockStore and BlockRadixSort collective types
typedef cub::BlockRadixSort <int , BLOCK_THREADS, ITEMS_PER_THREAD> BlockRadixSortT; //led dernier argument est pour le rang
// --- Allocate type-safe, repurposable shared memory for collectives
__shared__ typename BlockRadixSortT::TempStorage temp_storage;
// section d_in->d_out
int offsetk= blockIdx.y*gridDim.x*numElemsPerArray;
int offsetj=blockIdx.x/BoxesInHeight;
offsetj=offsetj*strideWidth;
int offseti=blockIdx.x%BoxesInHeight;
offseti=offseti*strideHeight;
//
int block_offset = numElemsPerArray * blockIdx.x+offsetk;
int arrayAddress = 0;
int windowoffsetj = 0;
// --- Load data to shared memory
for (int k = 0; k < ITEMS_PER_THREAD; k++){
arrayAddress = threadIdx.x * ITEMS_PER_THREAD + k;
windowoffsetj=(arrayAddress/windowHeight + offsetj) * Height;
sharedMemoryValueArray[arrayAddress] = data[arrayAddress%windowHeight+windowoffsetj+offseti+offsetk];//loads array
}
__syncthreads();
// --- Collectively sort the keys
BlockRadixSortT(temp_storage).Sort(*static_cast<T(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryValueArray + (threadIdx.x * ITEMS_PER_THREAD))));
__syncthreads();
// --- Write data from shared memory
for (int k = 0; k < ITEMS_PER_THREAD; k++){
arrayAddress = threadIdx.x * ITEMS_PER_THREAD + k;
windowoffsetj=(arrayAddress/windowHeight + offsetj) * Height;
sorted[block_offset + arrayAddress] = sharedMemoryValueArray[arrayAddress];
}
}
/* ---------------------------------------------------------------- */
/* sorting_max_backward */
/* ---------------------------------------------------------------- */
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// an implementation of atomicAdd() for double (really slow) for older CC
static __device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
template<typename T, int BLOCK_THREADS, int ITEMS_PER_THREAD> __global__ void
sorting_max_backward_kernel
(T* derData,
const T* data,
const T* derSorted,
const int Height,
const int windowWidth,
const int windowHeight,
const int strideWidth,
const int strideHeight,
const int BoxesInHeight
)
{
const int numElemsPerArray = BLOCK_THREADS * ITEMS_PER_THREAD;
// --- Shared memory allocation
__shared__ T sharedMemoryValueArray[numElemsPerArray];
__shared__ int sharedMemoryRanks[numElemsPerArray];
// --- Specialize BlockStore and BlockRadixSort collective types
typedef cub::BlockRadixSort <int , BLOCK_THREADS, ITEMS_PER_THREAD, int> BlockRadixSortT; //led dernier argument est pour le rang
// --- Allocate type-safe, repurposable shared memory for collectives
__shared__ typename BlockRadixSortT::TempStorage temp_storage;
int offsetk= blockIdx.y*gridDim.x*numElemsPerArray;
int offsetj=blockIdx.x/BoxesInHeight;
offsetj=offsetj*strideWidth;
int offseti=blockIdx.x%BoxesInHeight;
offseti=offseti*strideHeight;
int block_offset = numElemsPerArray * blockIdx.x+offsetk;
int arrayAddress = 0;
int windowoffsetj = 0;
int Index = 0;
// --- Load data to shared memory
for (int k = 0; k < ITEMS_PER_THREAD; k++){
arrayAddress = threadIdx.x * ITEMS_PER_THREAD + k;
windowoffsetj=(arrayAddress/windowHeight + offsetj) * Height;
Index = arrayAddress%windowHeight+windowoffsetj+offseti+offsetk;
sharedMemoryValueArray[arrayAddress] = data[Index];//loads array
sharedMemoryRanks[arrayAddress] = Index;
}
__syncthreads();
// --- Collectively sort the pairs (value,rank)_i
BlockRadixSortT(temp_storage).Sort(*static_cast<T(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryValueArray + (threadIdx.x * ITEMS_PER_THREAD))),
*static_cast<int(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryRanks + (threadIdx.x * ITEMS_PER_THREAD))));
__syncthreads();
// --- Write data from shared memory
for (int k = 0; k < ITEMS_PER_THREAD; k++){
arrayAddress = threadIdx.x * ITEMS_PER_THREAD + k;
windowoffsetj=(arrayAddress/windowHeight + offsetj) * Height;
Index = block_offset + arrayAddress;
//d_out[Index] = sharedMemoryValueArray[arrayAddress];
//d_r_out[Index] = sharedMemoryRanks[arrayAddress];
atomicAdd(derData + sharedMemoryRanks[arrayAddress], derSorted[Index]) ;
}
}
/* ---------------------------------------------------------------- */
/* Interface */
/* ---------------------------------------------------------------- */
namespace vl { namespace impl {
template <typename type>
struct sorting_max<vl::VLDT_GPU, type>
{
static vl::ErrorCode
forward(type* sorted,
type const* data,
size_t height, size_t width, size_t depth,
size_t windowHeight, size_t windowWidth,
size_t strideHeight, size_t strideWidth)
{
int BoxesInWidth = (width - windowWidth)/strideWidth + 1 ;
int BoxesInHeight = (height - windowHeight)/strideHeight + 1 ;
int numThreadPerArray = numElemsPerArray;//int numThreadPerArray = numElemsPerArray / numElemsPerThread;
/*
(T* sorted,
const T* data,
const int Height,
const int windowWidth,
const int windowHeight,
const int strideWidth,
const int strideHeight,
const int BoxesInHeight
)
*/
sorting_max_kernel<type, numThreadPerArray, numElemsPerThread>
<<< divideAndRoundUp(sortedVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(sorted, data,
height,
windowWidth, windowHeight,
strideWidth, strideHeight,BoxesInHeight);
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
static vl::ErrorCode
backward(type* derData,
type const* data,
type const* derOutput,
size_t height, size_t width, size_t depth,
size_t windowHeight, size_t windowWidth,
size_t strideHeight, size_t strideWidth)
{
int boxesInWidth = (width - windowWidth)/strideWidth + 1 ;
int boxesInHeight = (height - windowHeight)/strideHeight + 1 ;
int boxesInPlane = boxesInWidth * boxesInHeight;
int numThreadPerArray = windowWidth * windowHeight ;
dim3 dimBlock(numThreadPerArray,1);
dim3 dimGrid(boxesInPlane,depth);
sorting_max_backward_kernel<type>
<<< divideAndRoundUp(sortedVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, data, derOutput,
windowHeight, windowWidth,
height, width,
windowHeight, windowWidth,
strideHeight, strideWidth,
padTop, padLeft);
/*
(T* derData,
const T* data,
const T* derSorted,
const int Height,
const int windowWidth,
const int windowHeight,
const int strideWidth,
const int strideHeight,
const int BoxesInHeight
)
*/
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ;
}
} ; // sorting_max
} } ; // namespace vl::impl
// Instantiations
template struct vl::impl::sorting_max<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::sorting_max<vl::VLDT_GPU, double> ;
#endif
|
cc36e02a1d61256fcccc6df39aeb010795ccd6c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel_x2 [5][2];
static int dims_advec_mom_kernel_x2_h [5][2] = {0};
//user function
__device__
inline void advec_mom_kernel_x2_gpu(ACC<double> &pre_vol,
ACC<double> &post_vol,
const ACC<double> &volume,
const ACC<double> &vol_flux_y,
const ACC<double> &vol_flux_z) {
post_vol(0,0,0) = volume(0,0,0) + vol_flux_z(0,0,1) - vol_flux_z(0,0,0);
pre_vol(0,0,0) = post_vol(0,0,0) + vol_flux_y(0,1,0) - vol_flux_y(0,0,0);
}
__global__ void ops_advec_mom_kernel_x2(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[0][0] * dims_advec_mom_kernel_x2[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[1][0] * dims_advec_mom_kernel_x2[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[2][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[2][0] * dims_advec_mom_kernel_x2[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[3][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[3][0] * dims_advec_mom_kernel_x2[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[4][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[4][0] * dims_advec_mom_kernel_x2[4][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel_x2[0][0], dims_advec_mom_kernel_x2[0][1], arg0);
ACC<double> argp1(dims_advec_mom_kernel_x2[1][0], dims_advec_mom_kernel_x2[1][1], arg1);
const ACC<double> argp2(dims_advec_mom_kernel_x2[2][0], dims_advec_mom_kernel_x2[2][1], arg2);
const ACC<double> argp3(dims_advec_mom_kernel_x2[3][0], dims_advec_mom_kernel_x2[3][1], arg3);
const ACC<double> argp4(dims_advec_mom_kernel_x2[4][0], dims_advec_mom_kernel_x2[4][1], arg4);
advec_mom_kernel_x2_gpu(argp0, argp1, argp2, argp3,
argp4);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_x2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4) {
#else
void ops_par_loop_advec_mom_kernel_x2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,5,range,122)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(122,"advec_mom_kernel_x2");
OPS_kernels[122].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != dims_advec_mom_kernel_x2_h[0][0] || ydim0 != dims_advec_mom_kernel_x2_h[0][1] || xdim1 != dims_advec_mom_kernel_x2_h[1][0] || ydim1 != dims_advec_mom_kernel_x2_h[1][1] || xdim2 != dims_advec_mom_kernel_x2_h[2][0] || ydim2 != dims_advec_mom_kernel_x2_h[2][1] || xdim3 != dims_advec_mom_kernel_x2_h[3][0] || ydim3 != dims_advec_mom_kernel_x2_h[3][1] || xdim4 != dims_advec_mom_kernel_x2_h[4][0] || ydim4 != dims_advec_mom_kernel_x2_h[4][1]) {
dims_advec_mom_kernel_x2_h[0][0] = xdim0;
dims_advec_mom_kernel_x2_h[0][1] = ydim0;
dims_advec_mom_kernel_x2_h[1][0] = xdim1;
dims_advec_mom_kernel_x2_h[1][1] = ydim1;
dims_advec_mom_kernel_x2_h[2][0] = xdim2;
dims_advec_mom_kernel_x2_h[2][1] = ydim2;
dims_advec_mom_kernel_x2_h[3][0] = xdim3;
dims_advec_mom_kernel_x2_h[3][1] = ydim3;
dims_advec_mom_kernel_x2_h[4][0] = xdim4;
dims_advec_mom_kernel_x2_h[4][1] = ydim4;
cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel_x2, dims_advec_mom_kernel_x2_h, sizeof(dims_advec_mom_kernel_x2)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[5];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args,5,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[122].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_mom_kernel_x2), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[122].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[122].mpi_time += t2-t1;
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel_x2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 122;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 122;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 5;
desc->args = (ops_arg*)malloc(5*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->function = ops_par_loop_advec_mom_kernel_x2_execute;
if (OPS_diags > 1) {
ops_timing_realloc(122,"advec_mom_kernel_x2");
}
ops_enqueue_kernel(desc);
}
#endif
| cc36e02a1d61256fcccc6df39aeb010795ccd6c1.cu | //
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel_x2 [5][2];
static int dims_advec_mom_kernel_x2_h [5][2] = {0};
//user function
__device__
inline void advec_mom_kernel_x2_gpu(ACC<double> &pre_vol,
ACC<double> &post_vol,
const ACC<double> &volume,
const ACC<double> &vol_flux_y,
const ACC<double> &vol_flux_z) {
post_vol(0,0,0) = volume(0,0,0) + vol_flux_z(0,0,1) - vol_flux_z(0,0,0);
pre_vol(0,0,0) = post_vol(0,0,0) + vol_flux_y(0,1,0) - vol_flux_y(0,0,0);
}
__global__ void ops_advec_mom_kernel_x2(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[0][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[0][0] * dims_advec_mom_kernel_x2[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[1][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[1][0] * dims_advec_mom_kernel_x2[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[2][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[2][0] * dims_advec_mom_kernel_x2[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[3][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[3][0] * dims_advec_mom_kernel_x2[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel_x2[4][0] + idx_z * 1*1 * dims_advec_mom_kernel_x2[4][0] * dims_advec_mom_kernel_x2[4][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_advec_mom_kernel_x2[0][0], dims_advec_mom_kernel_x2[0][1], arg0);
ACC<double> argp1(dims_advec_mom_kernel_x2[1][0], dims_advec_mom_kernel_x2[1][1], arg1);
const ACC<double> argp2(dims_advec_mom_kernel_x2[2][0], dims_advec_mom_kernel_x2[2][1], arg2);
const ACC<double> argp3(dims_advec_mom_kernel_x2[3][0], dims_advec_mom_kernel_x2[3][1], arg3);
const ACC<double> argp4(dims_advec_mom_kernel_x2[4][0], dims_advec_mom_kernel_x2[4][1], arg4);
advec_mom_kernel_x2_gpu(argp0, argp1, argp2, argp3,
argp4);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel_x2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4) {
#else
void ops_par_loop_advec_mom_kernel_x2_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,5,range,122)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(122,"advec_mom_kernel_x2");
OPS_kernels[122].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != dims_advec_mom_kernel_x2_h[0][0] || ydim0 != dims_advec_mom_kernel_x2_h[0][1] || xdim1 != dims_advec_mom_kernel_x2_h[1][0] || ydim1 != dims_advec_mom_kernel_x2_h[1][1] || xdim2 != dims_advec_mom_kernel_x2_h[2][0] || ydim2 != dims_advec_mom_kernel_x2_h[2][1] || xdim3 != dims_advec_mom_kernel_x2_h[3][0] || ydim3 != dims_advec_mom_kernel_x2_h[3][1] || xdim4 != dims_advec_mom_kernel_x2_h[4][0] || ydim4 != dims_advec_mom_kernel_x2_h[4][1]) {
dims_advec_mom_kernel_x2_h[0][0] = xdim0;
dims_advec_mom_kernel_x2_h[0][1] = ydim0;
dims_advec_mom_kernel_x2_h[1][0] = xdim1;
dims_advec_mom_kernel_x2_h[1][1] = ydim1;
dims_advec_mom_kernel_x2_h[2][0] = xdim2;
dims_advec_mom_kernel_x2_h[2][1] = ydim2;
dims_advec_mom_kernel_x2_h[3][0] = xdim3;
dims_advec_mom_kernel_x2_h[3][1] = ydim3;
dims_advec_mom_kernel_x2_h[4][0] = xdim4;
dims_advec_mom_kernel_x2_h[4][1] = ydim4;
cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel_x2, dims_advec_mom_kernel_x2_h, sizeof(dims_advec_mom_kernel_x2)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[5];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args,5,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[122].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_mom_kernel_x2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[122].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[122].mpi_time += t2-t1;
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel_x2(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 122;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 122;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 5;
desc->args = (ops_arg*)malloc(5*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->function = ops_par_loop_advec_mom_kernel_x2_execute;
if (OPS_diags > 1) {
ops_timing_realloc(122,"advec_mom_kernel_x2");
}
ops_enqueue_kernel(desc);
}
#endif
|
df2ba0c70e9650865bea3b425c8d44a57ac5e497.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// modified from mmdetection
#include "deform_2d_cuda_kernel.cuh"
using namespace at;
template <typename scalar_t>
void deformable_2d_im2col_cuda(hipStream_t stream,
const scalar_t* data_im, const scalar_t* data_offset,
const int batch_size, const int channels,
const int height_im, const int width_im,
const int height_col, const int width_col,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, scalar_t* data_col,
const int order)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel<scalar_t>)
, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream,
num_kernels, data_im, data_offset,
height_im, width_im,
kernel_h, kernel_w,
pad_h, pad_w,
stride_h, stride_w,
dilation_h, dilation_w,
channel_per_deformable_group,
batch_size, channels, deformable_group,
height_col, width_col, data_col, order);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in deformable_im2col_cuda: %s\n", hipGetErrorString(err));
}
}
at::Tensor
deform_2d_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step,
const int order)
{
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
const int im2col_step_ = ::min(batch, im2col_step);
AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
"channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group);
// printf("Kernels: %d %d %d %d %d %d\n", kernel_d_, kernel_h_, kernel_w_, kernel_d, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == (channels_kernel * group),
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto output = at::empty({batch * height_out * width_out, channels_out}, input.options());
// prepare group weight and bias
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
auto bias_g = bias.view({group, channels_out/group});
// define alias for easy use
const int batch_n = im2col_step_;
const int per_input_size = channels * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "deform_conv_forward_cuda", ([&] {
deformable_2d_im2col_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>(), order);
}));
// auto columns_m = columns.t();
// auto weight_m = weight.view({channels_out, channels_kernel * kernel_d * kernel_h * kernel_w}).t();
// output = at::addmm(bias, columns_m, weight_m);
auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group});
for (int g = 0; g < group; ++g)
{
auto columns_gm = columns_g.select(0, g).t();
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm);
output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group});
}
}
output = output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous();
return output;
}
| df2ba0c70e9650865bea3b425c8d44a57ac5e497.cu | // modified from mmdetection
#include "deform_2d_cuda_kernel.cuh"
using namespace at;
template <typename scalar_t>
void deformable_2d_im2col_cuda(cudaStream_t stream,
const scalar_t* data_im, const scalar_t* data_offset,
const int batch_size, const int channels,
const int height_im, const int width_im,
const int height_col, const int width_col,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, scalar_t* data_col,
const int order)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
deformable_im2col_gpu_kernel<scalar_t>
<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
num_kernels, data_im, data_offset,
height_im, width_im,
kernel_h, kernel_w,
pad_h, pad_w,
stride_h, stride_w,
dilation_h, dilation_w,
channel_per_deformable_group,
batch_size, channels, deformable_group,
height_col, width_col, data_col, order);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
at::Tensor
deform_2d_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step,
const int order)
{
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
const int im2col_step_ = std::min(batch, im2col_step);
AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_);
AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
"channels(%d) and channels_out(%d) must divide group(%d)", channels, channels_out, group);
// printf("Kernels: %d %d %d %d %d %d\n", kernel_d_, kernel_h_, kernel_w_, kernel_d, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == (channels_kernel * group),
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto output = at::empty({batch * height_out * width_out, channels_out}, input.options());
// prepare group weight and bias
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_h, kernel_w});
auto bias_g = bias.view({group, channels_out/group});
// define alias for easy use
const int batch_n = im2col_step_;
const int per_input_size = channels * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3);
auto output_n = output.view({batch/im2col_step_, batch_n * height_out * width_out, channels_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto columns = at::empty({channels * kernel_h * kernel_w, batch_n * height_out * width_out}, input.options());
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "deform_conv_forward_cuda", ([&] {
deformable_2d_im2col_cuda(at::cuda::getCurrentCUDAStream(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>(), order);
}));
// auto columns_m = columns.t();
// auto weight_m = weight.view({channels_out, channels_kernel * kernel_d * kernel_h * kernel_w}).t();
// output = at::addmm(bias, columns_m, weight_m);
auto columns_g = columns.view({group, channels/group * kernel_h * kernel_w, batch_n * height_out * width_out});
auto output_g = output_n.select(0, n).view({batch_n * height_out * width_out, group, channels_out/group});
for (int g = 0; g < group; ++g)
{
auto columns_gm = columns_g.select(0, g).t();
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_h * kernel_w}).t();
auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm);
output_g.select(1, g) = output_m.view({batch_n * height_out * width_out, channels_out/group});
}
}
output = output.view({batch, height_out, width_out, channels_out}).permute({0, 3, 1, 2}).contiguous();
return output;
}
|
dfd5fc78625029206275a01edb061b6662eab5d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __CUDALOCKSBARRIERFAST_CU__
#define __CUDALOCKSBARRIERFAST_CU__
#include "cudaLocks.h"
/*
Helper function to set the passed in inVars flag to 1 (signifies that this TB
has joined the barrier).
*/
inline __device__ void setMyInFlag(unsigned int * inVars,
const unsigned int threadID,
const unsigned int blockID) {
if (threadID == 0)
{
atomicExch((unsigned int *)(inVars + blockID), 1);
}
__syncthreads();
}
/*
Helper function for the main TB of this group to spin, checking to see if
all other TBs joining this barrier have joined or not.
*/
inline __device__ void spinOnInFlags(unsigned int * inVars,
const int threadID,
const int numThreads,
const int numBlocks) {
// local variables
int done3 = 1;
// "main" TB loops, checking if everyone else has joined the barrier.
do
{
done3 = 1;
/*
Each thread in the main TB accesses a subset of the blocks, checking
if they have joined the barrier yet or not.
*/
for (int i = threadID; i < numBlocks; i += numThreads)
{
if (atomicXor((unsigned int *)(inVars + i), 0) != 1) {
done3 = 0;
// if one of them isn't ready, don't bother checking the others (just
// increases traffic)
break;
}
}
} while (!done3);
/*
When all the necessary TBs have joined the barrier, the threads will
reconverge here -- this avoids unnecessary atomic accesses for threads
whose assigned TBs have already joined the barrier.
*/
__syncthreads();
}
/*
Helper function for the main TB of this group to spin, checking to see if
all other TBs joining this barrier have joined or not.
*/
inline __device__ void spinOnInFlags_local(unsigned int * inVars,
const int threadID,
const int numThreads,
const int numBlocks) {
// local variables
int done3 = 1;
// "main" TB loops, checking if everyone else has joined the barrier.
do
{
done3 = 1;
/*
Each thread in the main TB accesses a subset of the blocks, checking
if they have joined the barrier yet or not.
*/
for (int i = threadID; i < numBlocks; i += numThreads)
{
if (atomicXor((unsigned int *)(inVars + i), 0) != 1) {
done3 = 0;
// if one of them isn't ready, don't bother checking the others (just
// increases traffic)
break;
}
}
} while (!done3);
/*
When all the necessary TBs have joined the barrier, the threads will
reconverge here -- this avoids unnecessary atomic accesses for threads
whose assigned TBs have already joined the barrier.
*/
__syncthreads();
}
/*
Helper function for main TB to set the outVars flags for all TBs at this
barrier to notify them that everyone has joined the barrier and they can
proceed.
*/
inline __device__ void setOutFlags(unsigned int * inVars,
unsigned int * outVars,
const int threadID,
const int numThreads,
const int numBlocks) {
for (int i = threadID; i < numBlocks; i += numThreads)
{
/*
inVars can be a regular data access (write) because the atomicExch
to outVars provides the happens-before ordering.
** NOTE: For L2 atomics, this will break, because it does not allow
remotely owned words that also have atomics performed on them. Since
the global barrier has inVars locations accessed by multiple TBs on
different SMs, this requires using an atomic.
*/
//inVars[i] = 0;
atomicOr(&(inVars[i]), 1); // atomicOr is a reprogrammed atomicSt
atomicExch((unsigned int *)(outVars + i), 1);
}
__syncthreads();
}
/*
Helper function for main TB to set the outVars flags for all TBs at this
barrier to notify them that everyone has joined the barrier and they can
proceed.
*/
inline __device__ void setOutFlags_local(unsigned int * inVars,
unsigned int * outVars,
const int threadID,
const int numThreads,
const int numBlocks) {
for (int i = threadID; i < numBlocks; i += numThreads)
{
/*
inVars can be a regular data access (write) because the atomicExch
to outVars provides the happens-before ordering.
** NOTE: For L2 atomics, this will break, because it does not allow
remotely owned words that also have atomics performed on them. Since
the global barrier has inVars locations accessed by multiple TBs on
different SMs, this requires using an atomic.
*/
//inVars[i] = 0;
atomicOr(&(inVars[i]), 1); // atomicOr is a reprogrammed atomicSt
atomicExch((unsigned int *)(outVars + i), 1);
}
__syncthreads();
}
/*
Helper function for each TB to spin waiting for its outVars flag to be set
by the main TB. When it is set, then this TB can safely exit the barrier.
*/
inline __device__ void spinOnMyOutFlag(unsigned int * inVars,
unsigned int * outVars,
const int blockID,
const int threadID) {
if (threadID == 0)
{
while (atomicXor((unsigned int *)(outVars + blockID), 0) != 1) { ; }
//inVars[blockID] = outVars[blockID] = 0;
/*
Have to reset outVars because we were just looping checking if it was
1 or not -- need to use an atomic because no nearby release to ensure
happens-before ordering with subsequent writes to outVars above.
*/
inVars[blockID] = 0;
atomicExch(&(outVars[blockID]), 0);
}
__syncthreads();
}
/*
Helper function for each TB to spin waiting for its outVars flag to be set
by the main TB. When it is set, then this TB can safely exit the barrier.
*/
inline __device__ void spinOnMyOutFlag_local(unsigned int * inVars,
unsigned int * outVars,
const int blockID,
const int threadID) {
if (threadID == 0)
{
while (atomicXor((unsigned int *)(outVars + blockID), 0) != 1) { ; }
//inVars[blockID] = outVars[blockID] = 0;
/*
Have to reset outVars because we were just looping checking if it was
1 or not -- need to use an atomic because no nearby release to ensure
happens-before ordering with subsequent writes to outVars above.
*/
inVars[blockID] = 0;
atomicExch(&(outVars[blockID]), 0);
}
__syncthreads();
}
__device__ void cudaBarrier(unsigned int * barrierBuffers,
const int arrayStride,
const unsigned int numBlocksAtBarr)
{
// local variables
const int threadID = threadIdx.x;
const int blockID = blockIdx.x;
const int numThreads = blockDim.x;
// ** NOTE: setting numBlocks like this only works if the first TB on
// each SM joins the global barrier
const int numBlocks = numBlocksAtBarr;
unsigned int * const inVars = barrierBuffers;
unsigned int * const outVars = barrierBuffers + arrayStride;
/*
Thread 0 from each TB sets its 'private' flag in the in array to 1 to
signify that it has joined the barrier.
*/
setMyInFlag(inVars, threadID, blockID);
// TB 0 is the "main" TB for the global barrier
if (blockID == 0)
{
// "main" TB loops, checking if everyone else has joined the barrier.
spinOnInFlags(inVars, threadID, numThreads, numBlocks);
/*
Once all the TBs arrive at the barrier, the main TB resets them to
notify everyone else that they can move forward beyond the barrier --
again each thread in the main TB takes a subset of the necessary TBs
and sets their in flag to 0 and out flag to 1.
*/
setOutFlags(inVars, outVars, threadID, numThreads, numBlocks);
}
/*
All TBs (including the main one) spin, checking to see if the main one
set their out location yet -- if it did, then they can move ahead
because the barrier is done.
*/
spinOnMyOutFlag(inVars, outVars, blockID, threadID);
}
// same algorithm but per-SM synchronization
__device__ void cudaBarrierLocal(// for global barrier
unsigned int * barrierBuffers,
const unsigned int numBlocksAtBarr,
const int arrayStride,
// for local barrier
unsigned int * perSMBarrierBuffers,
const unsigned int smID,
const unsigned int numTBs_perSM,
const unsigned int perSM_blockID,
const bool isLocalGlobalBarr)
{
// local variables
const int threadID = threadIdx.x;
const int numThreads = blockDim.x;
const int numBlocks = numTBs_perSM;
/*
Each SM has MAX_BLOCKS*2 locations in perSMBarrierBuffers, so my SM's
inVars locations start at perSMBarrierBuffers[smID*2*MAX_BLOCKS] and my
SM's outVars locations start at
perSMBarrierBuffers[smID*2*MAX_BLOCKS + MAX_BLOCKS].
*/
unsigned int * const inVars = perSMBarrierBuffers + (MAX_BLOCKS * smID * 2);
unsigned int * const outVars = perSMBarrierBuffers + ((MAX_BLOCKS * smID * 2) + MAX_BLOCKS);
/*
Thread 0 from each TB sets its 'private' flag in the in array to 1 to
signify that it has joined the barrier.
*/
setMyInFlag(inVars, threadID, perSM_blockID);
// first TB on this SM is the "main" TB for the local barrier
if (perSM_blockID == 0)
{
// "main" TB loops, checking if everyone else has joined the barrier.
spinOnInFlags_local(inVars, threadID, numThreads, numBlocks);
/*
If we are calling the global tree barrier from within the local tree
barrier, call it here. Now that all of the TBs on this SM have joined
the local barrier, TB 0 on this SM joins the global barrier.
*/
if (isLocalGlobalBarr) {
cudaBarrier(barrierBuffers, arrayStride, numBlocksAtBarr);
}
/*
Once all the TBs arrive at the barrier, the main TB resets their inVar
and sets their outVar to notify everyone else that they can move
forward beyond the barrier -- each thread in the main TB takes a subset
of the necessary TBs and sets their in flag to 0 and out flag to 1.
*/
setOutFlags_local(inVars, outVars, threadID, numThreads, numBlocks);
}
/*
All TBs (including the main one) spin, checking to see if the main TB
set their out location yet -- if it did, then they can move ahead
because the barrier is done.
*/
spinOnMyOutFlag_local(inVars, outVars, perSM_blockID, threadID);
}
/*
Decentralized tree barrier that has 1 TB per SM join the global decentralized
barrier in the middle, then sets the out flags of the others on this SM to 1
after returning. This avoids the need for a second local barrier after the
global barrier.
*/
__device__ void cudaBarrierLocalGlobal(// for global barrier
unsigned int * barrierBuffers,
const unsigned int numBlocksAtBarr,
const int arrayStride,
// for local barrier
unsigned int * perSMBarrierBuffers,
const unsigned int smID,
const unsigned int numTBs_perSM,
const unsigned int perSM_blockID)
{
// will call global barrier within it
cudaBarrierLocal(barrierBuffers, numBlocksAtBarr, arrayStride,
perSMBarrierBuffers, smID, numTBs_perSM, perSM_blockID,
true);
}
/*
Helper function for joining the barrier with the 'lock-free' tree barrier.
*/
__device__ void joinLFBarrier_helper(unsigned int * barrierBuffers,
unsigned int * perSMBarrierBuffers,
const unsigned int numBlocksAtBarr,
const int smID,
const int perSM_blockID,
const int numTBs_perSM,
const int arrayStride) {
if (numTBs_perSM > 1) {
cudaBarrierLocalGlobal(barrierBuffers, numBlocksAtBarr, arrayStride,
perSMBarrierBuffers, smID, numTBs_perSM,
perSM_blockID);
} else { // if only 1 TB on the SM, no need for the local barriers
cudaBarrier(barrierBuffers, arrayStride, numBlocksAtBarr);
}
}
#endif
| dfd5fc78625029206275a01edb061b6662eab5d1.cu | #ifndef __CUDALOCKSBARRIERFAST_CU__
#define __CUDALOCKSBARRIERFAST_CU__
#include "cudaLocks.h"
/*
Helper function to set the passed in inVars flag to 1 (signifies that this TB
has joined the barrier).
*/
inline __device__ void setMyInFlag(unsigned int * inVars,
const unsigned int threadID,
const unsigned int blockID) {
if (threadID == 0)
{
atomicExch((unsigned int *)(inVars + blockID), 1);
}
__syncthreads();
}
/*
Helper function for the main TB of this group to spin, checking to see if
all other TBs joining this barrier have joined or not.
*/
inline __device__ void spinOnInFlags(unsigned int * inVars,
const int threadID,
const int numThreads,
const int numBlocks) {
// local variables
int done3 = 1;
// "main" TB loops, checking if everyone else has joined the barrier.
do
{
done3 = 1;
/*
Each thread in the main TB accesses a subset of the blocks, checking
if they have joined the barrier yet or not.
*/
for (int i = threadID; i < numBlocks; i += numThreads)
{
if (atomicXor((unsigned int *)(inVars + i), 0) != 1) {
done3 = 0;
// if one of them isn't ready, don't bother checking the others (just
// increases traffic)
break;
}
}
} while (!done3);
/*
When all the necessary TBs have joined the barrier, the threads will
reconverge here -- this avoids unnecessary atomic accesses for threads
whose assigned TBs have already joined the barrier.
*/
__syncthreads();
}
/*
Helper function for the main TB of this group to spin, checking to see if
all other TBs joining this barrier have joined or not.
*/
inline __device__ void spinOnInFlags_local(unsigned int * inVars,
const int threadID,
const int numThreads,
const int numBlocks) {
// local variables
int done3 = 1;
// "main" TB loops, checking if everyone else has joined the barrier.
do
{
done3 = 1;
/*
Each thread in the main TB accesses a subset of the blocks, checking
if they have joined the barrier yet or not.
*/
for (int i = threadID; i < numBlocks; i += numThreads)
{
if (atomicXor((unsigned int *)(inVars + i), 0) != 1) {
done3 = 0;
// if one of them isn't ready, don't bother checking the others (just
// increases traffic)
break;
}
}
} while (!done3);
/*
When all the necessary TBs have joined the barrier, the threads will
reconverge here -- this avoids unnecessary atomic accesses for threads
whose assigned TBs have already joined the barrier.
*/
__syncthreads();
}
/*
Helper function for main TB to set the outVars flags for all TBs at this
barrier to notify them that everyone has joined the barrier and they can
proceed.
*/
inline __device__ void setOutFlags(unsigned int * inVars,
unsigned int * outVars,
const int threadID,
const int numThreads,
const int numBlocks) {
for (int i = threadID; i < numBlocks; i += numThreads)
{
/*
inVars can be a regular data access (write) because the atomicExch
to outVars provides the happens-before ordering.
** NOTE: For L2 atomics, this will break, because it does not allow
remotely owned words that also have atomics performed on them. Since
the global barrier has inVars locations accessed by multiple TBs on
different SMs, this requires using an atomic.
*/
//inVars[i] = 0;
atomicOr(&(inVars[i]), 1); // atomicOr is a reprogrammed atomicSt
atomicExch((unsigned int *)(outVars + i), 1);
}
__syncthreads();
}
/*
Helper function for main TB to set the outVars flags for all TBs at this
barrier to notify them that everyone has joined the barrier and they can
proceed.
*/
inline __device__ void setOutFlags_local(unsigned int * inVars,
unsigned int * outVars,
const int threadID,
const int numThreads,
const int numBlocks) {
for (int i = threadID; i < numBlocks; i += numThreads)
{
/*
inVars can be a regular data access (write) because the atomicExch
to outVars provides the happens-before ordering.
** NOTE: For L2 atomics, this will break, because it does not allow
remotely owned words that also have atomics performed on them. Since
the global barrier has inVars locations accessed by multiple TBs on
different SMs, this requires using an atomic.
*/
//inVars[i] = 0;
atomicOr(&(inVars[i]), 1); // atomicOr is a reprogrammed atomicSt
atomicExch((unsigned int *)(outVars + i), 1);
}
__syncthreads();
}
/*
Helper function for each TB to spin waiting for its outVars flag to be set
by the main TB. When it is set, then this TB can safely exit the barrier.
*/
inline __device__ void spinOnMyOutFlag(unsigned int * inVars,
unsigned int * outVars,
const int blockID,
const int threadID) {
if (threadID == 0)
{
while (atomicXor((unsigned int *)(outVars + blockID), 0) != 1) { ; }
//inVars[blockID] = outVars[blockID] = 0;
/*
Have to reset outVars because we were just looping checking if it was
1 or not -- need to use an atomic because no nearby release to ensure
happens-before ordering with subsequent writes to outVars above.
*/
inVars[blockID] = 0;
atomicExch(&(outVars[blockID]), 0);
}
__syncthreads();
}
/*
Helper function for each TB to spin waiting for its outVars flag to be set
by the main TB. When it is set, then this TB can safely exit the barrier.
*/
inline __device__ void spinOnMyOutFlag_local(unsigned int * inVars,
unsigned int * outVars,
const int blockID,
const int threadID) {
if (threadID == 0)
{
while (atomicXor((unsigned int *)(outVars + blockID), 0) != 1) { ; }
//inVars[blockID] = outVars[blockID] = 0;
/*
Have to reset outVars because we were just looping checking if it was
1 or not -- need to use an atomic because no nearby release to ensure
happens-before ordering with subsequent writes to outVars above.
*/
inVars[blockID] = 0;
atomicExch(&(outVars[blockID]), 0);
}
__syncthreads();
}
__device__ void cudaBarrier(unsigned int * barrierBuffers,
const int arrayStride,
const unsigned int numBlocksAtBarr)
{
// local variables
const int threadID = threadIdx.x;
const int blockID = blockIdx.x;
const int numThreads = blockDim.x;
// ** NOTE: setting numBlocks like this only works if the first TB on
// each SM joins the global barrier
const int numBlocks = numBlocksAtBarr;
unsigned int * const inVars = barrierBuffers;
unsigned int * const outVars = barrierBuffers + arrayStride;
/*
Thread 0 from each TB sets its 'private' flag in the in array to 1 to
signify that it has joined the barrier.
*/
setMyInFlag(inVars, threadID, blockID);
// TB 0 is the "main" TB for the global barrier
if (blockID == 0)
{
// "main" TB loops, checking if everyone else has joined the barrier.
spinOnInFlags(inVars, threadID, numThreads, numBlocks);
/*
Once all the TBs arrive at the barrier, the main TB resets them to
notify everyone else that they can move forward beyond the barrier --
again each thread in the main TB takes a subset of the necessary TBs
and sets their in flag to 0 and out flag to 1.
*/
setOutFlags(inVars, outVars, threadID, numThreads, numBlocks);
}
/*
All TBs (including the main one) spin, checking to see if the main one
set their out location yet -- if it did, then they can move ahead
because the barrier is done.
*/
spinOnMyOutFlag(inVars, outVars, blockID, threadID);
}
// same algorithm but per-SM synchronization
__device__ void cudaBarrierLocal(// for global barrier
unsigned int * barrierBuffers,
const unsigned int numBlocksAtBarr,
const int arrayStride,
// for local barrier
unsigned int * perSMBarrierBuffers,
const unsigned int smID,
const unsigned int numTBs_perSM,
const unsigned int perSM_blockID,
const bool isLocalGlobalBarr)
{
// local variables
const int threadID = threadIdx.x;
const int numThreads = blockDim.x;
const int numBlocks = numTBs_perSM;
/*
Each SM has MAX_BLOCKS*2 locations in perSMBarrierBuffers, so my SM's
inVars locations start at perSMBarrierBuffers[smID*2*MAX_BLOCKS] and my
SM's outVars locations start at
perSMBarrierBuffers[smID*2*MAX_BLOCKS + MAX_BLOCKS].
*/
unsigned int * const inVars = perSMBarrierBuffers + (MAX_BLOCKS * smID * 2);
unsigned int * const outVars = perSMBarrierBuffers + ((MAX_BLOCKS * smID * 2) + MAX_BLOCKS);
/*
Thread 0 from each TB sets its 'private' flag in the in array to 1 to
signify that it has joined the barrier.
*/
setMyInFlag(inVars, threadID, perSM_blockID);
// first TB on this SM is the "main" TB for the local barrier
if (perSM_blockID == 0)
{
// "main" TB loops, checking if everyone else has joined the barrier.
spinOnInFlags_local(inVars, threadID, numThreads, numBlocks);
/*
If we are calling the global tree barrier from within the local tree
barrier, call it here. Now that all of the TBs on this SM have joined
the local barrier, TB 0 on this SM joins the global barrier.
*/
if (isLocalGlobalBarr) {
cudaBarrier(barrierBuffers, arrayStride, numBlocksAtBarr);
}
/*
Once all the TBs arrive at the barrier, the main TB resets their inVar
and sets their outVar to notify everyone else that they can move
forward beyond the barrier -- each thread in the main TB takes a subset
of the necessary TBs and sets their in flag to 0 and out flag to 1.
*/
setOutFlags_local(inVars, outVars, threadID, numThreads, numBlocks);
}
/*
All TBs (including the main one) spin, checking to see if the main TB
set their out location yet -- if it did, then they can move ahead
because the barrier is done.
*/
spinOnMyOutFlag_local(inVars, outVars, perSM_blockID, threadID);
}
/*
Decentralized tree barrier that has 1 TB per SM join the global decentralized
barrier in the middle, then sets the out flags of the others on this SM to 1
after returning. This avoids the need for a second local barrier after the
global barrier.
*/
__device__ void cudaBarrierLocalGlobal(// for global barrier
unsigned int * barrierBuffers,
const unsigned int numBlocksAtBarr,
const int arrayStride,
// for local barrier
unsigned int * perSMBarrierBuffers,
const unsigned int smID,
const unsigned int numTBs_perSM,
const unsigned int perSM_blockID)
{
// will call global barrier within it
cudaBarrierLocal(barrierBuffers, numBlocksAtBarr, arrayStride,
perSMBarrierBuffers, smID, numTBs_perSM, perSM_blockID,
true);
}
/*
Helper function for joining the barrier with the 'lock-free' tree barrier.
*/
__device__ void joinLFBarrier_helper(unsigned int * barrierBuffers,
unsigned int * perSMBarrierBuffers,
const unsigned int numBlocksAtBarr,
const int smID,
const int perSM_blockID,
const int numTBs_perSM,
const int arrayStride) {
if (numTBs_perSM > 1) {
cudaBarrierLocalGlobal(barrierBuffers, numBlocksAtBarr, arrayStride,
perSMBarrierBuffers, smID, numTBs_perSM,
perSM_blockID);
} else { // if only 1 TB on the SM, no need for the local barriers
cudaBarrier(barrierBuffers, arrayStride, numBlocksAtBarr);
}
}
#endif
|
b6e53b0d2b7af915aab98aca1b56351da6c0a5cd.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the spopecific language governing permissions and
limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(cudnn_deterministic);
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_exhaustive_search);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
template <typename T>
using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
using framework::AlgorithmsCache;
// Decomposes a 4-D/5-D tensor shape into N, C, D, H, W components.
// For 4-D shapes, D is reported as 1. The channel dimension is read from
// position 1 for kNCHW and from the last position for channel-last layouts.
static inline void GetNCDHW(const framework::DDim& dims,
                            const DataLayout& layout, int* N, int* C, int* D,
                            int* H, int* W) {
  const bool channel_first = (layout == DataLayout::kNCHW);
  *N = dims[0];
  *C = channel_first ? dims[1] : dims[dims.size() - 1];
  // Spatial dims begin at index 2 for channel-first layouts, index 1 otherwise.
  const int offset = channel_first ? 0 : 1;
  if (dims.size() == 5) {
    *D = dims[2 - offset];
    *H = dims[3 - offset];
    *W = dims[4 - offset];
  } else {
    *D = 1;
    *H = dims[2 - offset];
    *W = dims[3 - offset];
  }
}
// Copies a slice of `input` into the pre-sized tensor `out` on the Eigen
// device of `context`. For each axis listed in `axes` the slice begins at
// the matching entry of `starts` (negative values count from the end of
// that axis and are clamped to 0); all other axes are taken in full. The
// slice extents come from `out`'s dims, which the caller must have set.
template <typename DeviceContext, typename T, size_t D>
static void Slice_2(const framework::ExecutionContext& context,
                    const Tensor* input, Tensor* out,
                    const std::vector<int>& starts,
                    const std::vector<int>& axes) {
  auto& place =
      *context.template device_context<DeviceContext>().eigen_device();
  auto in_dims = input->dims();
  auto out_dims = out->dims();
  // Default: full extent starting at offset 0 on every axis.
  Eigen::array<int, D> offsets;
  Eigen::array<int, D> extents;
  for (size_t i = 0; i < D; ++i) {
    offsets[i] = 0;
    extents[i] = out_dims[i];
  }
  // Override the offset on each explicitly sliced axis.
  for (size_t i = 0; i < axes.size(); ++i) {
    int begin = starts[i];
    if (begin < 0) {
      begin += in_dims[axes[i]];  // negative start counts from the end
    }
    if (begin < 0) {
      begin = 0;  // clamp, equivalent to max(begin, 0)
    }
    offsets[axes[i]] = begin;
  }
  auto in_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *input);
  auto out_t =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *out, out_dims);
  out_t.device(place) = in_t.slice(offsets, extents);
}
// cuDNN-backed forward convolution kernel (conv2d / conv3d).
//
// Pipeline:
//  1. Channel-last inputs (NHWC/NDHWC) are transposed to channel-first,
//     since the cuDNN descriptors below are configured for NCHW/NCDHW.
//  2. Paddings/dilations are normalized; asymmetric paddings are folded
//     into an explicitly zero-padded input copy, because cuDNN only
//     supports symmetric padding per spatial dimension.
//  3. A forward algorithm is selected (honoring exhaustive search) and
//     cudnnConvolutionForward is invoked once per group.
template <typename T>
class CUDNNConvOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                      "It must use CUDAPlace.");
    const Tensor* input = ctx.Input<Tensor>("Input");
    auto* filter = ctx.Input<Tensor>("Filter");
    auto* output = ctx.Output<Tensor>("Output");
    output->mutable_data<T>(ctx.GetPlace());
    const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    int groups = ctx.Attr<int>("groups");
    bool exhaustive_search =
        FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
    if (exhaustive_search && FLAGS_cudnn_deterministic) {
      // Exhaustive search picks algorithms non-deterministically, so it is
      // mutually exclusive with the determinism flag. (Message wording
      // fixed: "Cann't" -> "Can't", matching the grad kernels.)
      PADDLE_THROW(
          "Can't set exhaustive_search True and "
          "FLAGS_cudnn_deterministic True at same time.");
    }
    const std::string padding_algorithm =
        ctx.Attr<std::string>("padding_algorithm");
    const std::string data_format = ctx.Attr<std::string>("data_format");
    const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
    // ------------ transformed tensor -----------
    // Channel-last tensors are transposed to channel-first working copies;
    // otherwise the working copies alias the originals.
    Tensor transformed_input_channel(input->type());
    Tensor transformed_output(output->type());
    T* output_data = nullptr;
    if (channel_last) {
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, input, &transformed_input_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, input, &transformed_input_channel);
      // Output only needs resizing: cuDNN writes it below.
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output,
                                                           &transformed_output);
    } else {
      transformed_input_channel = *input;
      transformed_output = *output;
    }
    output_data = transformed_output.data<T>();
    // update padding and dilation
    auto in_dims = transformed_input_channel.dims();
    auto filter_dims = filter->dims();
    framework::DDim in_data_dims;
    in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);
    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
    Tensor transformed_input;
    std::vector<int> padding_common(data_dim, 0);
    if (!is_sys_pad) {
      // Asymmetric padding: pad the input explicitly by the per-dimension
      // difference and keep the common (symmetric) part for the descriptor.
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      new_input_shape_vec[0] = transformed_input_channel.dims()[0];
      new_input_shape_vec[1] = transformed_input_channel.dims()[1];
      std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
                                 0);
      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            transformed_input_channel.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(
          framework::make_ddim(new_input_shape_vec));
      transformed_input.Resize(new_input_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();
      transformed_input =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      const int rank = transformed_input_channel.dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, transformed_input_channel, pad_value,
              &transformed_input);
        } break;
        case 5: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, transformed_input_channel, pad_value,
              &transformed_input);
        } break;
        default:
          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
      }
    } else {
      transformed_input = transformed_input_channel;
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }
    const T* input_data = transformed_input.data<T>();
    const T* filter_data = filter->data<T>();
    // ------------------- cudnn descriptors ---------------------
    ConvArgs args{&transformed_input,  filter, &transformed_output, strides,
                  padding_common,      dilations};
    auto handle = dev_ctx.cudnn_handle();
    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
    auto dtype = platform::CudnnDataType<T>::type;
    DataLayout layout = DataLayout::kNCHW;
    if (transformed_input_channel.dims().size() == 5) {
      layout = DataLayout::kNCDHW;
    }
    auto layout_format = GetCudnnTensorFormat(layout);
    args.handle = handle;
    args.cdesc.set(dtype, padding_common, strides, dilations);
#if CUDNN_VERSION_MIN(7, 0, 1)
    // cudnn 7 can support groups, no need to do it manually
    // FIXME(typhoonzero): find a better way to disable groups
    // rather than setting it to 1.
    CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionGroupCount(
        args.cdesc.desc(), groups));
    groups = 1;
#endif
    args.idesc.set(transformed_input, groups);
    args.wdesc.set(*filter, layout_format, groups);
    args.odesc.set(transformed_output, groups);
    int i_n, i_c, i_d, i_h, i_w;
    GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
             &i_h, &i_w);
    int o_n, o_c, o_d, o_h, o_w;
    GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
             &o_h, &o_w);
    // Per-group strides through the input/output/filter buffers (only used
    // on the manual-group path, i.e. pre-cuDNN-7).
    int group_offset_in = i_c / groups * i_h * i_w * i_d;
    int group_offset_out = o_c / groups * o_h * o_w * o_d;
    int group_offset_filter = filter->numel() / groups;
    // ------------------- cudnn conv workspace ---------------------
    size_t workspace_size = 0;  // final workspace to allocate.
    // ------------------- cudnn conv algorithm ---------------------
    cudnnConvolutionFwdAlgo_t algo{};
    using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
    algo = search::Find<T>(args, exhaustive_search, false, 0, ctx);
    workspace_size = search::GetWorkspaceSize(args, algo);
    // ------------------- cudnn conv forward ---------------------
    ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
    for (int i = 0; i < groups; i++) {
      workspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
                handle, &alpha, args.idesc.desc(),
                input_data + i * group_offset_in, args.wdesc.desc(),
                filter_data + i * group_offset_filter, args.cdesc.desc(), algo,
                workspace_ptr, workspace_size, &beta, args.odesc.desc(),
                output_data + i * group_offset_out));
          },
          workspace_size);
    }
    if (channel_last) {
      // Transpose the channel-first result back to the caller's layout.
      TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
          ctx, &transformed_output, output);
    }
  }
};
// cuDNN-backed gradient kernel for conv2d/conv3d. Computes, when requested:
//   - Input@GRAD  via cudnnConvolutionBackwardData   (args1 / data_algo)
//   - Filter@GRAD via cudnnConvolutionBackwardFilter (args2 / filter_algo)
// Mirrors the forward kernel's normalization: channel-last tensors are
// transposed to channel-first and asymmetric padding is folded into an
// explicitly zero-padded input copy (cuDNN only supports symmetric padding).
template <typename T>
class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                      "It must use CUDAPlace.");
    auto input = ctx.Input<Tensor>("Input");
    auto filter = ctx.Input<Tensor>("Filter");
    auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
    auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
    auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
    const T* filter_data = filter->data<T>();
    // Either gradient output may be absent (stop-gradient); allocate only
    // the ones actually requested.
    if (input_grad) {
      input_grad->mutable_data<T>(ctx.GetPlace());
    }
    if (filter_grad) {
      filter_grad->mutable_data<T>(ctx.GetPlace());
    }
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
    int groups = ctx.Attr<int>("groups");
    bool exhaustive_search =
        FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
    bool deterministic = FLAGS_cudnn_deterministic;
    // Exhaustive search is non-deterministic, so the two flags conflict.
    if (exhaustive_search && deterministic) {
      PADDLE_THROW(
          "Can't set exhaustive_search True and "
          "FLAGS_cudnn_deterministic True at same time.");
    }
    const std::string data_format = ctx.Attr<std::string>("data_format");
    const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
    // transform Tensor
    Tensor transformed_input_channel(input->type());
    Tensor transformed_output_grad_channel(output_grad->type());
    Tensor transformed_input_grad_channel(input->type());
    if (channel_last) {
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, input, &transformed_input_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, input, &transformed_input_channel);
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, output_grad, &transformed_output_grad_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, output_grad, &transformed_output_grad_channel);
      if (input_grad) {
        // Resize only: cuDNN writes this buffer below, so no copy is needed.
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, input_grad, &transformed_input_grad_channel);
      }
    } else {
      transformed_input_channel = *input;
      transformed_output_grad_channel = *output_grad;
      if (input_grad) {
        transformed_input_grad_channel.ShareDataWith(*input_grad);
      }
    }
    // update paddings
    auto in_dims = transformed_input_channel.dims();
    auto filter_dims = filter->dims();
    framework::DDim in_data_dims;
    in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);
    // cuDNN only supports padding the same amount on every dimension.
    // So we create a new padded input tensor.
    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
    Tensor transformed_input(input->type());
    Tensor transformed_input_grad(input->type());
    std::vector<int> padding_common(data_dim, 0);
    std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
    if (!is_sys_pad) {
      // get pad
      // Fold the asymmetric part of the padding into an explicit zero-pad
      // of the input; keep the symmetric remainder for the conv descriptor.
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      new_input_shape_vec[0] = transformed_input_channel.dims()[0];
      new_input_shape_vec[1] = transformed_input_channel.dims()[1];
      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            transformed_input_channel.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(
          framework::make_ddim(new_input_shape_vec));
      transformed_input.Resize(new_input_shape);
      transformed_input_grad.Resize(new_input_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();
      transformed_input =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      if (input_grad) {
        transformed_input_grad =
            ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
                new_input_shape, dev_ctx);
      }
      // pad for input
      const int rank = transformed_input_channel.dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, transformed_input_channel, pad_value,
              &transformed_input);
        } break;
        case 5: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, transformed_input_channel, pad_value,
              &transformed_input);
        } break;
        default:
          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
      }
    } else {
      transformed_input.ShareDataWith(transformed_input_channel);
      if (input_grad) {
        transformed_input_grad.ShareDataWith(transformed_input_grad_channel);
      }
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }
    const T* input_data = transformed_input.data<T>();
    const T* output_grad_data = transformed_output_grad_channel.data<T>();
    T* filter_grad_data = nullptr;
    T* input_grad_data = nullptr;
    T* transformed_input_grad_data = nullptr;
    // args1: backward-data (produces input grad); args2: backward-filter
    // (produces filter grad).
    ConvArgs args1{&transformed_input_grad,
                   filter,
                   &transformed_output_grad_channel,
                   strides,
                   padding_common,
                   dilations};
    ConvArgs args2{&transformed_input,
                   filter_grad,
                   &transformed_output_grad_channel,
                   strides,
                   padding_common,
                   dilations};
    auto handle = dev_ctx.cudnn_handle();
    auto dtype = platform::CudnnDataType<T>::type;
    DataLayout layout = DataLayout::kNCHW;
    if (input->dims().size() == 5) {
      layout = DataLayout::kNCDHW;
    }
    auto layout_tensor = GetCudnnTensorFormat(layout);
    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
    int i_n, i_c, i_d, i_h, i_w;
    GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
             &i_h, &i_w);
    int o_n, o_c, o_d, o_h, o_w;
    GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n,
             &o_c, &o_d, &o_h, &o_w);
    // Per-group strides through the buffers (used only on the manual-group
    // path, i.e. pre-cuDNN-7).
    int group_offset_in = i_c / groups * i_h * i_w * i_d;
    int group_offset_out = o_c / groups * o_h * o_w * o_d;
    int group_offset_filter = filter->numel() / groups;
    // ------------------- cudnn backward algorithm ---------------------
    cudnnConvolutionBwdDataAlgo_t data_algo =
        static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
    cudnnConvolutionBwdFilterAlgo_t filter_algo =
        static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
    size_t workspace_size = 0;
    int iwo_groups, c_groups;
#if CUDNN_VERSION_MIN(7, 0, 1)
    // cuDNN >= 7 handles groups natively via the conv descriptor.
    iwo_groups = 1;
    c_groups = groups;
    groups = 1;
#endif
    if (input_grad) {
      // ------------------- cudnn descriptors ---------------------
      input_grad_data = input_grad->data<T>();
      transformed_input_grad_data = transformed_input_grad.data<T>();
      args1.handle = handle;
      args1.idesc.set(transformed_input_grad, iwo_groups);
      args1.wdesc.set(*filter, layout_tensor, iwo_groups);
      args1.odesc.set(transformed_output_grad_channel, iwo_groups);
      args1.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
      using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
      data_algo =
          search1::Find<T>(args1, exhaustive_search, deterministic, 0, ctx);
      workspace_size =
          ::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
    }
    if (filter_grad) {
      // ------------------- cudnn descriptors ---------------------
      filter_grad_data = filter_grad->data<T>();
      args2.handle = handle;
      args2.idesc.set(transformed_input, iwo_groups);
      args2.wdesc.set(*filter_grad, layout_tensor, iwo_groups);
      args2.odesc.set(transformed_output_grad_channel, iwo_groups);
      args2.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
      using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
      filter_algo =
          search2::Find<T>(args2, exhaustive_search, deterministic, 1, ctx);
      // One workspace is shared by both backward passes: take the max.
      workspace_size = ::max(workspace_size,
                                  search2::GetWorkspaceSize(args2, filter_algo));
    }
    // ------------------- cudnn conv backward data ---------------------
    ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
    if (input_grad) {
      // Because beta is zero, it is unnecessary to reset input_grad.
      for (int i = 0; i < groups; i++) {
        workspace_handle.RunFunc(
            [&](void* cudnn_workspace_ptr) {
              CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
                  handle, &alpha, args1.wdesc.desc(),
                  filter_data + i * group_offset_filter, args1.odesc.desc(),
                  output_grad_data + i * group_offset_out, args1.cdesc.desc(),
                  data_algo, cudnn_workspace_ptr, workspace_size, &beta,
                  args1.idesc.desc(),
                  transformed_input_grad_data + i * group_offset_in));
            },
            workspace_size);
      }
      // Slice off the explicit zero-padding (if any) from the padded
      // gradient to recover the gradient of the original-shaped input.
      std::vector<int> starts(transformed_input_channel.dims().size(), 0);
      std::vector<int> axes(transformed_input_channel.dims().size(), 0);
      for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) {
        starts[i] = input_pad[2 * i];
        axes[i] = i;
      }
      transformed_input_grad_channel.mutable_data(ctx.GetPlace());
      if (transformed_input_channel.dims().size() == 4) {
        Slice_2<paddle::platform::CUDADeviceContext, T, 4>(
            ctx, &transformed_input_grad, &transformed_input_grad_channel,
            starts, axes);
      } else {
        Slice_2<paddle::platform::CUDADeviceContext, T, 5>(
            ctx, &transformed_input_grad, &transformed_input_grad_channel,
            starts, axes);
      }
      if (channel_last) {
        TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
            ctx, &transformed_input_grad_channel, input_grad);
      }
    }
    // ------------------- cudnn conv backward filter ---------------------
    if (filter_grad) {
      // Because beta is zero, it is unnecessary to reset filter_grad.
      for (int i = 0; i < groups; i++) {
        workspace_handle.RunFunc(
            [&](void* cudnn_workspace_ptr) {
              CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
                  handle, &alpha, args2.idesc.desc(),
                  input_data + i * group_offset_in, args2.odesc.desc(),
                  output_grad_data + i * group_offset_out, args2.cdesc.desc(),
                  filter_algo, cudnn_workspace_ptr, workspace_size, &beta,
                  args2.wdesc.desc(),
                  filter_grad_data + i * group_offset_filter));
            },
            workspace_size);
      }
    }
  }
};
/*
 * Inputs: I, W, dO, ddI, ddW
 * Outputs: ddO, dW, dI
 * ddo = conv(ddI, W) + conv(I, ddW)
 * dW = conv_bp_filter(ddI, dO)
 * dI = conv_bp_data(ddW, dO)
 */
// cuDNN-backed second-order (double) gradient kernel for conv2d/conv3d.
// Four convolutions are staged via args1..args4 (see per-args comments
// below); layout/padding normalization mirrors the forward kernel.
template <typename T>
class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                      "It must use CUDAPlace.");
    auto X = ctx.Input<Tensor>("Input");
    auto W = ctx.Input<Tensor>("Filter");
    auto dO = ctx.Input<Tensor>("DOutput");
    auto ddX = ctx.Input<Tensor>("DDInput");
    auto ddW = ctx.Input<Tensor>("DDFilter");
    auto ddO = ctx.Output<Tensor>("DDOutput");
    auto dW = ctx.Output<Tensor>("DFilter");
    auto dX = ctx.Output<Tensor>("DInput");
    // Each output is optional; allocate only those requested.
    if (ddO) {
      ddO->mutable_data<T>(ctx.GetPlace());
    }
    if (dW) {
      dW->mutable_data<T>(ctx.GetPlace());
    }
    if (dX) {
      dX->mutable_data<T>(ctx.GetPlace());
    }
    // const T* x = X->data<T>();
    const T* dy = dO->data<T>();
    const T* w = W->data<T>();
    const T* ddx = nullptr;
    const T* ddw = nullptr;
    T *dw, *dx, *ddy;
    dw = dx = ddy = nullptr;
    T* transformed_dx = nullptr;
    const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    int groups = ctx.Attr<int>("groups");
    bool exhaustive_search =
        FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
    bool deterministic = FLAGS_cudnn_deterministic;
    // Exhaustive search is non-deterministic, so the two flags conflict.
    if (exhaustive_search && deterministic) {
      PADDLE_THROW(
          "Can't set exhaustive_search True and "
          "FLAGS_cudnn_deterministic True at same time.");
    }
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
    const std::string data_format = ctx.Attr<std::string>("data_format");
    const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
    // transform Tensors to channel first-----------
    Tensor transformed_X_channel(X->type());
    Tensor transformed_dO_channel(dO->type());
    Tensor transformed_ddX_channel(ddX->type());
    Tensor transformed_ddO_channel(dO->type());
    Tensor transformed_dX_channel(X->type());
    if (channel_last) {
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, X, &transformed_X_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, X, &transformed_X_channel);
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, dO, &transformed_dO_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, dO, &transformed_dO_channel);
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, ddX, &transformed_ddX_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, ddX, &transformed_ddX_channel);
      // Outputs only need resizing; cuDNN writes them below.
      if (ddO) {
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, ddO, &transformed_ddO_channel);
      }
      if (dX) {
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, dX, &transformed_dX_channel);
        transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
      }
    } else {
      transformed_X_channel = *X;
      transformed_dO_channel = *dO;
      transformed_ddX_channel = *ddX;
      if (ddO) {
        transformed_ddO_channel.ShareDataWith(*ddO);
      }
      if (dX) {
        transformed_dX_channel.ShareDataWith(*dX);
      }
    }
    auto in_dims = transformed_X_channel.dims();
    auto filter_dims = W->dims();
    framework::DDim in_data_dims =
        framework::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);
    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
    Tensor transformed_X(X->type());
    Tensor transformed_ddX(X->type());
    Tensor transformed_dX(X->type());
    std::vector<int> padding_common(data_dim, 0);
    std::vector<int> input_pad(X->dims().size() * 2, 0);
    if (!is_sys_pad) {
      // get pad
      // Fold the asymmetric part of the padding into an explicit zero-pad
      // of X and ddX; keep the symmetric remainder for the descriptors.
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      new_input_shape_vec[0] = transformed_X_channel.dims()[0];
      new_input_shape_vec[1] = transformed_X_channel.dims()[1];
      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            transformed_X_channel.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(
          framework::make_ddim(new_input_shape_vec));
      transformed_X.Resize(new_input_shape);
      transformed_ddX.Resize(new_input_shape);
      transformed_dX.Resize(new_input_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();
      transformed_X =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      transformed_ddX =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      if (dX) {
        transformed_dX =
            ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
                new_input_shape, dev_ctx);
      }
      // pad for input
      const int rank = X->dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, transformed_ddX_channel, pad_value,
              &transformed_ddX);
        } break;
        case 5: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, transformed_ddX_channel, pad_value,
              &transformed_ddX);
        } break;
        default:
          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
      }
    } else {
      transformed_X.ShareDataWith(transformed_X_channel);
      transformed_ddX.ShareDataWith(transformed_ddX_channel);
      if (dX) {
        transformed_dX.ShareDataWith(transformed_dX_channel);
      }
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }
    const T* x = transformed_X.data<T>();
    int iwo_group = groups;
    int c_group = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
    // cuDNN >= 7 handles groups natively via the conv descriptor.
    iwo_group = 1;
    c_group = groups;
#endif
    auto dtype = platform::CudnnDataType<T>::type;
    auto handle = dev_ctx.cudnn_handle();
    // args1: ddO += conv(ddX, W)      (forward)
    // args2: ddO += conv(X, ddW)      (forward, accumulated via alpha)
    // args3: dW  = conv_bp_filter(ddX, dO)
    // args4: dX  = conv_bp_data(ddW, dO)
    ConvArgs args1{&transformed_ddX,         W,
                   &transformed_ddO_channel, strides,
                   padding_common,           dilations};
    ConvArgs args2{&transformed_X, ddW,      &transformed_ddO_channel, strides,
                   padding_common, dilations};
    ConvArgs args3{&transformed_ddX, dW,        &transformed_dO_channel, strides,
                   padding_common,   dilations};
    ConvArgs args4{&transformed_dX, ddW,       &transformed_dO_channel, strides,
                   padding_common,  dilations};
    cudnnConvolutionFwdAlgo_t fwd_algo1 =
        static_cast<cudnnConvolutionFwdAlgo_t>(0);
    cudnnConvolutionFwdAlgo_t fwd_algo2 =
        static_cast<cudnnConvolutionFwdAlgo_t>(0);
    cudnnConvolutionBwdDataAlgo_t data_algo =
        static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
    cudnnConvolutionBwdFilterAlgo_t filter_algo =
        static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
    auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
    // ddo = conv(ddI, W) + conv(I, ddW)
    // A single workspace is shared across all four convolutions: each
    // search below raises workspace_size to the max requirement seen.
    size_t workspace_size = 0;
    T* transformed_ddy_channel = nullptr;
    if (ddO) {
      ddy = ddO->data<T>();
      transformed_ddy_channel = transformed_ddO_channel.data<T>();
      if (ddX) {
        args1.handle = handle;
        args1.idesc.set(transformed_ddX, iwo_group);
        args1.wdesc.set(*W, layout, iwo_group);
        args1.odesc.set(transformed_ddO_channel, iwo_group);
        args1.cdesc.set(dtype, padding_common, strides, dilations, c_group);
        using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
        fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, 0, ctx);
        workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
      }
      if (ddW) {
        ddw = ddW->data<T>();
        args2.handle = handle;
        args2.idesc.set(transformed_X, iwo_group);
        args2.wdesc.set(*ddW, layout, iwo_group);
        args2.odesc.set(transformed_ddO_channel, iwo_group);
        args2.cdesc.set(dtype, padding_common, strides, dilations, c_group);
        using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
        fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, 0, ctx);
        workspace_size = ::max(workspace_size,
                                    search2::GetWorkspaceSize(args2, fwd_algo2));
      }
    }
    if (dW && ddX) {
      dw = dW->data<T>();
      args3.handle = handle;
      args3.idesc.set(transformed_ddX, iwo_group);
      args3.wdesc.set(*dW, layout, iwo_group);
      args3.odesc.set(transformed_dO_channel, iwo_group);
      args3.cdesc.set(dtype, padding_common, strides, dilations, c_group);
      using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
      filter_algo =
          search3::Find<T>(args3, exhaustive_search, deterministic, 1, ctx);
      workspace_size = ::max(workspace_size,
                                  search3::GetWorkspaceSize(args3, filter_algo));
    }
    if (ddW && dX) {
      transformed_dx = transformed_dX.data<T>();
      args4.handle = handle;
      args4.idesc.set(transformed_dX, iwo_group);
      args4.wdesc.set(*ddW, layout, iwo_group);
      args4.odesc.set(transformed_dO_channel, iwo_group);
      args4.cdesc.set(dtype, padding_common, strides, dilations, c_group);
      using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
      data_algo =
          search4::Find<T>(args4, exhaustive_search, deterministic, 2, ctx);
      workspace_size =
          ::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
    }
    int i_n, i_c, i_d, i_h, i_w;
    GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h,
             &i_w);
    int o_n, o_c, o_d, o_h, o_w;
    GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
             &o_h, &o_w);
    // Per-group strides through the buffers (manual-group path only).
    int group_offset_in = i_c / groups * i_h * i_w * i_d;
    int group_offset_out = o_c / groups * o_h * o_w * o_d;
    int group_offset_filter = W->numel() / groups;
    ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
    auto wkspace_handle = dev_ctx.cudnn_workspace_handle();
    if (ddO) {
      if (ddX) {
        ddx = transformed_ddX.data<T>();
        for (int i = 0; i < groups; i++) {
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
                    handle, &alpha, args1.idesc.desc(),
                    ddx + i * group_offset_in, args1.wdesc.desc(),
                    w + i * group_offset_filter, args1.cdesc.desc(), fwd_algo1,
                    workspace_ptr, workspace_size, &beta, args1.odesc.desc(),
                    transformed_ddy_channel + i * group_offset_out));
              },
              workspace_size);
        }
      }
      if (ddW) {
        for (int i = 0; i < groups; i++) {
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                // NOTE: the output scaling factor here is &alpha (1.0), not
                // &beta, so this conv ACCUMULATES into ddO on top of the
                // conv(ddX, W) result above: ddO = conv(ddX, W) + conv(X, ddW).
                CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
                    handle, &alpha, args2.idesc.desc(), x + i * group_offset_in,
                    args2.wdesc.desc(), ddw + i * group_offset_filter,
                    args2.cdesc.desc(), fwd_algo2, workspace_ptr,
                    workspace_size, &alpha, args2.odesc.desc(),
                    transformed_ddy_channel + i * group_offset_out));
              },
              workspace_size);
        }
      }
      if (channel_last) {
        TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
            ctx, &transformed_ddO_channel, ddO);
      }
    }
    T* transformed_dy_channel = nullptr;
    if (dW && ddX) {
      ddx = transformed_ddX.data<T>();
      transformed_dy_channel = transformed_dO_channel.data<T>();
      for (int i = 0; i < groups; i++) {
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
                  handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in,
                  args3.odesc.desc(),
                  transformed_dy_channel + i * group_offset_out,
                  args3.cdesc.desc(), filter_algo, workspace_ptr,
                  workspace_size, &beta, args3.wdesc.desc(),
                  dw + i * group_offset_filter));
            },
            workspace_size);
      }
    }
    if (dX && ddW) {
      ddw = ddW->data<T>();
      for (int i = 0; i < groups; i++) {
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
                  handle, &alpha, args4.wdesc.desc(),
                  ddw + i * group_offset_filter, args4.odesc.desc(),
                  transformed_dy_channel + i * group_offset_out,
                  args4.cdesc.desc(), data_algo, workspace_ptr, workspace_size,
                  &beta, args4.idesc.desc(),
                  transformed_dx + i * group_offset_in));
            },
            workspace_size);
      }
      // reverse padded input
      // Slice off the explicit zero-padding (if any) to recover dX with the
      // original input shape.
      std::vector<int> starts(X->dims().size(), 0);
      std::vector<int> axes(X->dims().size(), 0);
      for (size_t i = 0; i < X->dims().size(); ++i) {
        starts[i] = input_pad[2 * i];
        axes[i] = i;
      }
      if (X->dims().size() == 4) {
        Slice_2<paddle::platform::CUDADeviceContext, T, 4>(
            ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
      } else {
        Slice_2<paddle::platform::CUDADeviceContext, T, 5>(
            ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
      }
      if (channel_last) {
        TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
            ctx, &transformed_dX_channel, dX);
      }
    }
  }
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
// Register the cuDNN kernels for conv2d/conv3d forward, gradient, and
// double-gradient ops.
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
                   paddle::operators::CUDNNConvOpKernel<float>,
                   paddle::operators::CUDNNConvOpKernel<double>,
                   paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
                   paddle::operators::CUDNNConvGradOpKernel<float>,
                   paddle::operators::CUDNNConvGradOpKernel<double>,
                   paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
    conv2d_grad_grad, CUDNN, plat::CUDAPlace,
    paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
    paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
    paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
                   paddle::operators::CUDNNConvOpKernel<float>,
                   paddle::operators::CUDNNConvOpKernel<double>,
                   paddle::operators::CUDNNConvOpKernel<plat::float16>);
// NOTE(review): conv3d_grad registers only float/double — no float16 —
// unlike conv2d_grad and both grad_grad ops. Presumably intentional, but
// worth confirming against upstream history.
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
                   paddle::operators::CUDNNConvGradOpKernel<float>,
                   paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
    conv3d_grad_grad, CUDNN, plat::CUDAPlace,
    paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
    paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
    paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
| b6e53b0d2b7af915aab98aca1b56351da6c0a5cd.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#include "paddle/fluid/operators/conv_cudnn_op_cache.h"
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(cudnn_deterministic);
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_exhaustive_search);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
template <typename T>
using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
using framework::AlgorithmsCache;
// Decomposes a 4-D or 5-D tensor shape into batch (N), channel (C) and
// spatial (D/H/W) extents. For a 4-D shape, D is reported as 1. The channel
// sits at index 1 for kNCHW layouts and at the last index otherwise.
static inline void GetNCDHW(const framework::DDim& dims,
                            const DataLayout& layout, int* N, int* C, int* D,
                            int* H, int* W) {
  const bool channel_first = (layout == DataLayout::kNCHW);
  *N = dims[0];
  *C = channel_first ? dims[1] : dims[dims.size() - 1];
  // Spatial extents start right after the channel for channel-first layouts,
  // and right after the batch otherwise.
  const int shift = channel_first ? 0 : 1;
  if (dims.size() != 5) {
    *D = 1;
    *H = dims[2 - shift];
    *W = dims[3 - shift];
  } else {
    *D = dims[2 - shift];
    *H = dims[3 - shift];
    *W = dims[4 - shift];
  }
}
// Copies a D-dimensional sub-block of `input` into `out` on the device.
// The copy extent along every axis is taken from out->dims(); `starts[k]`
// supplies the offset along axis `axes[k]` (a negative start counts from the
// end of that axis, and the resolved offset is clamped to be non-negative).
template <typename DeviceContext, typename T, size_t D>
static void Slice_2(const framework::ExecutionContext& context,
                    const Tensor* input, Tensor* out,
                    const std::vector<int>& starts,
                    const std::vector<int>& axes) {
  auto& place =
      *context.template device_context<DeviceContext>().eigen_device();
  auto src_dims = input->dims();
  auto dst_dims = out->dims();

  Eigen::array<int, D> offsets;
  Eigen::array<int, D> extents;
  for (size_t d = 0; d < D; ++d) {
    offsets[d] = 0;
    extents[d] = dst_dims[d];
  }
  for (size_t k = 0; k < axes.size(); ++k) {
    int begin = starts[k];
    if (begin < 0) {
      begin += src_dims[axes[k]];  // negative start counts from the end
    }
    offsets[axes[k]] = std::max(begin, 0);
  }

  auto src =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *input);
  auto dst =
      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
          *out, dst_dims);
  dst.device(place) = src.slice(offsets, extents);
}
// Forward convolution via cuDNN (shared by conv2d and conv3d).
//
// Pipeline:
//   1. Channel-last (NHWC/NDHWC) inputs are transposed to channel-first,
//      since every cuDNN descriptor below is configured as NCHW/NCDHW.
//   2. cuDNN supports only symmetric padding, so when the resolved paddings
//      are asymmetric a zero-padded copy of the input is materialized and
//      only the remaining symmetric part is handed to cuDNN.
//   3. A forward algorithm is selected through SearchAlgorithm (honoring
//      exhaustive_search and FLAGS_cudnn_deterministic).
//   4. cudnnConvolutionForward runs once per group; for cuDNN >= 7.0.1 the
//      group count is folded into the descriptor and the loop runs once.
template <typename T>
class CUDNNConvOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                      "It must use CUDAPlace.");
    const Tensor* input = ctx.Input<Tensor>("Input");
    auto* filter = ctx.Input<Tensor>("Filter");
    auto* output = ctx.Output<Tensor>("Output");
    output->mutable_data<T>(ctx.GetPlace());
    const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    int groups = ctx.Attr<int>("groups");
    bool exhaustive_search =
        FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
    // Exhaustive search benchmarks candidate algorithms at runtime, which is
    // incompatible with requesting deterministic behavior.
    if (exhaustive_search && FLAGS_cudnn_deterministic) {
      PADDLE_THROW(
          "Cann't set exhaustive_search True and "
          "FLAGS_cudnn_deterministic True at same time.");
    }
    const std::string padding_algorithm =
        ctx.Attr<std::string>("padding_algorithm");
    const std::string data_format = ctx.Attr<std::string>("data_format");
    const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
    // ------------ transformed tensor -----------
    // Channel-first views of input/output; aliases of the originals when the
    // data is already channel-first.
    Tensor transformed_input_channel(input->type());
    Tensor transformed_output(output->type());
    T* output_data = nullptr;
    if (channel_last) {
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, input, &transformed_input_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, input, &transformed_input_channel);
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output,
                                                           &transformed_output);
    } else {
      transformed_input_channel = *input;
      transformed_output = *output;
    }
    output_data = transformed_output.data<T>();
    // update padding and dilation
    auto in_dims = transformed_input_channel.dims();
    auto filter_dims = filter->dims();
    framework::DDim in_data_dims;
    in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);
    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
    Tensor transformed_input;
    std::vector<int> padding_common(data_dim, 0);
    if (!is_sys_pad) {
      // Asymmetric padding: pre-pad the input with zeros by the per-side
      // difference, then pass only the common (symmetric) part to cuDNN.
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      new_input_shape_vec[0] = transformed_input_channel.dims()[0];
      new_input_shape_vec[1] = transformed_input_channel.dims()[1];
      std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
                                 0);
      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            transformed_input_channel.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(
          framework::make_ddim(new_input_shape_vec));
      transformed_input.Resize(new_input_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();
      transformed_input =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      const int rank = transformed_input_channel.dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, transformed_input_channel, pad_value,
              &transformed_input);
        } break;
        case 5: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, transformed_input_channel, pad_value,
              &transformed_input);
        } break;
        default:
          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
      }
    } else {
      // Symmetric padding: hand the per-dimension amount straight to cuDNN.
      transformed_input = transformed_input_channel;
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }
    const T* input_data = transformed_input.data<T>();
    const T* filter_data = filter->data<T>();
    // ------------------- cudnn descriptors ---------------------
    ConvArgs args{&transformed_input, filter, &transformed_output, strides,
                  padding_common,     dilations};
    auto handle = dev_ctx.cudnn_handle();
    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
    auto dtype = platform::CudnnDataType<T>::type;
    DataLayout layout = DataLayout::kNCHW;
    if (transformed_input_channel.dims().size() == 5) {
      layout = DataLayout::kNCDHW;
    }
    auto layout_format = GetCudnnTensorFormat(layout);
    args.handle = handle;
    args.cdesc.set(dtype, padding_common, strides, dilations);
#if CUDNN_VERSION_MIN(7, 0, 1)
    // cudnn 7 can support groups, no need to do it manually
    // FIXME(typhoonzero): find a better way to disable groups
    // rather than setting it to 1.
    CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionGroupCount(
        args.cdesc.desc(), groups));
    groups = 1;
#endif
    args.idesc.set(transformed_input, groups);
    args.wdesc.set(*filter, layout_format, groups);
    args.odesc.set(transformed_output, groups);
    int i_n, i_c, i_d, i_h, i_w;
    GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
             &i_h, &i_w);
    int o_n, o_c, o_d, o_h, o_w;
    GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
             &o_h, &o_w);
    // Element offsets of one group's slice inside input/output/filter; only
    // non-trivial on the pre-7.0.1 manual-group path (groups > 1).
    int group_offset_in = i_c / groups * i_h * i_w * i_d;
    int group_offset_out = o_c / groups * o_h * o_w * o_d;
    int group_offset_filter = filter->numel() / groups;
    // ------------------- cudnn conv workspace ---------------------
    size_t workspace_size = 0;  // final workspace to allocate.
    // ------------------- cudnn conv algorithm ---------------------
    cudnnConvolutionFwdAlgo_t algo{};
    using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
    algo = search::Find<T>(args, exhaustive_search, false, 0, ctx);
    workspace_size = search::GetWorkspaceSize(args, algo);
    // ------------------- cudnn conv forward ---------------------
    ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
    for (int i = 0; i < groups; i++) {
      workspace_handle.RunFunc(
          [&](void* workspace_ptr) {
            CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
                handle, &alpha, args.idesc.desc(),
                input_data + i * group_offset_in, args.wdesc.desc(),
                filter_data + i * group_offset_filter, args.cdesc.desc(), algo,
                workspace_ptr, workspace_size, &beta, args.odesc.desc(),
                output_data + i * group_offset_out));
          },
          workspace_size);
    }
    // Transpose the channel-first result back to the caller's layout.
    if (channel_last) {
      TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
          ctx, &transformed_output, output);
    }
  }
};
// Backward convolution via cuDNN: computes dInput and/or dFilter from
// dOutput (either output may be absent).
//
// Mirrors the forward kernel's preprocessing: channel-last tensors are
// transposed to channel-first, and asymmetric paddings are handled by
// zero-padding the input (the gradient is later sliced back to the original
// extent). args1 drives cudnnConvolutionBackwardData (dInput), args2 drives
// cudnnConvolutionBackwardFilter (dFilter); one shared workspace sized to the
// max of both is used.
template <typename T>
class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                      "It must use CUDAPlace.");
    auto input = ctx.Input<Tensor>("Input");
    auto filter = ctx.Input<Tensor>("Filter");
    auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
    auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
    auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
    const T* filter_data = filter->data<T>();
    if (input_grad) {
      input_grad->mutable_data<T>(ctx.GetPlace());
    }
    if (filter_grad) {
      filter_grad->mutable_data<T>(ctx.GetPlace());
    }
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
    int groups = ctx.Attr<int>("groups");
    bool exhaustive_search =
        FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
    bool deterministic = FLAGS_cudnn_deterministic;
    // Runtime algorithm benchmarking and determinism are mutually exclusive.
    if (exhaustive_search && deterministic) {
      PADDLE_THROW(
          "Can't set exhaustive_search True and "
          "FLAGS_cudnn_deterministic True at same time.");
    }
    const std::string data_format = ctx.Attr<std::string>("data_format");
    const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
    // transform Tensor
    Tensor transformed_input_channel(input->type());
    Tensor transformed_output_grad_channel(output_grad->type());
    Tensor transformed_input_grad_channel(input->type());
    if (channel_last) {
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, input, &transformed_input_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, input, &transformed_input_channel);
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, output_grad, &transformed_output_grad_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, output_grad, &transformed_output_grad_channel);
      if (input_grad) {
        // Only resized here; filled by backward-data, then transposed back.
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, input_grad, &transformed_input_grad_channel);
      }
    } else {
      transformed_input_channel = *input;
      transformed_output_grad_channel = *output_grad;
      if (input_grad) {
        transformed_input_grad_channel.ShareDataWith(*input_grad);
      }
    }
    // update paddings
    auto in_dims = transformed_input_channel.dims();
    auto filter_dims = filter->dims();
    framework::DDim in_data_dims;
    in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);
    // cuDNN only supports padding the same amount on every dimension.
    // So we create a new padded input tensor.
    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
    Tensor transformed_input(input->type());
    Tensor transformed_input_grad(input->type());
    std::vector<int> padding_common(data_dim, 0);
    std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
    if (!is_sys_pad) {
      // get pad
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      new_input_shape_vec[0] = transformed_input_channel.dims()[0];
      new_input_shape_vec[1] = transformed_input_channel.dims()[1];
      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            transformed_input_channel.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(
          framework::make_ddim(new_input_shape_vec));
      transformed_input.Resize(new_input_shape);
      transformed_input_grad.Resize(new_input_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();
      transformed_input =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      if (input_grad) {
        transformed_input_grad =
            ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
                new_input_shape, dev_ctx);
      }
      // pad for input
      const int rank = transformed_input_channel.dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, transformed_input_channel, pad_value,
              &transformed_input);
        } break;
        case 5: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, transformed_input_channel, pad_value,
              &transformed_input);
        } break;
        default:
          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
      }
    } else {
      transformed_input.ShareDataWith(transformed_input_channel);
      if (input_grad) {
        transformed_input_grad.ShareDataWith(transformed_input_grad_channel);
      }
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }
    const T* input_data = transformed_input.data<T>();
    const T* output_grad_data = transformed_output_grad_channel.data<T>();
    T* filter_grad_data = nullptr;
    T* input_grad_data = nullptr;
    T* transformed_input_grad_data = nullptr;
    // args1: dInput = conv_bwd_data(filter, dOutput)
    ConvArgs args1{&transformed_input_grad,
                   filter,
                   &transformed_output_grad_channel,
                   strides,
                   padding_common,
                   dilations};
    // args2: dFilter = conv_bwd_filter(input, dOutput)
    ConvArgs args2{&transformed_input,
                   filter_grad,
                   &transformed_output_grad_channel,
                   strides,
                   padding_common,
                   dilations};
    auto handle = dev_ctx.cudnn_handle();
    auto dtype = platform::CudnnDataType<T>::type;
    DataLayout layout = DataLayout::kNCHW;
    if (input->dims().size() == 5) {
      layout = DataLayout::kNCDHW;
    }
    auto layout_tensor = GetCudnnTensorFormat(layout);
    auto workspace_handle = dev_ctx.cudnn_workspace_handle();
    int i_n, i_c, i_d, i_h, i_w;
    GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
             &i_h, &i_w);
    int o_n, o_c, o_d, o_h, o_w;
    GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n,
             &o_c, &o_d, &o_h, &o_w);
    // Per-group element offsets; non-trivial only on the manual-group path.
    int group_offset_in = i_c / groups * i_h * i_w * i_d;
    int group_offset_out = o_c / groups * o_h * o_w * o_d;
    int group_offset_filter = filter->numel() / groups;
    // ------------------- cudnn backward algorithm ---------------------
    cudnnConvolutionBwdDataAlgo_t data_algo =
        static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
    cudnnConvolutionBwdFilterAlgo_t filter_algo =
        static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
    size_t workspace_size = 0;
    int iwo_groups, c_groups;
#if CUDNN_VERSION_MIN(7, 0, 1)
    iwo_groups = 1;
    c_groups = groups;
    groups = 1;
#endif
    if (input_grad) {
      // ------------------- cudnn descriptors ---------------------
      input_grad_data = input_grad->data<T>();
      transformed_input_grad_data = transformed_input_grad.data<T>();
      args1.handle = handle;
      args1.idesc.set(transformed_input_grad, iwo_groups);
      args1.wdesc.set(*filter, layout_tensor, iwo_groups);
      args1.odesc.set(transformed_output_grad_channel, iwo_groups);
      args1.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
      using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
      data_algo =
          search1::Find<T>(args1, exhaustive_search, deterministic, 0, ctx);
      workspace_size =
          std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
    }
    if (filter_grad) {
      // ------------------- cudnn descriptors ---------------------
      filter_grad_data = filter_grad->data<T>();
      args2.handle = handle;
      args2.idesc.set(transformed_input, iwo_groups);
      args2.wdesc.set(*filter_grad, layout_tensor, iwo_groups);
      args2.odesc.set(transformed_output_grad_channel, iwo_groups);
      args2.cdesc.set(dtype, padding_common, strides, dilations, c_groups);
      using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
      filter_algo =
          search2::Find<T>(args2, exhaustive_search, deterministic, 1, ctx);
      workspace_size = std::max(workspace_size,
                                search2::GetWorkspaceSize(args2, filter_algo));
    }
    // ------------------- cudnn conv backward data ---------------------
    ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
    if (input_grad) {
      // Because beta is zero, it is unnecessary to reset input_grad.
      for (int i = 0; i < groups; i++) {
        workspace_handle.RunFunc(
            [&](void* cudnn_workspace_ptr) {
              CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
                  handle, &alpha, args1.wdesc.desc(),
                  filter_data + i * group_offset_filter, args1.odesc.desc(),
                  output_grad_data + i * group_offset_out, args1.cdesc.desc(),
                  data_algo, cudnn_workspace_ptr, workspace_size, &beta,
                  args1.idesc.desc(),
                  transformed_input_grad_data + i * group_offset_in));
            },
            workspace_size);
      }
      // Slice the gradient back to the original extent, undoing the extra
      // zero-padding that was added for asymmetric paddings.
      std::vector<int> starts(transformed_input_channel.dims().size(), 0);
      std::vector<int> axes(transformed_input_channel.dims().size(), 0);
      for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) {
        starts[i] = input_pad[2 * i];
        axes[i] = i;
      }
      transformed_input_grad_channel.mutable_data(ctx.GetPlace());
      if (transformed_input_channel.dims().size() == 4) {
        Slice_2<paddle::platform::CUDADeviceContext, T, 4>(
            ctx, &transformed_input_grad, &transformed_input_grad_channel,
            starts, axes);
      } else {
        Slice_2<paddle::platform::CUDADeviceContext, T, 5>(
            ctx, &transformed_input_grad, &transformed_input_grad_channel,
            starts, axes);
      }
      if (channel_last) {
        TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
            ctx, &transformed_input_grad_channel, input_grad);
      }
    }
    // ------------------- cudnn conv backward filter ---------------------
    if (filter_grad) {
      // Because beta is zero, it is unnecessary to reset filter_grad.
      for (int i = 0; i < groups; i++) {
        workspace_handle.RunFunc(
            [&](void* cudnn_workspace_ptr) {
              CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
                  handle, &alpha, args2.idesc.desc(),
                  input_data + i * group_offset_in, args2.odesc.desc(),
                  output_grad_data + i * group_offset_out, args2.cdesc.desc(),
                  filter_algo, cudnn_workspace_ptr, workspace_size, &beta,
                  args2.wdesc.desc(),
                  filter_grad_data + i * group_offset_filter));
            },
            workspace_size);
      }
    }
  }
};
/*
 * Double-backward (second-order) convolution via cuDNN.
 * Inputs: I, W, dO, ddI, ddW
 * Outputs: ddO, dW, dI
 *   ddO = conv(ddI, W) + conv(I, ddW)   -- two forward passes, the second
 *                                          accumulated with beta = alpha
 *   dW  = conv_bp_filter(ddI, dO)
 *   dI  = conv_bp_data(ddW, dO)
 * Preprocessing (channel-first transpose, symmetrizing asymmetric paddings
 * by zero-padding, slicing dI back afterwards) mirrors the forward kernel.
 */
template <typename T>
class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
                      "It must use CUDAPlace.");
    auto X = ctx.Input<Tensor>("Input");
    auto W = ctx.Input<Tensor>("Filter");
    auto dO = ctx.Input<Tensor>("DOutput");
    auto ddX = ctx.Input<Tensor>("DDInput");
    auto ddW = ctx.Input<Tensor>("DDFilter");
    auto ddO = ctx.Output<Tensor>("DDOutput");
    auto dW = ctx.Output<Tensor>("DFilter");
    auto dX = ctx.Output<Tensor>("DInput");
    if (ddO) {
      ddO->mutable_data<T>(ctx.GetPlace());
    }
    if (dW) {
      dW->mutable_data<T>(ctx.GetPlace());
    }
    if (dX) {
      dX->mutable_data<T>(ctx.GetPlace());
    }
    // const T* x = X->data<T>();
    const T* dy = dO->data<T>();
    const T* w = W->data<T>();
    const T* ddx = nullptr;
    const T* ddw = nullptr;
    T *dw, *dx, *ddy;
    dw = dx = ddy = nullptr;
    T* transformed_dx = nullptr;
    const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
    std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
    int groups = ctx.Attr<int>("groups");
    bool exhaustive_search =
        FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
    bool deterministic = FLAGS_cudnn_deterministic;
    // Runtime algorithm benchmarking and determinism are mutually exclusive.
    if (exhaustive_search && deterministic) {
      PADDLE_THROW(
          "Can't set exhaustive_search True and "
          "FLAGS_cudnn_deterministic True at same time.");
    }
    std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
    const std::string data_format = ctx.Attr<std::string>("data_format");
    const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
    // transform Tensors to channel first-----------
    Tensor transformed_X_channel(X->type());
    Tensor transformed_dO_channel(dO->type());
    Tensor transformed_ddX_channel(ddX->type());
    Tensor transformed_ddO_channel(dO->type());
    Tensor transformed_dX_channel(X->type());
    if (channel_last) {
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, X, &transformed_X_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, X, &transformed_X_channel);
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, dO, &transformed_dO_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, dO, &transformed_dO_channel);
      ResizeToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, ddX, &transformed_ddX_channel);
      TransToChannelFirst<platform::CUDADeviceContext, T>(
          ctx, ddX, &transformed_ddX_channel);
      if (ddO) {
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, ddO, &transformed_ddO_channel);
      }
      if (dX) {
        ResizeToChannelFirst<platform::CUDADeviceContext, T>(
            ctx, dX, &transformed_dX_channel);
        transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
      }
    } else {
      transformed_X_channel = *X;
      transformed_dO_channel = *dO;
      transformed_ddX_channel = *ddX;
      if (ddO) {
        transformed_ddO_channel.ShareDataWith(*ddO);
      }
      if (dX) {
        transformed_dX_channel.ShareDataWith(*dX);
      }
    }
    auto in_dims = transformed_X_channel.dims();
    auto filter_dims = W->dims();
    framework::DDim in_data_dims =
        framework::slice_ddim(in_dims, 2, in_dims.size());
    framework::DDim filter_data_dims =
        framework::slice_ddim(filter_dims, 2, filter_dims.size());
    std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                             in_data_dims, strides, ksize);
    int data_dim = strides.size();  // 2d or 3d
    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
    Tensor transformed_X(X->type());
    Tensor transformed_ddX(X->type());
    Tensor transformed_dX(X->type());
    std::vector<int> padding_common(data_dim, 0);
    std::vector<int> input_pad(X->dims().size() * 2, 0);
    if (!is_sys_pad) {
      // Asymmetric padding: zero-pad X and ddX by the per-side difference so
      // only the symmetric remainder is passed to cuDNN.
      std::vector<int> padding_diff(data_dim);
      std::vector<int> new_input_shape_vec(data_dim + 2);
      new_input_shape_vec[0] = transformed_X_channel.dims()[0];
      new_input_shape_vec[1] = transformed_X_channel.dims()[1];
      for (size_t i = 0; i < data_dim; ++i) {
        padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
        padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
        new_input_shape_vec[i + 2] =
            transformed_X_channel.dims()[i + 2] + padding_diff[i];
        input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
        input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
      }
      framework::DDim new_input_shape(
          framework::make_ddim(new_input_shape_vec));
      transformed_X.Resize(new_input_shape);
      transformed_ddX.Resize(new_input_shape);
      transformed_dX.Resize(new_input_shape);
      auto& dev_ctx =
          ctx.template device_context<paddle::platform::CUDADeviceContext>();
      transformed_X =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      transformed_ddX =
          ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
              new_input_shape, dev_ctx);
      if (dX) {
        transformed_dX =
            ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
                new_input_shape, dev_ctx);
      }
      // pad for input
      const int rank = X->dims().size();
      T pad_value(0.0);
      switch (rank) {
        case 4: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, transformed_ddX_channel, pad_value,
              &transformed_ddX);
        } break;
        case 5: {
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, transformed_ddX_channel, pad_value,
              &transformed_ddX);
        } break;
        default:
          PADDLE_THROW("ConvOp only support tensors with 4 or 5 dimensions.");
      }
    } else {
      transformed_X.ShareDataWith(transformed_X_channel);
      transformed_ddX.ShareDataWith(transformed_ddX_channel);
      if (dX) {
        transformed_dX.ShareDataWith(transformed_dX_channel);
      }
      if (paddings.size() == data_dim) {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[i];
        }
      } else {
        for (size_t i = 0; i < data_dim; ++i) {
          padding_common[i] = paddings[2 * i];
        }
      }
    }
    const T* x = transformed_X.data<T>();
    int iwo_group = groups;
    int c_group = 1;
#if CUDNN_VERSION_MIN(7, 0, 1)
    // cuDNN >= 7 handles groups inside the convolution descriptor.
    iwo_group = 1;
    c_group = groups;
#endif
    auto dtype = platform::CudnnDataType<T>::type;
    auto handle = dev_ctx.cudnn_handle();
    // args1: ddO  = conv_fwd(ddX, W)
    // args2: ddO += conv_fwd(X, ddW)
    // args3: dW   = conv_bwd_filter(ddX, dO)
    // args4: dX   = conv_bwd_data(ddW, dO)
    ConvArgs args1{&transformed_ddX, W,
                   &transformed_ddO_channel, strides,
                   padding_common, dilations};
    ConvArgs args2{&transformed_X, ddW, &transformed_ddO_channel, strides,
                   padding_common, dilations};
    ConvArgs args3{&transformed_ddX, dW, &transformed_dO_channel, strides,
                   padding_common, dilations};
    ConvArgs args4{&transformed_dX, ddW, &transformed_dO_channel, strides,
                   padding_common, dilations};
    cudnnConvolutionFwdAlgo_t fwd_algo1 =
        static_cast<cudnnConvolutionFwdAlgo_t>(0);
    cudnnConvolutionFwdAlgo_t fwd_algo2 =
        static_cast<cudnnConvolutionFwdAlgo_t>(0);
    cudnnConvolutionBwdDataAlgo_t data_algo =
        static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
    cudnnConvolutionBwdFilterAlgo_t filter_algo =
        static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
    auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
    // ddo = conv(ddI, W) + conv(I, ddW)
    size_t workspace_size = 0;
    T* transformed_ddy_channel = nullptr;
    if (ddO) {
      ddy = ddO->data<T>();
      transformed_ddy_channel = transformed_ddO_channel.data<T>();
      if (ddX) {
        args1.handle = handle;
        args1.idesc.set(transformed_ddX, iwo_group);
        args1.wdesc.set(*W, layout, iwo_group);
        args1.odesc.set(transformed_ddO_channel, iwo_group);
        args1.cdesc.set(dtype, padding_common, strides, dilations, c_group);
        using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
        fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, 0, ctx);
        workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
      }
      if (ddW) {
        ddw = ddW->data<T>();
        args2.handle = handle;
        args2.idesc.set(transformed_X, iwo_group);
        args2.wdesc.set(*ddW, layout, iwo_group);
        args2.odesc.set(transformed_ddO_channel, iwo_group);
        args2.cdesc.set(dtype, padding_common, strides, dilations, c_group);
        using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
        fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, 0, ctx);
        workspace_size = std::max(workspace_size,
                                  search2::GetWorkspaceSize(args2, fwd_algo2));
      }
    }
    if (dW && ddX) {
      dw = dW->data<T>();
      args3.handle = handle;
      args3.idesc.set(transformed_ddX, iwo_group);
      args3.wdesc.set(*dW, layout, iwo_group);
      args3.odesc.set(transformed_dO_channel, iwo_group);
      args3.cdesc.set(dtype, padding_common, strides, dilations, c_group);
      using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
      filter_algo =
          search3::Find<T>(args3, exhaustive_search, deterministic, 1, ctx);
      workspace_size = std::max(workspace_size,
                                search3::GetWorkspaceSize(args3, filter_algo));
    }
    if (ddW && dX) {
      transformed_dx = transformed_dX.data<T>();
      args4.handle = handle;
      args4.idesc.set(transformed_dX, iwo_group);
      args4.wdesc.set(*ddW, layout, iwo_group);
      args4.odesc.set(transformed_dO_channel, iwo_group);
      args4.cdesc.set(dtype, padding_common, strides, dilations, c_group);
      using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
      data_algo =
          search4::Find<T>(args4, exhaustive_search, deterministic, 2, ctx);
      workspace_size =
          std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
    }
    int i_n, i_c, i_d, i_h, i_w;
    GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h,
             &i_w);
    int o_n, o_c, o_d, o_h, o_w;
    GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
             &o_h, &o_w);
    int group_offset_in = i_c / groups * i_h * i_w * i_d;
    int group_offset_out = o_c / groups * o_h * o_w * o_d;
    int group_offset_filter = W->numel() / groups;
    ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
    auto wkspace_handle = dev_ctx.cudnn_workspace_handle();
    if (ddO) {
      if (ddX) {
        ddx = transformed_ddX.data<T>();
        for (int i = 0; i < groups; i++) {
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
                    handle, &alpha, args1.idesc.desc(),
                    ddx + i * group_offset_in, args1.wdesc.desc(),
                    w + i * group_offset_filter, args1.cdesc.desc(), fwd_algo1,
                    workspace_ptr, workspace_size, &beta, args1.odesc.desc(),
                    transformed_ddy_channel + i * group_offset_out));
              },
              workspace_size);
        }
      }
      if (ddW) {
        for (int i = 0; i < groups; i++) {
          wkspace_handle.RunFunc(
              [&](void* workspace_ptr) {
                // Note: beta is passed as &alpha (i.e. 1) so this second
                // forward pass accumulates onto conv(ddX, W) computed above.
                CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward(
                    handle, &alpha, args2.idesc.desc(), x + i * group_offset_in,
                    args2.wdesc.desc(), ddw + i * group_offset_filter,
                    args2.cdesc.desc(), fwd_algo2, workspace_ptr,
                    workspace_size, &alpha, args2.odesc.desc(),
                    transformed_ddy_channel + i * group_offset_out));
              },
              workspace_size);
        }
      }
      if (channel_last) {
        TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
            ctx, &transformed_ddO_channel, ddO);
      }
    }
    // dO is a mandatory input (already dereferenced above), so resolve its
    // channel-first buffer unconditionally. BUG FIX: previously this pointer
    // was only assigned inside the `dW && ddX` branch, so computing dX alone
    // (without dW or ddX) dereferenced a null pointer in the backward-data
    // call below.
    T* transformed_dy_channel = transformed_dO_channel.data<T>();
    if (dW && ddX) {
      ddx = transformed_ddX.data<T>();
      for (int i = 0; i < groups; i++) {
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
                  handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in,
                  args3.odesc.desc(),
                  transformed_dy_channel + i * group_offset_out,
                  args3.cdesc.desc(), filter_algo, workspace_ptr,
                  workspace_size, &beta, args3.wdesc.desc(),
                  dw + i * group_offset_filter));
            },
            workspace_size);
      }
    }
    if (dX && ddW) {
      ddw = ddW->data<T>();
      for (int i = 0; i < groups; i++) {
        wkspace_handle.RunFunc(
            [&](void* workspace_ptr) {
              CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData(
                  handle, &alpha, args4.wdesc.desc(),
                  ddw + i * group_offset_filter, args4.odesc.desc(),
                  transformed_dy_channel + i * group_offset_out,
                  args4.cdesc.desc(), data_algo, workspace_ptr, workspace_size,
                  &beta, args4.idesc.desc(),
                  transformed_dx + i * group_offset_in));
            },
            workspace_size);
      }
      // reverse padded input: slice dX back to the un-padded extent.
      std::vector<int> starts(X->dims().size(), 0);
      std::vector<int> axes(X->dims().size(), 0);
      for (size_t i = 0; i < X->dims().size(); ++i) {
        starts[i] = input_pad[2 * i];
        axes[i] = i;
      }
      if (X->dims().size() == 4) {
        Slice_2<paddle::platform::CUDADeviceContext, T, 4>(
            ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
      } else {
        Slice_2<paddle::platform::CUDADeviceContext, T, 5>(
            ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
      }
      if (channel_last) {
        TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
            ctx, &transformed_dX_channel, dX);
      }
    }
  }
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
// Register the cuDNN-backed convolution kernels with Paddle's operator
// registry for CUDA places. Forward and first-order backward kernels are
// registered for float, double and float16.
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
                   paddle::operators::CUDNNConvOpKernel<float>,
                   paddle::operators::CUDNNConvOpKernel<double>,
                   paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
                   paddle::operators::CUDNNConvGradOpKernel<float>,
                   paddle::operators::CUDNNConvGradOpKernel<double>,
                   paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
// Second-order (grad-of-grad) kernels used for double backward
REGISTER_OP_KERNEL(
    conv2d_grad_grad, CUDNN, plat::CUDAPlace,
    paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
    paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
    paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
                   paddle::operators::CUDNNConvOpKernel<float>,
                   paddle::operators::CUDNNConvOpKernel<double>,
                   paddle::operators::CUDNNConvOpKernel<plat::float16>);
// NOTE(review): unlike conv2d_grad, conv3d_grad has no float16 registration
// here — presumably unsupported/untested for this op; confirm before adding.
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
                   paddle::operators::CUDNNConvGradOpKernel<float>,
                   paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
    conv3d_grad_grad, CUDNN, plat::CUDAPlace,
    paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
    paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
    paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
|
ea3adfeeb56b4adac638831994876bd78c272873.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostram>
#include<cuda.h>
#include<stdio.h>
#include<math.h>
#define fram 64
#define par 50000
/**
 * Scatter each particle onto the 2D mesh with bilinear (area) weights.
 * Launched as a 2D grid: y indexes the particle, x indexes the coordinate
 * component; only the col == 0 thread does the scatter so each particle is
 * deposited exactly once.
 * @param d_p   par x 2 array of particle (x, y) positions
 * @param d_net fram x fram accumulation grid (device memory)
 */
__global__ void parMesh(float d_p[par][2], int d_net[fram][fram])
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // FIX: a __global__ kernel must have a void return type and may not
    // return a value, so the original "__global__ int" / "return 0;" did
    // not compile.
    if (i >= par || col >= 2)
    {
        return;
    }
    // FIX: previously both col == 0 and col == 1 threads executed the scatter,
    // double-counting every particle.
    if (col != 0)
    {
        return;
    }
    float x = d_p[i][0];
    float y = d_p[i][1];
    int left = (int)floor(x);
    int right = left + 1;
    int bottom = (int)floor(y);
    int top = bottom + 1;
    // Bilinear weights of the four surrounding grid nodes
    float fL = x - left;
    float fR = 1 - fL;
    float fB = y - bottom;
    float fT = 1 - fB;
    // FIX: the accumulators previously referenced an undeclared 'net'.
    // NOTE(review): concurrent particles can update the same cell, so these
    // read-modify-writes race, and the fractional weights truncate when
    // stored into an int grid — atomicAdd on a float grid is presumably the
    // intent; TODO confirm.
    d_net[left][bottom] = d_net[left][bottom] + (fT * fR);
    d_net[right][bottom] = d_net[right][bottom] + (fT * fL);
    d_net[left][top] = d_net[left][top] + (fB * fR);
    d_net[right][top] = d_net[right][top] + (fB * fL);
}
/**
 * Write the accumulation grid out as comma-separated text.
 * NOTE(review): no device memory is allocated and parMesh is never launched
 * here, so the file holds an all-zero grid; presumably the kernel launch and
 * copy-back still need to be written — TODO confirm.
 * @return 0 on success, 1 if the output file could not be opened
 */
int main()
{
    // FIX: 'net' and the loop counters i/j were previously undeclared.
    static float net[fram][fram] = { { 0.0f } };

    // Writing the results in a file
    FILE *f = fopen("parallelCuda.txt", "w");
    if (f == NULL)
    {
        printf("Error opening file!\n");
        // FIX: exit(1) was called without including <stdlib.h>
        return 1;
    }
    for (int i = 0; i < fram; ++i)
    {
        for (int j = 0; j < fram; j++)
        {
            fprintf(f, "%f ,", net[i][j]);
        }
        fprintf(f, "\n");
    }
    fclose(f);
    return 0;
}
| ea3adfeeb56b4adac638831994876bd78c272873.cu | #include<iostram>
#include<cuda.h>
#include<stdio.h>
#include<math.h>
#define fram 64
#define par 50000
/**
 * Scatter each particle onto the 2D mesh with bilinear (area) weights.
 * Launched as a 2D grid: y indexes the particle, x indexes the coordinate
 * component; only the col == 0 thread does the scatter so each particle is
 * deposited exactly once.
 * @param d_p   par x 2 array of particle (x, y) positions
 * @param d_net fram x fram accumulation grid (device memory)
 */
__global__ void parMesh(float d_p[par][2], int d_net[fram][fram])
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // FIX: a __global__ kernel must have a void return type and may not
    // return a value, so the original "__global__ int" / "return 0;" did
    // not compile.
    if (i >= par || col >= 2)
    {
        return;
    }
    // FIX: previously both col == 0 and col == 1 threads executed the scatter,
    // double-counting every particle.
    if (col != 0)
    {
        return;
    }
    float x = d_p[i][0];
    float y = d_p[i][1];
    int left = (int)floor(x);
    int right = left + 1;
    int bottom = (int)floor(y);
    int top = bottom + 1;
    // Bilinear weights of the four surrounding grid nodes
    float fL = x - left;
    float fR = 1 - fL;
    float fB = y - bottom;
    float fT = 1 - fB;
    // FIX: the accumulators previously referenced an undeclared 'net'.
    // NOTE(review): concurrent particles can update the same cell, so these
    // read-modify-writes race, and the fractional weights truncate when
    // stored into an int grid — atomicAdd on a float grid is presumably the
    // intent; TODO confirm.
    d_net[left][bottom] = d_net[left][bottom] + (fT * fR);
    d_net[right][bottom] = d_net[right][bottom] + (fT * fL);
    d_net[left][top] = d_net[left][top] + (fB * fR);
    d_net[right][top] = d_net[right][top] + (fB * fL);
}
/**
 * Write the accumulation grid out as comma-separated text.
 * NOTE(review): no device memory is allocated and parMesh is never launched
 * here, so the file holds an all-zero grid; presumably the kernel launch and
 * copy-back still need to be written — TODO confirm.
 * @return 0 on success, 1 if the output file could not be opened
 */
int main()
{
    // FIX: 'net' and the loop counters i/j were previously undeclared.
    static float net[fram][fram] = { { 0.0f } };

    // Writing the results in a file
    FILE *f = fopen("parallelCuda.txt", "w");
    if (f == NULL)
    {
        printf("Error opening file!\n");
        // FIX: exit(1) was called without including <stdlib.h>
        return 1;
    }
    for (int i = 0; i < fram; ++i)
    {
        for (int j = 0; j < fram; j++)
        {
            fprintf(f, "%f ,", net[i][j]);
        }
        fprintf(f, "\n");
    }
    fclose(f);
    return 0;
}
|
a705630536e644f834378cb8384729e32eac75a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// TSDFVolume.cpp
// TSDF
//
// Created by Dave on 11/03/2016.
// Copyright 2016 Sindesso. All rights reserved.
//
#include "libsdf/cuda_utilities.hpp"
#include "libsdf/cuda_coordinate_transforms.hpp"
#include "libsdf/TSDFVolume.hpp"
#include "libsdf/GPURaycaster.hpp"
#include "libsdf/TSDF_utilities.hpp"
#include <fstream>
#include <iomanip>
#include <cfloat>
#include <cstdint>
#include "math_constants.h"
const float POINT_EPSILON=0.001f;
/**
* Compute the index into the voxel space for a given x,y,z coordinate
* @param size The size (in voxels) of the volume
* @param x The x coord
* @param y The y coord
* @param z The z coord
* @return The index
*/
__device__ __forceinline__
size_t index( const dim3& size, int x, int y, int z ) {
    // Linearise (x, y, z): x varies fastest, then y, then whole x-y slices for z.
    size_t slice_stride = size.x * size.y;
    return x + ( y * size.x ) + ( z * slice_stride );
}
/**
* @param size The size (in voxels) of the volume
* @param distance_data The depth data for the volume
* @param x The horizontal voxel coord
* @param y The vertical voxel coord
* @param z The depth voxel coord
* @return The distance to the surface at that voxel
*/
__device__ __forceinline__
float distance( const dim3& size, float *distance_data, int x, int y, int z ) {
    // Fetch the stored distance value for voxel (x, y, z)
    size_t voxel_idx = index( size, x, y, z );
    return distance_data[ voxel_idx ];
}
/**
* Set the distance to the surface at a grid point
* @param size The size (in voxels) of the volume
* @param distance_data The depth data for the volume
* @param x The horizontal voxel coord
* @param y The vertical voxel coord
* @param z The depth voxel coord
* @param distance The distance to set
*/
__device__ __forceinline__
void set_distance(const dim3& size, float * distance_data, int x, int y, int z, float distance ) {
    // Store the distance value for voxel (x, y, z)
    distance_data[ index( size, x, y, z ) ] = distance;
}
/**
* @param size The size (in voxels) of the volume
* @param weights The weight data for the volume
* @param x The horizontal voxel coord
* @param y The vertical voxel coord
* @param z The depth voxel coord
* @return The weight at that voxel
*/
__device__ __forceinline__
float weight( const dim3& size, float * weights, int x, int y, int z ) {
    // Fetch the integration weight for voxel (x, y, z)
    size_t voxel_idx = index( size, x, y, z );
    return weights[ voxel_idx ];
}
/**
* @param size The size (in voxels) of the volume
* @param weights The weight data for the volume
* @param x The horizontal voxel coord
* @param y The vertical voxel coord
* @param z The depth voxel coord
* @param weight The weight to set
* @return The weight at that voxel
*/
__device__ __forceinline__
void set_weight( const dim3& size, float * weights, int x, int y, int z, float weight ) {
    // Store the integration weight for voxel (x, y, z)
    size_t voxel_idx = index( size, x, y, z );
    weights[ voxel_idx ] = weight;
}
/**
* Obtain indices and trilinear coefficients for for the gridpoints which surround the given point in space
* @param point The point in TSDF coordinate space
* @param voxel_grid_size Dimensions of the TSDF
* @param voxel_size The physical size of a single voxel
* @param indices An array of indices of the voxels surrounding the given point
* ordered as (minx, miny, minz), (maxx, miny, minz), (maxx, miny, maxz), (maxx, miny, minz) and then the maxz values
* @return true If the values in indices are valid (ie point is in TSDF space)
*/
__device__
bool get_trilinear_elements( const float3 point,
                             const dim3 voxel_grid_size,
                             const float3 voxel_size,
                             int * const indices,
                             float * const coefficients ) {
    bool is_valid = false;

    // Physical extent of the voxel volume
    float3 max_values {
        voxel_grid_size.x * voxel_size.x,
        voxel_grid_size.y * voxel_size.y,
        voxel_grid_size.z * voxel_size.z
    };

    // Snap points that lie within POINT_EPSILON outside the upper faces back inside.
    // NOTE(review): the lower-bound branches clamp ANY coordinate below
    // -POINT_EPSILON to 0 (not just near misses), while values in
    // (-POINT_EPSILON, 0) are left negative — confirm this asymmetry is intended.
    float3 adjusted_point = point;
    if( (point.x > max_values.x) && ( point.x - max_values.x < POINT_EPSILON ) ) adjusted_point.x = max_values.x - POINT_EPSILON;
    if( (point.y > max_values.y) && ( point.y - max_values.y < POINT_EPSILON ) ) adjusted_point.y = max_values.y - POINT_EPSILON;
    if( (point.z > max_values.z) && ( point.z - max_values.z < POINT_EPSILON ) ) adjusted_point.z = max_values.z - POINT_EPSILON;
    if( point.x < -POINT_EPSILON ) adjusted_point.x = 0.0f;
    if( point.y < -POINT_EPSILON ) adjusted_point.y = 0.0f;
    if( point.z < -POINT_EPSILON ) adjusted_point.z = 0.0f;

    // Get the voxel containing this point
    int3 voxel = voxel_for_point( adjusted_point, voxel_size );

    // Only proceed if the voxel lies inside the grid
    if ( voxel.x >= 0 && voxel.y >= 0 && voxel.z >= 0 && voxel.x < voxel_grid_size.x && voxel.y < voxel_grid_size.y && voxel.z < voxel_grid_size.z) {

        // Get the centre of the voxel
        float3 v_centre = centre_of_voxel_at( voxel.x, voxel.y, voxel.z, voxel_size );

        // Set up the lower bound for trilinear interpolation: the voxel whose
        // centre is at-or-below the point in each axis
        int3 lower;
        lower.x = (adjusted_point.x < v_centre.x) ? voxel.x - 1 : voxel.x;
        lower.y = (adjusted_point.y < v_centre.y) ? voxel.y - 1 : voxel.y;
        lower.z = (adjusted_point.z < v_centre.z) ? voxel.z - 1 : voxel.z;

        // Handle lower out of bounds
        lower.x = max( lower.x, 0 );
        lower.y = max( lower.y, 0 );
        lower.z = max( lower.z, 0 );

        // Compute fractional offsets u,v,w within the cell formed by the 8 centres
        float3 lower_centre = centre_of_voxel_at( lower.x, lower.y, lower.z, voxel_size );
        float3 uvw = f3_sub( adjusted_point, lower_centre );
        uvw = f3_div_elem( uvw, voxel_size );
        float u = uvw.x;
        float v = uvw.y;
        float w = uvw.z;

        // Populate indices: lower-y ring first (minx/minz, maxx/minz, maxx/maxz,
        // minx/maxz), then the same ring at upper y
        int delta_x = 1;
        int delta_y = voxel_grid_size.x;
        int delta_z = voxel_grid_size.x * voxel_grid_size.y;
        indices[0] = lower.x + ( lower.y * voxel_grid_size.x ) + ( lower.z * voxel_grid_size.x * voxel_grid_size.y );
        indices[1] = indices[0] + delta_x;
        indices[2] = indices[1] + delta_z;
        indices[3] = indices[0] + delta_z;
        indices[4] = indices[0] + delta_y;
        indices[5] = indices[1] + delta_y;
        indices[6] = indices[2] + delta_y;
        indices[7] = indices[3] + delta_y;

        // And the matching trilinear blend weights
        coefficients[0] = (1 - u) * (1 - v) * (1 - w);
        coefficients[1] = u * (1 - v) * (1 - w);
        coefficients[2] = u * (1 - v) * w ;
        coefficients[3] = (1 - u) * (1 - v) * w;
        coefficients[4] = (1 - u) * v * (1 - w);
        coefficients[5] = u * v * (1 - w);
        coefficients[6] = (1 - u) * v * w;
        coefficients[7] = u * v * w;

        // FIX: is_valid was never set, so this function always returned false
        // even when indices and coefficients had been populated successfully.
        is_valid = true;
    }
    // Voxel is out of bounds and so can't be used.
    else {
        printf( "Point outside of voxel space %f, %f, %f\n", point.x, point.y, point.z );
    }
    return is_valid;
}
/**
* Apply rotation to point
* @param rotation The rotation expressed as 3 Euler angles
* @param point The point to rotate
* @return The rotated point
*/
__device__
float3 rotate( const float3 point, const float3 rotation ) {
    // Euler-angle rotation: rotation.x/.y/.z are angles (radians) about the
    // X, Y and Z axes; the rows below form the combined rotation matrix.
    float c1 = cos( rotation.x );
    float c2 = cos( rotation.y );
    float c3 = cos( rotation.z );
    float s1 = sin( rotation.x );
    float s2 = sin( rotation.y );
    float s3 = sin( rotation.z );
    // Apply the matrix row by row
    // NOTE(review): the exact axis-order convention (e.g. XYZ vs ZYX) is not
    // stated anywhere visible here — confirm against the producer of
    // m_global_rotation before reusing this helper elsewhere.
    float rx = (c2 * c3) * point.x - (c2 * s3 ) * point.y + s2 * point.z;
    float ry = (c1*s3 + s1*s2*c3) * point.x + (c1*c3-s1*s2*s3) * point.y - (s1 * c2 ) * point.z;
    float rz = (s1*s3 - c1*s2*c3) * point.x + (s1*c3 + c1*s2*s3)* point.y + (c1 * c2 ) * point.z;
    return make_float3( rx, ry, rz );
}
/**
* Apply the TSDF deformation field to a collection of points, deforming them in place
* @param global_rotation The global rotation of the space
* @param global_translation The global translation of the space
* @param deformation_nodes An array of DeformationNodes
* @param voxel_grid_size The voxel size of the space
* @param voxel_space_size The physical size of the space
* @param points The points to be transformed in world coordinates
* @param num_points The number of points to be transformed
*/
__global__
void deformation_kernel( const float3 global_rotation,
                         const float3 global_translation,
                         const TSDFVolume::DeformationNode * const deformation_nodes,
                         const dim3 voxel_grid_size,
                         const float3 voxel_size,
                         const float3 voxel_space_size,
                         const float3 offset,
                         const int num_points,
                         float3 * points) {
    // One thread per point; launched as a 1D grid
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if( idx < num_points ) {
        float3 point = points[idx];
        // Get indices of neighbours.
        // Points arrive in world coordinates; shift into TSDF-local space first.
        point = f3_sub( point, offset );
        int neighbours[8];
        float coefficients[8];
        // NOTE(review): the validity flag returned by get_trilinear_elements is
        // ignored, so an out-of-bounds point reads uninitialised neighbour /
        // coefficient arrays — confirm callers guarantee in-range points.
        get_trilinear_elements( point, voxel_grid_size, voxel_size, neighbours, coefficients );
        // Compute the deformation at this point: trilinear blend of the
        // translations of the 8 surrounding deformation nodes
        float3 deformed_point{ 0.0f, 0.0f, 0.0f };
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[0], deformation_nodes[ neighbours[0] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[1], deformation_nodes[ neighbours[1] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[2], deformation_nodes[ neighbours[2] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[3], deformation_nodes[ neighbours[3] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[4], deformation_nodes[ neighbours[4] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[5], deformation_nodes[ neighbours[5] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[6], deformation_nodes[ neighbours[6] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[7], deformation_nodes[ neighbours[7] ].translation ) );
        // Apply global rotation
        deformed_point = rotate( deformed_point, global_rotation );
        // Apply global translation
        deformed_point = f3_add( deformed_point, global_translation );
        // Set this to output point (deformation is performed in place)
        points[idx] = deformed_point;
    }
}
/**
*
*/
// Apply this volume's deformation field (plus global rotation/translation) to
// num_points world-space points, in place. Points are round-tripped through
// device memory so the deformation_kernel can run on them.
void TSDFVolume::deform_mesh( const int num_points, float3 * points ) const {
    // Copy the point array to the device
    float3 * d_points;
    cudaSafeAlloc( (void **) &d_points, num_points * sizeof( float3 ), "d_points" );
    hipError_t err = hipMemcpy( d_points, points, num_points * sizeof( float3 ), hipMemcpyHostToDevice );
    check_cuda_error( "Failed to copy points to device for deformation", err);
    // One thread per point, 512 threads per block
    dim3 block( 512, 1, 1 );
    dim3 grid ( divUp( num_points, block.x ), 1, 1 );
    hipLaunchKernelGGL(( deformation_kernel), dim3(grid), dim3(block) , 0, 0, m_global_rotation,
            m_global_translation,
            m_deformation_nodes,
            m_size,
            m_voxel_size,
            m_physical_size,
            m_offset,
            num_points,
            d_points );
    hipDeviceSynchronize( );
    err = hipGetLastError();
    check_cuda_error( "Deformation kernel failed", err);
    // Copy the deformed points back over the caller's array
    err = hipMemcpy( points, d_points, num_points * sizeof( float3 ), hipMemcpyDeviceToHost );
    check_cuda_error( "Failed to copy points from device after deformation", err);
    cudaSafeFree( d_points, "d_points");
}
/**
* @param distance_data The voxel values (in device memory)
* @param weight_data The weight values (in device memory)
* @param voxel_grid_size The voxel size of the space
* @param voxel_space_size The physical size of the space
* @param offset The offset of the front, bottom, left corner
* @param trunc_distance A distance, greater than the voxel diagonal, at which we truncate distance measures in the TSDF
* @param pose The camera pose matrix (maps cam to world, 4x4 )
* @param inv_pose Inverse of the camera pose matrix (maps world to camera coords) (4x4)
* @param k The camera's intrinsic parameters (3x3)
* @param kinv Invers eof k (3x3)
* @param width Width of the depth image
* @param height Height of the depth image
* @param depth_map Pointer to array of width*height uint16 types in devcie memory
*/
__global__
void integrate_kernel( float * distance_data,
                       float * weight_data,
                       dim3 voxel_grid_size,
                       float3 voxel_space_size,
                       TSDFVolume::DeformationNode * deformation_nodes,
                       float3 offset,
                       const float trunc_distance,
                       const float max_weight,
                       Mat44 pose,
                       Mat44 inv_pose,
                       Mat33 k,
                       Mat33 kinv,
                       uint32_t width,
                       uint32_t height,
                       const uint16_t * depth_map) {
    // Extract the voxel Y and Z coordinates we then iterate over X.
    // One thread handles a whole column of voxels along X.
    int vy = threadIdx.y + blockIdx.y * blockDim.y;
    int vz = threadIdx.z + blockIdx.z * blockDim.z;
    // If this thread is in range
    if ( vy < voxel_grid_size.y && vz < voxel_grid_size.z ) {
        // The next (x_size) elements from here are the x coords
        int voxel_index = ((voxel_grid_size.x * voxel_grid_size.y) * vz ) + (voxel_grid_size.x * vy);
        // For each voxel in this column
        for ( int vx = 0; vx < voxel_grid_size.x; vx++ ) {
            // Work out where in the image, the centre of this voxel projects
            // This gives us a pixel in the depth map
            // Convert voxel to world coords of deformed centre.
            // NOTE(review): the node translation set by initialise_deformation
            // already includes the grid offset, so adding 'offset' again here
            // looks like it may double-count it — confirm whether translations
            // are re-based elsewhere before relying on this.
            float3 centre_of_voxel = f3_add( offset, deformation_nodes[ voxel_index ].translation);
            // Convert world to pixel coords
            int3 centre_of_voxel_in_pix = world_to_pixel( centre_of_voxel, inv_pose, k );
            // if this point is in the camera view frustum...
            if ( ( centre_of_voxel_in_pix.x >= 0 ) && ( centre_of_voxel_in_pix.x < width ) && ( centre_of_voxel_in_pix.y >= 0 ) && ( centre_of_voxel_in_pix.y < height) ) {
                // Extract the depth to the surface at this point
                uint32_t voxel_pixel_index = centre_of_voxel_in_pix.y * width + centre_of_voxel_in_pix.x;
                uint16_t surface_depth = depth_map[ voxel_pixel_index ];
                // If the depth is valid (0 encodes "no reading")
                if ( surface_depth > 0 ) {
                    // Project depth entry to a vertex ( in camera space)
                    float3 surface_vertex = pixel_to_camera( centre_of_voxel_in_pix, kinv, surface_depth );
                    // Compute the SDF is the distance between the camera origin and surface_vertex in world coordinates
                    float3 voxel_cam = world_to_camera( centre_of_voxel, inv_pose );
                    float sdf = surface_vertex.z - voxel_cam.z;
                    // Only integrate voxels that are not deep inside the surface
                    if( sdf >= -trunc_distance ) {
                        // Truncate the sdf to the range -trunc_distance -> trunc_distance
                        float tsdf;
                        if ( sdf > 0 ) {
                            tsdf = min( sdf, trunc_distance);
                        } else {
                            tsdf = sdf;
                        }
                        // Extract prior weight
                        float prior_weight = weight_data[voxel_index];
                        float current_weight = 1.0f;
                        // Running weighted average of distances.
                        // NOTE(review): the max_weight clamp is commented out,
                        // leaving the 'max_weight' parameter unused — weights
                        // grow without bound; confirm whether the clamp should
                        // be restored.
                        float new_weight = prior_weight + current_weight;
                        // new_weight = min(new_weight, max_weight );
                        float prior_distance = distance_data[voxel_index];
                        float new_distance = ( (prior_distance * prior_weight) + (tsdf * current_weight) ) / new_weight;
                        weight_data[voxel_index] = new_weight;
                        distance_data[voxel_index] = new_distance;
                    } // End of sdf > -trunc
                } // End of depth > 0
            } // End of point in frustrum
            voxel_index++;
        } // End each voxel in this column
    }
}
// Destructor: logs then releases all device-side buffers (distances, weights,
// colours and deformation nodes) via deallocate().
TSDFVolume::~TSDFVolume() {
    std::cout << "Destroying TSDFVolume" << std::endl;
    deallocate( );
}
/**
* Deallocate storage for this TSDF
*/
/**
 * Release every device allocation owned by this TSDF and null the pointers,
 * so a later deallocate() (or the destructor) is a safe no-op.
 */
void TSDFVolume::deallocate( ) {
    if ( m_distances != nullptr ) {
        hipFree( m_distances );
        m_distances = nullptr;
    }
    if ( m_weights != nullptr ) {
        hipFree( m_weights );
        m_weights = nullptr;
    }
    if ( m_colours != nullptr ) {
        hipFree( m_colours );
        m_colours = nullptr;
    }
    if ( m_deformation_nodes != nullptr ) {
        hipFree( m_deformation_nodes );
        m_deformation_nodes = nullptr;
    }
}
/**
* Constructor with specified number of voxels in each dimension
* @param size
* @param physical_size
*/
TSDFVolume::TSDFVolume( const UInt3& size, const Float3& physical_size ) : m_offset { 0.0, 0.0, 0.0 }, m_distances {NULL}, m_weights {NULL}, m_deformation_nodes{NULL}, m_colours{NULL} {
    // Reject any non-positive dimension before allocating storage
    const bool valid_voxel_dims    = ( size.x > 0 ) && ( size.y > 0 ) && ( size.z > 0 );
    const bool valid_physical_dims = ( physical_size.x > 0 ) && ( physical_size.y > 0 ) && ( physical_size.z > 0 );
    if ( !( valid_voxel_dims && valid_physical_dims ) ) {
        throw std::invalid_argument( "Attempt to construct TSDFVolume with zero or negative size" );
    }
    set_size( size.x, size.y, size.z , physical_size.x, physical_size.y, physical_size.z );
}
/**
* Make a TSDFVolume with the given dimensins and physical dimensions
* @param volume_x X dimension in voxels
* @param volume_y Y dimension in voxels
* @param volume_z Z dimension in voxels
* @param psize_x Physical size in X dimension in mm
* @param psize_y Physical size in Y dimension in mm
* @param psize_z Physical size in Z dimension in mm
*/
TSDFVolume::TSDFVolume( uint16_t volume_x, uint16_t volume_y, uint16_t volume_z, float psize_x, float psize_y, float psize_z ) : m_offset { 0.0, 0.0, 0.0 }, m_distances {NULL}, m_weights {NULL}, m_deformation_nodes{NULL}, m_colours{NULL} {
    // All voxel and physical dimensions must be strictly positive
    if ( ( volume_x > 0 ) && ( volume_y > 0 ) && ( volume_z > 0 ) &&
            ( psize_x > 0 ) && ( psize_y > 0 ) && ( psize_z > 0 ) ) {
        set_size( volume_x, volume_y, volume_z , psize_x, psize_y, psize_z );
    } else {
        // FIX: the message previously named "CPUTSDFVolume"; use the actual
        // class name, consistent with the (UInt3, Float3) constructor.
        throw std::invalid_argument( "Attempt to construct TSDFVolume with zero or negative size" );
    }
}
/**
* Load a TSDFVolume from the specified file. The volume must previously have been saved
*/
/**
 * Read one per-voxel data component (distances, weights, colours or
 * deformation nodes) from the stream into a fresh device allocation.
 * On any failure, everything allocated here is released, error_message is
 * set and false is returned.
 * @param ifs Open binary stream positioned at the start of the component
 * @param num_voxels Number of elements to read
 * @param component_name Short name used in progress and error messages
 * @param device_ptr Receives the device allocation on success
 * @param error_message Receives a description of the failure
 * @return true iff the component was read and copied to the device
 */
template <typename T>
static bool load_tsdf_component( std::ifstream& ifs,
                                 size_t num_voxels,
                                 const std::string& component_name,
                                 T ** device_ptr,
                                 std::string& error_message ) {
    T * host_data = new T[ num_voxels ];
    if ( !host_data ) {
        error_message = "Failed to allocate host memory for " + component_name + " data";
        return false;
    }

    size_t data_size = num_voxels * sizeof( T );
    hipError_t err = hipMalloc( device_ptr, data_size );
    if ( err != hipSuccess ) {
        delete[] host_data;
        error_message = "Failed to allocate device memory for " + component_name + " data";
        return false;
    }

    // Stage through host memory, then push to the device
    ifs.read( (char *) host_data, data_size );
    if ( !ifs ) {
        delete[] host_data;
        hipFree( *device_ptr );
        error_message = "Failed to read " + component_name + " data";
        return false;
    }

    err = hipMemcpy( *device_ptr, host_data, data_size, hipMemcpyHostToDevice );
    delete[] host_data;
    if ( err != hipSuccess ) {
        hipFree( *device_ptr );
        error_message = "Failed to copy " + component_name + " data to device";
        return false;
    }

    std::cout << "  loaded " << component_name << " data" << std::endl;
    return true;
}

TSDFVolume::TSDFVolume( const std::string& file_name ) {
    using namespace std;

    ifstream ifs{ file_name, ios::in | ios::binary };

    bool success = true;
    std::string specific_error_message = "";

    // Voxel grid dimensions
    ifs.read( (char *) &m_size, sizeof( m_size ) );
    if( !ifs ) {
        specific_error_message = "Couldn't load file data";
        success = false;
    } else {
        std::cout << "Loading TSDF with size " << m_size.x << "x" << m_size.y << "x" << m_size.z << std::endl;
    }

    // Physical dimensions in mm; the per-voxel size is derived, not stored
    if( success ) {
        ifs.read( (char *) &m_physical_size, sizeof( m_physical_size));
        if( ifs ) {
            std::cout << "  physical size is " << m_physical_size.x << "x" << m_physical_size.y << "x" << m_physical_size.z << "mm" << std::endl;
            // Compute voxel size
            m_voxel_size = f3_div_elem( m_physical_size, m_size );
        } else {
            success = false;
            specific_error_message = "Couldn't load physical size";
        }
    }

    // Remaining scalar header fields (offset, truncation distance, max weight,
    // global transform)
    if( ifs ) {
        ifs.read( (char *)&m_offset, sizeof( m_offset));
        ifs.read( (char *)&m_truncation_distance, sizeof( m_truncation_distance));
        ifs.read( (char *)&m_max_weight, sizeof( m_max_weight));
        ifs.read( (char *)&m_global_translation, sizeof( m_global_translation));
        ifs.read( (char *)&m_global_rotation, sizeof( m_global_rotation));

        std::cout << "offset     : (" << m_offset.x << "," << m_offset.y << "," << m_offset.z << ")" << std::endl;
        std::cout << "trunc dist : " << m_truncation_distance << std::endl;
        std::cout << "max weight : " << m_max_weight << std::endl;
        std::cout << "global t   : (" << m_global_translation.x << "," << m_global_translation.y << "," << m_global_translation.z << ")" << std::endl;
        std::cout << "global R   : (" << m_global_rotation.x << "," << m_global_rotation.y << "," << m_global_rotation.z << ")" << std::endl;

        if( ifs ) {
            std::cout << "  read header data" << std::endl;
        } else {
            success = false;
            specific_error_message = "Couldn't load header data";
        }
    }

    // Per-voxel components, stored in this fixed order
    size_t num_voxels = m_size.x * m_size.y * m_size.z;
    if ( success ) success = load_tsdf_component( ifs, num_voxels, "distance", &m_distances, specific_error_message );
    if ( success ) success = load_tsdf_component( ifs, num_voxels, "weight", &m_weights, specific_error_message );
    if ( success ) success = load_tsdf_component( ifs, num_voxels, "colour", &m_colours, specific_error_message );
    if ( success ) success = load_tsdf_component( ifs, num_voxels, "deformation", &m_deformation_nodes, specific_error_message );

    ifs.close();

    if( !success ) {
        std::string msg = "Failed to load TSDF ";
        msg += file_name;
        msg += " " + specific_error_message;
        throw std::invalid_argument( msg );
    }
}
/**
* Set the size of the volume. This will delete any existing values and resize the volume, clearing it when done.
* Volume offset is maintained
* @param volume_x X dimension in voxels
* @param volume_y Y dimension in voxels
* @param volume_z Z dimension in voxels
* @param psize_x Physical size in X dimension in mm
* @param psize_y Physical size in Y dimension in mm
* @param psize_z Physical size in Z dimension in mm
*/
__host__
void TSDFVolume::set_size( uint16_t volume_x, uint16_t volume_y, uint16_t volume_z, float psize_x, float psize_y, float psize_z) {
    // All dimensions must be non-zero before (re)allocating
    if ( ( volume_x != 0 && volume_y != 0 && volume_z != 0 ) && ( psize_x != 0 && psize_y != 0 && psize_z != 0 ) ) {
        // Remove existing data
        deallocate() ;
        m_size = dim3 { volume_x, volume_y, volume_z };
        m_physical_size = float3 { psize_x, psize_y, psize_z };
        // Compute truncation distance - must be at least 2x max voxel size
        m_voxel_size = f3_div_elem( m_physical_size, m_size );
        // Set t > diagonal of voxel
        m_truncation_distance = 1.1f * f3_norm( m_voxel_size );
        // Allocate device storage.
        // NOTE(review): volume_x * volume_y * volume_z is evaluated in int
        // before widening to size_t — could overflow for very large grids;
        // verify the intended maximum volume size.
        hipError_t err;
        size_t data_size = volume_x * volume_y * volume_z * sizeof( float );
        err = hipMalloc( &m_distances, data_size );
        check_cuda_error( "Couldn't allocate space for distance data for TSDF", err );
        err = hipMalloc( &m_weights, data_size );
        check_cuda_error( "Couldn't allocate space for weight data for TSDF", err );
        err = hipMalloc( &m_colours, volume_x * volume_y * volume_z * sizeof( uchar3 ) );
        check_cuda_error( "Couldn't allocate space for colour data for TSDF", err );
        err = hipMalloc( &m_deformation_nodes, volume_x * volume_y * volume_z * sizeof( DeformationNode ) );
        check_cuda_error( "Couldn't allocate space for deformation nodes for TSDF", err );
        // Identity global transform until a registration sets one
        m_global_rotation = make_float3( 0.0f, 0.0f, 0.0f );
        m_global_translation = make_float3( 0.0f, 0.0f, 0.0f );
        // Initialise distances/weights/colours/deformation to their defaults
        clear();
        // Max weight for integrating depth images
        m_max_weight = 15.0f;
    } else {
        throw std::invalid_argument( "Attempt to set TSDF size or physical size to zero" );
    }
}
#pragma mark - Data access
/**
* Set the distance data for the TSDF in one call
* @param distance_data Pointer to enough floats to populate the TSFD
*/
/**
 * Replace the whole distance grid in one host-to-device copy.
 * @param distance_data One float per voxel, in the volume's linear layout
 */
void TSDFVolume::set_distance_data( const float * distance_data ) {
    size_t num_voxels = m_size.x * m_size.y * m_size.z;
    hipError_t err = hipMemcpy( m_distances, distance_data, num_voxels * sizeof( float ), hipMemcpyHostToDevice );
    check_cuda_error( "Couldn't set distance data", err );
}
/**
* Set the weight data for the TSDF in one call
* @param weight_data Pointer to enough floats to populate the TSFD
*/
/**
 * Replace the whole weight grid in one host-to-device copy.
 * @param weight_data One float per voxel, in the volume's linear layout
 */
void TSDFVolume::set_weight_data( const float * weight_data ) {
    size_t num_voxels = m_size.x * m_size.y * m_size.z;
    hipError_t err = hipMemcpy( m_weights, weight_data, num_voxels * sizeof( float ), hipMemcpyHostToDevice );
    check_cuda_error( "Couldn't set weight data", err );
}
/**
* Set the deformation data for this space
* @param data Data in host memory space; Assumed to be vx*vy*vz DeformationNode
*/
/**
 * Replace the whole deformation field in one host-to-device copy.
 * @param deformation One DeformationNode per voxel, in the volume's linear layout
 */
void TSDFVolume::set_deformation( DeformationNode *deformation) {
    size_t num_voxels = m_size.x * m_size.y * m_size.z;
    hipError_t err = hipMemcpy( m_deformation_nodes, deformation, num_voxels * sizeof( DeformationNode ), hipMemcpyHostToDevice );
    check_cuda_error( "Couldn't set deformation", err );
}
/**
* Reset the defomation grid by setting each translation point to the effectve, reglar position
* in space of that voxel centre and the related rotation to {0,0,0}
* @param deformation_nodes X x Y x Z array of DeformationNodes
* @param grid_size The size of the voxel grid
* @param voxel_size The size of an individual voxel
* @param grid_offset The offset of the grid
*/
__global__
void initialise_deformation( TSDFVolume::DeformationNode * deformation, dim3 grid_size, float3 voxel_size, float3 grid_offset ) {
    // Extract the voxel Y and Z coordinates we then iterate over X.
    // One thread handles a whole column of voxels along X.
    int vy = threadIdx.y + blockIdx.y * blockDim.y;
    int vz = threadIdx.z + blockIdx.z * blockDim.z;
    // If this thread is in range
    if ( vy < grid_size.y && vz < grid_size.z ) {
        // The next (x_size) elements from here are the x coords
        size_t base_voxel_index = ((grid_size.x * grid_size.y) * vz ) + (grid_size.x * vy);
        size_t voxel_index = base_voxel_index;
        for ( int vx = 0; vx < grid_size.x; vx++ ) {
            // Undeformed state: each node's translation is the absolute world
            // position of its voxel centre, and its rotation is zero
            deformation[voxel_index].translation.x = (( vx + 0.5f ) * voxel_size.x) + grid_offset.x;
            deformation[voxel_index].translation.y = (( vy + 0.5f ) * voxel_size.y) + grid_offset.y;
            deformation[voxel_index].translation.z = (( vz + 0.5f ) * voxel_size.z) + grid_offset.z;
            deformation[voxel_index].rotation.x = 0.0f;
            deformation[voxel_index].rotation.y = 0.0f;
            deformation[voxel_index].rotation.z = 0.0f;
            voxel_index++;
        }
    }
}
// Fill a float buffer with a constant: one thread per element, guarding the
// grid tail so any launch size is safe.
__global__
void set_memory_to_value( float * pointer, int size, float value ) {
    const int element = threadIdx.x + (blockIdx.x * blockDim.x );
    if( element >= size ) {
        return;
    }
    pointer[element] = value;
}
/**
* Clear the TSDF memory on the device
* zeros colour and weight data, sets distance to truncation_distance
*/
__host__
void TSDFVolume::clear( ) {
    int data_size = m_size.x * m_size.y * m_size.z;

    dim3 block( 1024, 1, 1 );
    dim3 grid ( divUp( data_size, block.x ), 1, 1 );
    hipError_t err;

    // Clear weights to 0
    hipLaunchKernelGGL(( set_memory_to_value), dim3(grid), dim3(block) , 0, 0, m_weights, data_size, 0.0f );
    hipDeviceSynchronize( );
    err = hipGetLastError();
    check_cuda_error( "Couldn't clear weight data", err );

    // Set distance data to truncation distance
    hipLaunchKernelGGL(( set_memory_to_value), dim3(grid), dim3(block) , 0, 0, m_distances, data_size, m_truncation_distance );
    hipDeviceSynchronize( );
    err = hipGetLastError();
    check_cuda_error( "Couldn't clear depth data", err );

    // Clear RGB data to black.
    // FIX: hipMemset takes (ptr, value, count); the value and count arguments
    // were swapped (value = data_size * 3, count = 0), so the colour buffer
    // was never actually cleared.
    err = hipMemset( m_colours, 0, data_size * sizeof( uchar3 ) );
    check_cuda_error( "Couldn't clear colour data", err );

    // Now initialise the deformations; one thread per (y, z) voxel column
    dim3 block2( 1, 32, 32 );
    dim3 grid2 ( 1, divUp( m_size.y, block2.y ), divUp( m_size.z, block2.z ) );
    hipLaunchKernelGGL(( initialise_deformation) , dim3(grid2), dim3(block2), 0, 0, m_deformation_nodes, m_size, m_voxel_size, m_offset );
    hipDeviceSynchronize( );
    err = hipGetLastError();
    check_cuda_error( "Couldn't initialise deformation nodes", err );
}
#pragma mark - Integrate new depth data
/**
 * Integrate a range map into the TSDF
 * This follows the approach in Cohen, N.S.V. 2013, 'Open Fusion', pp. 135.
 * whereby new maps have less weight than existing maps
 * @param depth_map Pointer to width*height depth values where 0 is an invalid depth and positive values are expressed in mm
 * @param width The horizontal dimension of the depth_map
 * @param height The height of the depth_map
 * @param camera The camera from which the depth_map was taken
 */
__host__
void TSDFVolume::integrate( const uint16_t * depth_map, uint32_t width, uint32_t height, const Camera & camera ) {
    assert( depth_map );
    std::cout << "Integrating depth map size " << width << "x" << height << std::endl;

    // Repackage the camera matrices into the plain device-side types
    Mat44 pose;
    Mat44 inv_pose;
    Mat33 k;
    Mat33 kinv;
    memcpy( &pose, camera.pose().data(), 16 * sizeof( float ) );
    memcpy( &inv_pose, camera.inverse_pose().data(), 16 * sizeof( float ) );
    memcpy( &k, camera.k().data(), 9 * sizeof( float ) );
    memcpy( &kinv, camera.kinv().data(), 9 * sizeof( float ) );

    // Stage the depth map in device memory
    uint16_t * d_depth_map;
    size_t depth_map_bytes = width * height * sizeof( uint16_t);
    hipError_t err = hipMalloc( &d_depth_map, depth_map_bytes );
    check_cuda_error( "Couldn't allocate storage for depth map", err);
    err = hipMemcpy( d_depth_map, depth_map, depth_map_bytes, hipMemcpyHostToDevice );
    check_cuda_error( "Failed to copy depth map to GPU", err);

    // Launch one thread per (y,z) column of voxels; the kernel walks x itself
    dim3 block( 1, 20, 20 );
    dim3 grid ( 1, divUp( m_size.y, block.y ), divUp( m_size.z, block.z ) );
    hipLaunchKernelGGL(( integrate_kernel) , dim3(grid), dim3(block), 0, 0, m_distances, m_weights, m_size, m_physical_size, m_deformation_nodes, m_offset, m_truncation_distance, m_max_weight, pose, inv_pose, k, kinv, width, height, d_depth_map);
    hipDeviceSynchronize( );
    err = hipGetLastError();
    check_cuda_error( "Integrate kernel failed", err);

    // Release the staged depth data
    err = hipFree( d_depth_map );
    check_cuda_error( "Failed to deallocate cuda depth map", err);
    std::cout << "Integration finished" << std::endl;
}
#pragma mark - Import/Export
/**
 * Save the TSDF to a binary file.
 * Copies distance, weight, colour and deformation data back from the device,
 * then writes a fixed header followed by those four data blocks.
 * @param file_name The filename
 * @return true if the file saved OK otherwise false.
 */
bool TSDFVolume::save_to_file( const std::string & file_name) const {
    using namespace std;

    bool success = true;

    // Host-side staging buffers for the device data
    float * host_distances = nullptr;
    uchar3 * host_colours = nullptr;
    float * host_weights = nullptr;
    DeformationNode * host_deformation = nullptr;

    size_t num_voxels = m_size.x * m_size.y * m_size.z;
    hipError_t err;

    // Copy distance data from device to host
    size_t distance_data_size = num_voxels * sizeof( float);
    host_distances = new float[ num_voxels ];
    if ( host_distances ) {
        err = hipMemcpy( host_distances, m_distances, distance_data_size, hipMemcpyDeviceToHost);
        if ( err != hipSuccess ) {
            success = false;
            std::cout << "Failed to copy voxel data from device memory [" << err << "] " << std::endl;
        }
    } else {
        // FIX: message previously referred to "host_voxels", a variable that doesn't exist
        std::cout << "Couldn't allocate host_distances memory to save TSDF" << std::endl;
        success = false;
    }

    // Copy weight data from device to host
    size_t weight_data_size = num_voxels * sizeof( float);
    if ( success ) {
        host_weights = new float[ num_voxels ];
        if ( host_weights) {
            err = hipMemcpy( host_weights, m_weights, weight_data_size, hipMemcpyDeviceToHost);
            if ( err != hipSuccess ) {
                success = false;
                std::cout << "Failed to copy weight data from device memory [" << err << "] " << std::endl;
            }
        } else {
            success = false;
            std::cout << "Couldn't allocate host_weights memory to save TSDF" << std::endl;
        }
    }

    // Copy colour data from device to host
    size_t colour_data_size = num_voxels * sizeof( uchar3 );
    if ( success ) {
        host_colours = new uchar3[ num_voxels ];
        if ( host_colours) {
            err = hipMemcpy( host_colours, m_colours, colour_data_size, hipMemcpyDeviceToHost);
            if ( err != hipSuccess ) {
                success = false;
                std::cout << "Failed to copy colour data from device memory [" << err << "] " << std::endl;
            }
        } else {
            success = false;
            std::cout << "Couldn't allocate host_colours memory to save TSDF" << std::endl;
        }
    }

    // Copy deformation data from device to host
    size_t deformation_data_size = num_voxels * sizeof( DeformationNode );
    if ( success ) {
        host_deformation = new DeformationNode[ num_voxels ];
        if ( host_deformation ) {
            err = hipMemcpy( host_deformation, m_deformation_nodes, deformation_data_size, hipMemcpyDeviceToHost);
            if ( err != hipSuccess ) {
                success = false;
                std::cout << "Failed to copy deformation data from device memory [" << err << "] " << std::endl;
            }
        } else {
            success = false;
            // FIX: message previously referred to "host_weights" (copy-paste from the weight branch)
            std::cout << "Couldn't allocate host_deformation memory to save TSDF" << std::endl;
        }
    }

    // Now it's all local, write to file
    if( success ) {
        ofstream ofs { file_name, ios::out | ios::binary };
        // ROBUSTNESS: previously the stream state was never checked, so an
        // unopenable path silently "succeeded".
        if ( ofs ) {
            // Write dimensions
            size_t header_size = sizeof( m_size ) + sizeof( m_physical_size) + sizeof( m_offset) + sizeof( m_truncation_distance ) + sizeof( m_max_weight ) + sizeof( m_global_translation) + sizeof(m_global_rotation);
            std::cout << "  writing "<< header_size <<" bytes of header data" << std::endl;
            ofs.write( (char *) &m_size, sizeof( m_size ) );
            ofs.write( (char *)&m_physical_size, sizeof( m_physical_size));
            ofs.write( (char *)&m_offset, sizeof( m_offset));
            ofs.write( (char *)&m_truncation_distance, sizeof( m_truncation_distance));
            ofs.write( (char *)&m_max_weight, sizeof( m_max_weight));
            ofs.write( (char *)&m_global_translation, sizeof( m_global_translation));
            ofs.write( (char *)&m_global_rotation, sizeof( m_global_rotation));

            std::cout << "  writing "<< distance_data_size <<" bytes of depth data" << std::endl;
            ofs.write( (char *)host_distances, distance_data_size );
            std::cout << "  writing "<< weight_data_size <<" bytes of weight data" << std::endl;
            ofs.write( (char *)host_weights, weight_data_size );
            std::cout << "  writing "<< colour_data_size <<" bytes of colour data" << std::endl;
            ofs.write( (char *)host_colours, colour_data_size );
            std::cout << "  writing "<< deformation_data_size <<" bytes of deformation data" << std::endl;
            ofs.write( (char *)host_deformation, deformation_data_size );
            ofs.close();
        } else {
            std::cout << "Couldn't open file " << file_name << " for writing" << std::endl;
            success = false;
        }
    } else {
        std::cout << "Not saving file due to previous errors" << std::endl;
    }

    // Free up memory
    if ( host_distances != nullptr ) { delete[] host_distances; }
    if ( host_colours != nullptr ) { delete[] host_colours; }
    if ( host_weights != nullptr ) { delete[] host_weights; }
    if ( host_deformation != nullptr ) { delete[] host_deformation; }

    return success;
}
/**
 * Load the given TSDF file
 * @param file_name The filename
 * @return true if the file loaded OK otherwise false.
 */
bool TSDFVolume::load_from_file( const std::string & file_name) {
    using namespace std;
    // NOTE(review): the stream is opened but never read — this is a
    // placeholder implementation that always reports failure.
    ifstream ifs{ file_name, ios::in | ios::binary };
    std::cout << "Not yet implemented: load_from_file" << std::endl;
    return false;
}
#pragma mark - Rendering
/**
 * Render this TSDF to a raycast image.
 * @param width Width of the raycast image in pixels
 * @param height Height of the raycast image in pixels
 * @param camera The camera from which to cast rays
 * @param vertices Filled with one 3D vertex per pixel (3 x width*height)
 * @param normals Filled with one 3D normal per pixel (3 x width*height)
 */
void TSDFVolume::raycast( uint16_t width, uint16_t height, const Camera& camera, Eigen::Matrix<float, 3, Eigen::Dynamic>& vertices, Eigen::Matrix<float, 3, Eigen::Dynamic>& normals ) const {
    // Delegate to the GPU raycaster, which writes vertices and normals in place
    GPURaycaster raycaster( width, height );
    raycaster.raycast( *this, camera, vertices, normals );
}
| a705630536e644f834378cb8384729e32eac75a0.cu | //
// TSDFVolume.cpp
// TSDF
//
// Created by Dave on 11/03/2016.
// Copyright © 2016 Sindesso. All rights reserved.
//
#include "libsdf/cuda_utilities.hpp"
#include "libsdf/cuda_coordinate_transforms.hpp"
#include "libsdf/TSDFVolume.hpp"
#include "libsdf/GPURaycaster.hpp"
#include "libsdf/TSDF_utilities.hpp"
#include <fstream>
#include <iomanip>
#include <cfloat>
#include <cstdint>
#include "math_constants.h"
const float POINT_EPSILON=0.001f;
/**
 * Compute the flat array index for voxel (x, y, z) in row-major order
 * (x varies fastest, then y, then z).
 * @param size The size (in voxels) of the volume
 * @param x The x coord
 * @param y The y coord
 * @param z The z coord
 * @return The index
 */
__device__ __forceinline__
size_t index( const dim3& size, int x, int y, int z ) {
    // Horner-style linearisation: ((z * sizeY + y) * sizeX) + x
    return ( ( size.y * z + y ) * size.x ) + x;
};
/**
 * Read the stored (truncated signed) distance at a voxel.
 * @param size The size (in voxels) of the volume
 * @param distance_data The depth data for the volume
 * @param x The horizontal voxel coord
 * @param y The vertical voxel coord
 * @param z The depth voxel coord
 * @return The distance to the surface at that voxel
 */
__device__ __forceinline__
float distance( const dim3& size, float *distance_data, int x, int y, int z ) {
    const size_t idx = index( size, x, y, z);
    return distance_data[ idx ];
}
/**
 * Store the distance to the surface at a grid point.
 * @param size The size (in voxels) of the volume
 * @param distance_data The depth data for the volume
 * @param x The horizontal voxel coord
 * @param y The vertical voxel coord
 * @param z The depth voxel coord
 * @param distance The distance to set
 */
__device__ __forceinline__
void set_distance(const dim3& size, float * distance_data, int x, int y, int z, float distance ) {
    distance_data[ index( size, x, y, z ) ] = distance;
}
/**
 * Read the integration weight at a voxel.
 * @param size The size (in voxels) of the volume
 * @param weights The weight data for the volume
 * @param x The horizontal voxel coord
 * @param y The vertical voxel coord
 * @param z The depth voxel coord
 * @return The weight at that voxel
 */
__device__ __forceinline__
float weight( const dim3& size, float * weights, int x, int y, int z ) {
    const size_t idx = index(size, x, y, z);
    return weights[ idx ];
}
/**
 * Store the integration weight at a voxel.
 * @param size The size (in voxels) of the volume
 * @param weights The weight data for the volume
 * @param x The horizontal voxel coord
 * @param y The vertical voxel coord
 * @param z The depth voxel coord
 * @param weight The weight to set
 */
__device__ __forceinline__
void set_weight( const dim3& size, float * weights, int x, int y, int z, float weight ) {
    const size_t idx = index(size, x, y, z);
    weights[ idx ] = weight;
}
/**
 * Obtain indices and trilinear coefficients for the gridpoints which surround the given point in space.
 * @param point The point in TSDF coordinate space
 * @param voxel_grid_size Dimensions of the TSDF
 * @param voxel_size The physical size of a single voxel
 * @param indices An array of 8 voxel indices surrounding the given point, ordered as the lower
 *        (minx,miny,minz) corner, then +x, then +x+z, then +z, followed by the same four with +y
 * @param coefficients The trilinear weight for each of the 8 indexed voxels
 * @return true If the values in indices are valid (ie point is in TSDF space)
 */
__device__
bool get_trilinear_elements( const float3 point,
                             const dim3 voxel_grid_size,
                             const float3 voxel_size,
                             int * const indices,
                             float * const coefficients ) {
    bool is_valid = false;

    // Nudge points that are within POINT_EPSILON outside the volume back onto its boundary
    float3 max_values {
        voxel_grid_size.x * voxel_size.x,
        voxel_grid_size.y * voxel_size.y,
        voxel_grid_size.z * voxel_size.z
    };
    float3 adjusted_point = point;
    if( (point.x > max_values.x) && ( point.x - max_values.x < POINT_EPSILON ) ) adjusted_point.x = max_values.x - POINT_EPSILON;
    if( (point.y > max_values.y) && ( point.y - max_values.y < POINT_EPSILON ) ) adjusted_point.y = max_values.y - POINT_EPSILON;
    if( (point.z > max_values.z) && ( point.z - max_values.z < POINT_EPSILON ) ) adjusted_point.z = max_values.z - POINT_EPSILON;
    if( point.x < -POINT_EPSILON ) adjusted_point.x = 0.0f;
    if( point.y < -POINT_EPSILON ) adjusted_point.y = 0.0f;
    if( point.z < -POINT_EPSILON ) adjusted_point.z = 0.0f;

    // Get the voxel containing this point
    int3 voxel = voxel_for_point( adjusted_point, voxel_size );

    // Handle voxel out of bounds
    if ( voxel.x >= 0 && voxel.y >= 0 && voxel.z >= 0 && voxel.x < voxel_grid_size.x && voxel.y < voxel_grid_size.y && voxel.z < voxel_grid_size.z) {
        // Get the centre of the voxel
        float3 v_centre = centre_of_voxel_at( voxel.x, voxel.y, voxel.z, voxel_size );

        // Set up the lower bound for trilinear interpolation
        int3 lower;
        lower.x = (adjusted_point.x < v_centre.x) ? voxel.x - 1 : voxel.x;
        lower.y = (adjusted_point.y < v_centre.y) ? voxel.y - 1 : voxel.y;
        lower.z = (adjusted_point.z < v_centre.z) ? voxel.z - 1 : voxel.z;

        // Handle lower out of bounds
        lower.x = max( lower.x, 0 );
        lower.y = max( lower.y, 0 );
        lower.z = max( lower.z, 0 );

        // Compute u,v,w: fractional position of the point within the lower cell, in [0,1]
        float3 lower_centre = centre_of_voxel_at( lower.x, lower.y, lower.z, voxel_size );
        float3 uvw = f3_sub( adjusted_point, lower_centre );
        uvw = f3_div_elem( uvw, voxel_size );
        float u = uvw.x;
        float v = uvw.y;
        float w = uvw.z;

        // Populate indices
        int delta_x = 1;
        int delta_y = voxel_grid_size.x;
        int delta_z = voxel_grid_size.x * voxel_grid_size.y;
        indices[0] = lower.x + ( lower.y * voxel_grid_size.x ) + ( lower.z * voxel_grid_size.x * voxel_grid_size.y );
        indices[1] = indices[0] + delta_x;
        indices[2] = indices[1] + delta_z;
        indices[3] = indices[0] + delta_z;
        indices[4] = indices[0] + delta_y;
        indices[5] = indices[1] + delta_y;
        indices[6] = indices[2] + delta_y;
        indices[7] = indices[3] + delta_y;

        // And coefficients
        coefficients[0] = (1 - u) * (1 - v) * (1 - w);
        coefficients[1] = u * (1 - v) * (1 - w);
        coefficients[2] = u * (1 - v) * w ;
        coefficients[3] = (1 - u) * (1 - v) * w;
        coefficients[4] = (1 - u) * v * (1 - w);
        coefficients[5] = u * v * (1 - w);
        coefficients[6] = (1 - u) * v * w;
        coefficients[7] = u * v * w;

        // BUG FIX: is_valid was never set, so the function always returned
        // false even when indices and coefficients had been populated.
        is_valid = true;
    }
    // Voxel is out of bounds and so can't be used.
    else {
        printf( "Point outside of voxel space %f, %f, %f\n", point.x, point.y, point.z );
    }
    return is_valid;
}
/**
 * Rotate a point by the given XYZ Euler angles.
 * @param point The point to rotate
 * @param rotation The rotation expressed as 3 Euler angles (radians)
 * @return The rotated point
 */
__device__
float3 rotate( const float3 point, const float3 rotation ) {
    // Precompute sines and cosines of each Euler angle
    const float cx = cos( rotation.x );
    const float cy = cos( rotation.y );
    const float cz = cos( rotation.z );
    const float sx = sin( rotation.x );
    const float sy = sin( rotation.y );
    const float sz = sin( rotation.z );

    // Apply the rows of the combined rotation matrix to the point
    const float rx = (cy * cz) * point.x - (cy * sz ) * point.y + sy * point.z;
    const float ry = (cx*sz + sx*sy*cz) * point.x + (cx*cz-sx*sy*sz) * point.y - (sx * cy ) * point.z;
    const float rz = (sx*sz - cx*sy*cz) * point.x + (sx*cz + cx*sy*sz)* point.y + (cx * cy ) * point.z;

    return make_float3( rx, ry, rz );
}
/**
 * Apply the TSDF deformation field to a collection of points, deforming them in place.
 * One thread handles one point.
 * @param global_rotation The global rotation of the space (Euler angles)
 * @param global_translation The global translation of the space
 * @param deformation_nodes An array of DeformationNodes, one per voxel
 * @param voxel_grid_size The voxel size of the space
 * @param voxel_size The physical size of one voxel
 * @param voxel_space_size The physical size of the space (NOTE(review): unused in this kernel)
 * @param offset The offset of the front, bottom, left corner
 * @param num_points The number of points to be transformed
 * @param points The points to be transformed in world coordinates; overwritten with the result
 */
__global__
void deformation_kernel( const float3 global_rotation,
                         const float3 global_translation,
                         const TSDFVolume::DeformationNode * const deformation_nodes,
                         const dim3 voxel_grid_size,
                         const float3 voxel_size,
                         const float3 voxel_space_size,
                         const float3 offset,
                         const int num_points,
                         float3 * points) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if( idx < num_points ) {
        float3 point = points[idx];

        // Shift into TSDF-local coordinates, then find the 8 surrounding nodes
        point = f3_sub( point, offset );
        int neighbours[8];
        float coefficients[8];
        // NOTE(review): the boolean result is ignored; if the point falls outside
        // the grid, neighbours/coefficients are left uninitialised and the reads
        // below index garbage — TODO confirm callers guarantee in-bounds points.
        get_trilinear_elements( point, voxel_grid_size, voxel_size, neighbours, coefficients );

        // Trilinearly blend the translations of the 8 surrounding deformation nodes
        float3 deformed_point{ 0.0f, 0.0f, 0.0f };
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[0], deformation_nodes[ neighbours[0] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[1], deformation_nodes[ neighbours[1] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[2], deformation_nodes[ neighbours[2] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[3], deformation_nodes[ neighbours[3] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[4], deformation_nodes[ neighbours[4] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[5], deformation_nodes[ neighbours[5] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[6], deformation_nodes[ neighbours[6] ].translation ) );
        deformed_point = f3_add( deformed_point, f3_mul_scalar( coefficients[7], deformation_nodes[ neighbours[7] ].translation ) );

        // Apply global rotation
        deformed_point = rotate( deformed_point, global_rotation );

        // Apply global translation
        deformed_point = f3_add( deformed_point, global_translation );

        // Set this to output point
        points[idx] = deformed_point;
    }
}
/**
 * Apply this volume's deformation field (plus the global rigid transform)
 * to an array of points, in place.
 * @param num_points The number of points in the array
 * @param points Host array of points in world coordinates; overwritten with the deformed points
 */
void TSDFVolume::deform_mesh( const int num_points, float3 * points ) const {
    const size_t points_bytes = num_points * sizeof( float3 );

    // Stage the points in device memory
    float3 * d_points;
    cudaSafeAlloc( (void **) &d_points, points_bytes, "d_points" );
    cudaError_t err = cudaMemcpy( d_points, points, points_bytes, cudaMemcpyHostToDevice );
    check_cuda_error( "Failed to copy points to device for deformation", err);

    // One thread per point
    dim3 block( 512, 1, 1 );
    dim3 grid ( divUp( num_points, block.x ), 1, 1 );
    deformation_kernel<<<grid, block >>>(m_global_rotation,
                                         m_global_translation,
                                         m_deformation_nodes,
                                         m_size,
                                         m_voxel_size,
                                         m_physical_size,
                                         m_offset,
                                         num_points,
                                         d_points );
    cudaDeviceSynchronize( );
    err = cudaGetLastError();
    check_cuda_error( "Deformation kernel failed", err);

    // Read back the deformed points and release the staging buffer
    err = cudaMemcpy( points, d_points, points_bytes, cudaMemcpyDeviceToHost );
    check_cuda_error( "Failed to copy points from device after deformation", err);
    cudaSafeFree( d_points, "d_points");
}
/**
 * Integrate a depth map into the TSDF. Launched with one thread per (y,z)
 * column of voxels; each thread walks the x axis itself.
 * @param distance_data The voxel values (in device memory)
 * @param weight_data The weight values (in device memory)
 * @param voxel_grid_size The voxel size of the space
 * @param voxel_space_size The physical size of the space (NOTE(review): unused here)
 * @param deformation_nodes Per-voxel deformation; translation is the voxel's deformed world-space centre
 * @param offset The offset of the front, bottom, left corner
 * @param trunc_distance A distance, greater than the voxel diagonal, at which we truncate distance measures in the TSDF
 * @param max_weight Upper bound for accumulated weights (NOTE(review): the clamp below is commented out, so this is currently unused)
 * @param pose The camera pose matrix (maps cam to world, 4x4) (NOTE(review): unused here)
 * @param inv_pose Inverse of the camera pose matrix (maps world to camera coords) (4x4)
 * @param k The camera's intrinsic parameters (3x3)
 * @param kinv Inverse of k (3x3)
 * @param width Width of the depth image
 * @param height Height of the depth image
 * @param depth_map Pointer to array of width*height uint16 types in device memory
 */
__global__
void integrate_kernel( float * distance_data,
                       float * weight_data,
                       dim3 voxel_grid_size,
                       float3 voxel_space_size,
                       TSDFVolume::DeformationNode * deformation_nodes,
                       float3 offset,
                       const float trunc_distance,
                       const float max_weight,
                       Mat44 pose,
                       Mat44 inv_pose,
                       Mat33 k,
                       Mat33 kinv,
                       uint32_t width,
                       uint32_t height,
                       const uint16_t * depth_map) {

    // Extract the voxel Y and Z coordinates we then iterate over X
    int vy = threadIdx.y + blockIdx.y * blockDim.y;
    int vz = threadIdx.z + blockIdx.z * blockDim.z;

    // If this thread is in range
    if ( vy < voxel_grid_size.y && vz < voxel_grid_size.z ) {

        // The next (x_size) elements from here are the x coords
        int voxel_index =  ((voxel_grid_size.x * voxel_grid_size.y) * vz ) + (voxel_grid_size.x * vy);

        // For each voxel in this column
        for ( int vx = 0; vx < voxel_grid_size.x; vx++ ) {

            // Work out where in the image, the centre of this voxel projects
            // This gives us a pixel in the depth map

            // Convert voxel to world coords of deformed centre
            float3 centre_of_voxel = f3_add( offset, deformation_nodes[ voxel_index ].translation);

            // Convert world to pixel coords
            int3   centre_of_voxel_in_pix = world_to_pixel( centre_of_voxel, inv_pose, k );

            // if this point is in the camera view frustum...
            if ( ( centre_of_voxel_in_pix.x >= 0 ) && ( centre_of_voxel_in_pix.x < width ) && ( centre_of_voxel_in_pix.y >= 0 ) && ( centre_of_voxel_in_pix.y < height) ) {

                // Extract the depth to the surface at this point
                uint32_t voxel_pixel_index = centre_of_voxel_in_pix.y * width + centre_of_voxel_in_pix.x;
                uint16_t surface_depth = depth_map[ voxel_pixel_index ];

                // If the depth is valid (0 marks an invalid measurement)
                if ( surface_depth > 0 ) {

                    // Project depth entry to a vertex ( in camera space)
                    float3 surface_vertex = pixel_to_camera( centre_of_voxel_in_pix, kinv, surface_depth );

                    // The SDF is the difference in camera-space depth between the
                    // observed surface and this voxel's centre
                    float3 voxel_cam = world_to_camera( centre_of_voxel, inv_pose );
                    float sdf = surface_vertex.z - voxel_cam.z;

                    // Only voxels no further than trunc_distance behind the surface are updated
                    if( sdf >= -trunc_distance ) {

                        // Truncate the sdf to the range -trunc_distance -> trunc_distance
                        float tsdf;
                        if ( sdf > 0 ) {
                            tsdf = min( sdf, trunc_distance);
                        } else {
                            tsdf = sdf;
                        }

                        // Weighted running average of the distance; each new frame has weight 1
                        float prior_weight = weight_data[voxel_index];
                        float current_weight = 1.0f;
                        float new_weight = prior_weight + current_weight;
                        //    new_weight = min(new_weight, max_weight );

                        float prior_distance = distance_data[voxel_index];
                        float new_distance = ( (prior_distance * prior_weight) + (tsdf * current_weight) ) / new_weight;

                        weight_data[voxel_index] = new_weight;
                        distance_data[voxel_index] = new_distance;
                    } // End of sdf > -trunc
                } // End of depth > 0
            } // End of point in frustum

            voxel_index++;
        } // End each voxel in this column
    }
}
/**
 * Destructor: logs destruction and releases all device-side buffers via deallocate().
 */
TSDFVolume::~TSDFVolume() {
    std::cout << "Destroying TSDFVolume" << std::endl;
    deallocate( );
}
/**
 * Deallocate all device storage owned by this TSDF.
 * Each pointer is nulled after freeing, so a repeated call is harmless.
 */
void TSDFVolume::deallocate( ) {
    if ( m_distances ) {
        cudaFree( m_distances ) ;
        m_distances = nullptr;
    }
    if ( m_weights ) {
        cudaFree( m_weights );
        m_weights = nullptr;
    }
    if ( m_colours ) {
        cudaFree( m_colours );
        m_colours = nullptr;
    }
    if ( m_deformation_nodes ) {
        cudaFree( m_deformation_nodes );
        m_deformation_nodes = nullptr;
    }
}
/**
 * Construct a TSDFVolume with the given voxel and physical dimensions.
 * @param size Number of voxels in each dimension
 * @param physical_size Physical extent in each dimension (mm)
 * @throws std::invalid_argument if any dimension is zero or negative
 */
TSDFVolume::TSDFVolume( const UInt3& size, const Float3& physical_size ) : m_offset { 0.0, 0.0, 0.0 }, m_distances {NULL}, m_weights {NULL}, m_deformation_nodes{NULL}, m_colours{NULL} {
    const bool voxels_ok = ( size.x > 0 ) && ( size.y > 0 ) && ( size.z > 0 );
    const bool extent_ok = ( physical_size.x > 0 ) && ( physical_size.y > 0 ) && ( physical_size.z > 0 );
    if ( !voxels_ok || !extent_ok ) {
        throw std::invalid_argument( "Attempt to construct TSDFVolume with zero or negative size" );
    }
    set_size( size.x, size.y, size.z , physical_size.x, physical_size.y, physical_size.z );
}
/**
 * Make a TSDFVolume with the given dimensions and physical dimensions
 * @param volume_x X dimension in voxels
 * @param volume_y Y dimension in voxels
 * @param volume_z Z dimension in voxels
 * @param psize_x Physical size in X dimension in mm
 * @param psize_y Physical size in Y dimension in mm
 * @param psize_z Physical size in Z dimension in mm
 * @throws std::invalid_argument if any dimension is zero or negative
 */
TSDFVolume::TSDFVolume( uint16_t volume_x, uint16_t volume_y, uint16_t volume_z, float psize_x, float psize_y, float psize_z ) : m_offset { 0.0, 0.0, 0.0 }, m_distances {NULL}, m_weights {NULL}, m_deformation_nodes{NULL}, m_colours{NULL} {
    if ( ( volume_x > 0 ) && ( volume_y > 0 ) && ( volume_z > 0 ) &&
            ( psize_x > 0 ) && ( psize_y > 0 ) && ( psize_z > 0 ) ) {
        set_size( volume_x, volume_y, volume_z , psize_x, psize_y, psize_z );
    } else {
        // FIX: message previously referred to "CPUTSDFVolume", inconsistent with
        // the class name and the sibling constructor's message.
        throw std::invalid_argument( "Attempt to construct TSDFVolume with zero or negative size" );
    }
}
/**
 * Load a TSDFVolume from the specified file. The volume must previously have been saved
 * (see save_to_file: a fixed header followed by distance, weight, colour and
 * deformation blocks). On any failure the constructor throws std::invalid_argument
 * with a message describing the first error encountered.
 */
TSDFVolume::TSDFVolume( const std::string& file_name ) {
    using namespace std;

    ifstream ifs{ file_name, ios::in | ios::binary };

    // Track the first failure; used to skip later stages and build the exception message
    bool success = true;
    std::string specific_error_message = "";

    // --- Header: voxel grid dimensions ---
    ifs.read( (char *) &m_size, sizeof( m_size ) );
    if( !ifs ) {
        specific_error_message = "Couldn't load file data";
        success=false;
    } else {
        std::cout << "Loading TSDF with size " << m_size.x << "x" << m_size.y << "x" << m_size.z << std::endl;
    }

    // --- Header: physical size; voxel size is derived from it ---
    if( success ) {
        ifs.read( (char *) &m_physical_size, sizeof( m_physical_size));
        if( ifs ) {
            std::cout << " physical size is " << m_physical_size.x << "x" << m_physical_size.y << "x" << m_physical_size.z << "mm" << std::endl;
            // Compute voxel size
            m_voxel_size = f3_div_elem( m_physical_size, m_size );
        } else {
            success=false;
            specific_error_message = "Couldn't load physical size";
        }
    }

    // Load other header stats
    if( ifs ) {
        ifs.read( (char *)&m_offset, sizeof( m_offset));
        ifs.read( (char *)&m_truncation_distance, sizeof( m_truncation_distance));
        ifs.read( (char *)&m_max_weight, sizeof( m_max_weight));
        ifs.read( (char *)&m_global_translation, sizeof( m_global_translation));
        ifs.read( (char *)&m_global_rotation, sizeof( m_global_rotation));
        std::cout << "offset     : (" << m_offset.x << "," << m_offset.y << "," << m_offset.z << ")" << std::endl;
        std::cout << "trunc dist : " << m_truncation_distance << std::endl;
        std::cout << "max weight : " << m_max_weight << std::endl;
        std::cout << "global t   : (" << m_global_translation.x << "," << m_global_translation.y << "," << m_global_translation.z << ")" << std::endl;
        std::cout << "global R   : (" << m_global_rotation.x << "," << m_global_rotation.y << "," << m_global_rotation.z << ")" << std::endl;
        if( ifs ) {
            std::cout << " read header data" << std::endl;
        } else {
            success = false;
            specific_error_message = "Couldn't load header data";
        }
    }

    // Compute some sizes
    size_t num_voxels = m_size.x * m_size.y * m_size.z;

    // --- Data block 1: distances. Pattern for each block is:
    //     allocate host buffer -> cudaMalloc device buffer -> read from file ->
    //     copy host to device -> free host buffer; undo on any failure. ---
    if( success ) {
        float * host_distances = new float[ num_voxels ];
        if( host_distances ) {
            size_t distance_data_size = num_voxels * sizeof( float);
            cudaError_t err = cudaMalloc( &m_distances, distance_data_size );
            if( err == cudaSuccess ) {
                // Read data into host memory, copy to device and free host memory
                ifs.read( ( char * ) host_distances, distance_data_size );
                if( ifs ) {
                    err = cudaMemcpy( m_distances, host_distances, distance_data_size, cudaMemcpyHostToDevice);
                    if( err == cudaSuccess ) {
                        std::cout << " loaded distance data" << std::endl;
                    } else {
                        specific_error_message = "Failed to copy distance data to device";
                        success = false;
                        cudaFree( m_distances );
                    }
                    delete[] host_distances;
                } else {
                    specific_error_message = "Failed to read distance data";
                    success = false;
                    delete[] host_distances;
                    cudaFree( m_distances );
                }
            } else {
                specific_error_message = "Failed to allocate device memory for distance data";
                success = false;
                delete[] host_distances;
            }
        } else {
            specific_error_message = "Failed to allocate host memory for distance data";
            success = false;
        }
    }

    // --- Data block 2: weights (same pattern) ---
    if( success ) {
        float * host_weights = new float[ num_voxels ];
        if( host_weights ) {
            size_t weight_data_size = num_voxels * sizeof( float);
            cudaError_t err = cudaMalloc( &m_weights, weight_data_size );
            if( err == cudaSuccess ) {
                // Read data into host memory, copy to device and free host memory
                ifs.read( ( char * ) host_weights, weight_data_size );
                if( ifs ) {
                    err = cudaMemcpy( m_weights, host_weights, weight_data_size, cudaMemcpyHostToDevice);
                    if( err == cudaSuccess ) {
                        std::cout << " loaded weight data" << std::endl;
                    } else {
                        specific_error_message = "Failed to copy weight data to device";
                        success = false;
                        cudaFree( m_weights );
                    }
                    delete[] host_weights;
                } else {
                    specific_error_message = "Failed to read weight data";
                    success = false;
                    delete[] host_weights;
                    cudaFree( m_weights );
                }
            } else {
                specific_error_message = "Failed to allocate device memory for weight data";
                success = false;
                delete[] host_weights;
            }
        } else {
            specific_error_message = "Failed to allocate host memory for weight data";
            success = false;
        }
    }

    // --- Data block 3: colours (same pattern) ---
    if( success ) {
        uchar3 * host_colours = new uchar3[ num_voxels ];
        if( host_colours ) {
            size_t colour_data_size = num_voxels * sizeof( uchar3 );
            cudaError_t err = cudaMalloc( &m_colours, colour_data_size );
            if( err == cudaSuccess ) {
                // Read data into host memory, copy to device and free host memory
                ifs.read( ( char * ) host_colours, colour_data_size );
                if( ifs ) {
                    err = cudaMemcpy( m_colours, host_colours, colour_data_size, cudaMemcpyHostToDevice);
                    if( err == cudaSuccess ) {
                        std::cout << " loaded colour data" << std::endl;
                    } else {
                        specific_error_message = "Failed to copy colour data to device";
                        success = false;
                        cudaFree( m_colours );
                    }
                    delete[] host_colours;
                } else {
                    specific_error_message = "Failed to read colour data";
                    success = false;
                    delete[] host_colours;
                    cudaFree( m_colours );
                }
            } else {
                specific_error_message = "Failed to allocate device memory for colour data";
                success = false;
                delete[] host_colours;
            }
        } else {
            specific_error_message = "Failed to allocate host memory for colour data";
            success = false;
        }
    }

    // --- Data block 4: deformation nodes (same pattern) ---
    if( success ) {
        DeformationNode * host_deformations = new DeformationNode[ num_voxels ];
        if( host_deformations ) {
            size_t deformation_data_size = num_voxels * sizeof( DeformationNode );
            cudaError_t err = cudaMalloc( &m_deformation_nodes, deformation_data_size );
            if( err == cudaSuccess ) {
                // Read data into host memory, copy to device and free host memory
                ifs.read( ( char * ) host_deformations, deformation_data_size );
                if( ifs ) {
                    err = cudaMemcpy( m_deformation_nodes, host_deformations, deformation_data_size, cudaMemcpyHostToDevice);
                    if( err == cudaSuccess ) {
                        std::cout << " loaded deformation data" << std::endl;
                    } else {
                        specific_error_message = "Failed to copy deformation data to device";
                        success = false;
                        cudaFree( m_deformation_nodes );
                    }
                    delete[] host_deformations;
                } else {
                    specific_error_message = "Failed to read deformation data";
                    success = false;
                    delete[] host_deformations;
                    cudaFree( m_deformation_nodes );
                }
            } else {
                specific_error_message = "Failed to allocate device memory for deformation data";
                success = false;
                delete[] host_deformations;
            }
        } else {
            specific_error_message = "Failed to allocate host memory for deformation data";
            success = false;
        }
    }

    ifs.close();

    // Any failure above aborts construction with a descriptive exception
    if( !success ) {
        std::string msg = "Failed to load TSDF ";
        msg += file_name;
        msg += " " + specific_error_message;
        throw std::invalid_argument( msg );
    }
}
/**
 * Set the size of the volume. This will delete any existing values and resize the volume, clearing it when done.
 * Volume offset is maintained.
 * @param volume_x X dimension in voxels
 * @param volume_y Y dimension in voxels
 * @param volume_z Z dimension in voxels
 * @param psize_x Physical size in X dimension in mm
 * @param psize_y Physical size in Y dimension in mm
 * @param psize_z Physical size in Z dimension in mm
 * @throws std::invalid_argument if any dimension is zero
 */
__host__
void TSDFVolume::set_size( uint16_t volume_x, uint16_t volume_y, uint16_t volume_z, float psize_x, float psize_y, float psize_z) {
    if ( ( volume_x != 0 && volume_y != 0 && volume_z != 0 ) && ( psize_x != 0 && psize_y != 0 && psize_z != 0 ) ) {
        // Remove existing data
        deallocate() ;

        m_size = dim3 { volume_x, volume_y, volume_z };
        m_physical_size = float3 { psize_x, psize_y, psize_z };

        // Compute truncation distance - must be at least 2x max voxel size
        m_voxel_size = f3_div_elem( m_physical_size, m_size );

        // Set t > diagonal of voxel
        m_truncation_distance = 1.1f * f3_norm( m_voxel_size );

        // Allocate device storage.
        // FIX: widen to size_t *before* multiplying — the uint16_t operands
        // promote to int, so e.g. a 2048^3 volume would overflow 32-bit int.
        const size_t num_voxels = static_cast<size_t>( volume_x ) * volume_y * volume_z;
        cudaError_t err;
        size_t data_size = num_voxels * sizeof( float );
        err = cudaMalloc( &m_distances, data_size );
        check_cuda_error( "Couldn't allocate space for distance data for TSDF", err );

        err = cudaMalloc( &m_weights, data_size );
        check_cuda_error( "Couldn't allocate space for weight data for TSDF", err );

        err = cudaMalloc( &m_colours, num_voxels * sizeof( uchar3 ) );
        check_cuda_error( "Couldn't allocate space for colour data for TSDF", err );

        err = cudaMalloc( &m_deformation_nodes, num_voxels * sizeof( DeformationNode ) );
        check_cuda_error( "Couldn't allocate space for deformation nodes for TSDF", err );

        m_global_rotation = make_float3( 0.0f, 0.0f, 0.0f );
        m_global_translation = make_float3( 0.0f, 0.0f, 0.0f );

        clear();

        // Max weight for integrating depth images
        m_max_weight = 15.0f;
    } else {
        throw std::invalid_argument( "Attempt to set TSDF size or physical size to zero" );
    }
}
#pragma mark - Data access
/**
 * Set the distance data for the TSDF in one call.
 * @param distance_data Pointer to enough floats (one per voxel) to populate the TSDF
 */
void TSDFVolume::set_distance_data( const float * distance_data ) {
    const size_t bytes = m_size.x * m_size.y * m_size.z * sizeof( float);
    check_cuda_error( "Couldn't set distance data",
                      cudaMemcpy( m_distances, distance_data, bytes, cudaMemcpyHostToDevice ) );
}
/**
* Set the weight data for the TSDF in one call
* @param weight_data Pointer to enough floats to populate the TSFD
*/
void TSDFVolume::set_weight_data( const float * weight_data ) {
    // Bulk-upload a host-side weight field into the device buffer.
    // Caller must supply at least one float per voxel of the volume.
    const size_t num_voxels = m_size.x * m_size.y * m_size.z;
    cudaError_t err = cudaMemcpy( m_weights, weight_data, num_voxels * sizeof( float ), cudaMemcpyHostToDevice );
    check_cuda_error( "Couldn't set weight data", err );
}
/**
* Set the deformation data for this space
* @param data Data in host memory space; Assumed to be vx*vy*vz DeformationNode
*/
void TSDFVolume::set_deformation( DeformationNode *deformation) {
    // Bulk-upload a host-side deformation grid (one DeformationNode per voxel)
    // into the device buffer.
    const size_t num_voxels = m_size.x * m_size.y * m_size.z;
    cudaError_t err = cudaMemcpy( m_deformation_nodes, deformation, num_voxels * sizeof( DeformationNode ), cudaMemcpyHostToDevice );
    check_cuda_error( "Couldn't set deformation", err );
}
/**
* Reset the defomation grid by setting each translation point to the effectve, reglar position
* in space of that voxel centre and the related rotation to {0,0,0}
* @param deformation_nodes X x Y x Z array of DeformationNodes
* @param grid_size The size of the voxel grid
* @param voxel_size The size of an individual voxel
* @param grid_offset The offset of the grid
*/
__global__
void initialise_deformation( TSDFVolume::DeformationNode * deformation, dim3 grid_size, float3 voxel_size, float3 grid_offset ) {
    // One thread per (y, z) column; each thread walks the full X extent so
    // consecutive writes within a thread are contiguous in memory.
    const int vy = threadIdx.y + blockIdx.y * blockDim.y;
    const int vz = threadIdx.z + blockIdx.z * blockDim.z;
    if ( vy >= grid_size.y || vz >= grid_size.z ) return;

    // Row-major layout: index = (X*Y)*z + X*y + x.
    size_t voxel_index = ((grid_size.x * grid_size.y) * vz ) + (grid_size.x * vy);
    for ( int vx = 0; vx < grid_size.x; ++vx, ++voxel_index ) {
        TSDFVolume::DeformationNode & node = deformation[voxel_index];
        // Translation = centre of the voxel in world space; rotation starts at zero.
        node.translation.x = (( vx + 0.5f ) * voxel_size.x) + grid_offset.x;
        node.translation.y = (( vy + 0.5f ) * voxel_size.y) + grid_offset.y;
        node.translation.z = (( vz + 0.5f ) * voxel_size.z) + grid_offset.z;
        node.rotation.x = 0.0f;
        node.rotation.y = 0.0f;
        node.rotation.z = 0.0f;
    }
}
__global__
void set_memory_to_value( float * pointer, int size, float value ) {
    // Flat 1D fill: each thread writes at most one element of pointer[0..size).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= size ) return;
    pointer[idx] = value;
}
/**
* Clear the TSDF memory on the device
* zeros colour and weight data, sets distance to truncation_distance
*/
__host__
void TSDFVolume::clear( ) {
    // Reset the volume to its pristine state:
    //   weights           -> 0
    //   distances         -> truncation distance ("far" everywhere)
    //   colours           -> black
    //   deformation nodes -> regular voxel-centre positions, zero rotation
    int data_size = m_size.x * m_size.y * m_size.z;
    dim3 block( 1024, 1, 1 );
    dim3 grid ( divUp( data_size, block.x ), 1, 1 );
    cudaError_t err;

    // Clear weights to 0
    set_memory_to_value<<< grid, block >>>( m_weights, data_size, 0.0f );
    cudaDeviceSynchronize( );
    err = cudaGetLastError();
    check_cuda_error( "Couldn't clear weight data", err );

    // Set distance data to truncation distance
    set_memory_to_value<<< grid, block >>>( m_distances, data_size, m_truncation_distance );
    cudaDeviceSynchronize( );
    err = cudaGetLastError();
    check_cuda_error( "Couldn't clear depth data", err );

    // Clear RGB data to black.
    // BUG FIX: cudaMemset takes ( ptr, value, count ); the previous code passed
    // ( ptr, count, 0 ) which requested a zero-byte fill and left the colour
    // buffer untouched.
    err = cudaMemset( m_colours, 0, data_size * sizeof( uchar3 ) );
    check_cuda_error( "Couldn't clear colour data", err );

    // Now initialise the deformations: one thread per (y,z) column.
    dim3 block2( 1, 32, 32 );
    dim3 grid2 ( 1, divUp( m_size.y, block2.y ), divUp( m_size.z, block2.z ) );
    initialise_deformation <<<grid2, block2>>>( m_deformation_nodes, m_size, m_voxel_size, m_offset );
    cudaDeviceSynchronize( );
    err = cudaGetLastError();
    check_cuda_error( "Couldn't initialise deformation nodes", err );
}
#pragma mark - Integrate new depth data
/**
* Integrate a range map into the TSDF
* This follows the approach in Cohen, N.S.V. 2013, 'Open Fusion', pp. 1–35.
* whereby new maps have less weight than existing maps
* @param depth_map Pointer to width*height depth values where 0 is an invalid depth and positive values are expressed in mm
* @param width The horiontal dimension of the depth_map
* @param height The height of the depth_map
* @param camera The camera from which the depth_map was taken
*/
__host__
void TSDFVolume::integrate( const uint16_t * depth_map, uint32_t width, uint32_t height, const Camera & camera ) {
assert( depth_map );
std::cout << "Integrating depth map size " << width << "x" << height << std::endl;
// Convert the input parameters to device (CUDA) types
// Camera matrices are staged into POD structs so they can be passed by value
// to the kernel. Assumes Eigen-style .data() yields column-major floats that
// match the Mat44/Mat33 layout -- TODO confirm against the Mat definitions.
Mat44 pose;
memcpy( &pose, camera.pose().data(), 16 * sizeof( float ) );
Mat44 inv_pose;
memcpy( &inv_pose, camera.inverse_pose().data(), 16 * sizeof( float ) );
Mat33 k;
memcpy( &k, camera.k().data(), 9 * sizeof( float ) );
Mat33 kinv;
memcpy( &kinv, camera.kinv().data(), 9 * sizeof( float ) );
// Copy depth map data to device
// The device copy lives only for the duration of this call (freed below).
uint16_t * d_depth_map;
size_t data_size = width * height * sizeof( uint16_t);
cudaError_t err = cudaMalloc( &d_depth_map, data_size );
check_cuda_error( "Couldn't allocate storage for depth map", err);
err = cudaMemcpy( d_depth_map, depth_map, data_size, cudaMemcpyHostToDevice );
check_cuda_error( "Failed to copy depth map to GPU", err);
// Call the kernel
// Grid covers the volume's (y, z) extent; presumably the kernel iterates the
// X dimension internally (integrate_kernel is defined elsewhere -- confirm).
dim3 block( 1, 20, 20 );
dim3 grid ( 1, divUp( m_size.y, block.y ), divUp( m_size.z, block.z ) );
integrate_kernel <<< grid, block>>>( m_distances, m_weights, m_size, m_physical_size, m_deformation_nodes, m_offset, m_truncation_distance, m_max_weight, pose, inv_pose, k, kinv, width, height, d_depth_map);
cudaDeviceSynchronize( );
err = cudaGetLastError();
check_cuda_error( "Integrate kernel failed", err);
// Now delete depth map data from device
err = cudaFree( d_depth_map );
check_cuda_error( "Failed to deallocate cuda depth map", err);
std::cout << "Integration finished" << std::endl;
}
#pragma mark - Import/Export
/**
* Save the TSDF to a binary file
* @param The filename
* @return true if the file saved OK otherwise false.
*/
// Serialise the TSDF to a flat binary file:
//   header  : m_size, m_physical_size, m_offset, m_truncation_distance,
//             m_max_weight, m_global_translation, m_global_rotation
//   payload : distances, weights, colours, deformation nodes (raw buffer dumps)
// All four device buffers are first staged into freshly allocated host arrays.
// NOTE(review): plain operator new[] throws std::bad_alloc rather than
// returning nullptr, so the null checks below are effectively dead code.
// NOTE(review): the output stream is never checked with is_open()/good(), and
// the data is written in native endianness / struct layout.
bool TSDFVolume::save_to_file( const std::string & file_name) const {
using namespace std;
bool success = true;
// We need to extract the data from the GPU device into host memory
float * host_distances = nullptr;
uchar3 * host_colours = nullptr;
float * host_weights = nullptr;
DeformationNode * host_deformation = nullptr;
size_t num_voxels = m_size.x * m_size.y * m_size.z;
cudaError_t err;
// Copy distance data from device to host
size_t distance_data_size = num_voxels * sizeof( float);
host_distances = new float[ num_voxels ];
if ( host_distances ) {
err = cudaMemcpy( host_distances, m_distances, distance_data_size, cudaMemcpyDeviceToHost);
if ( err != cudaSuccess ) {
success = false;
std::cout << "Failed to copy voxel data from device memory [" << err << "] " << std::endl;
}
} else {
std::cout << "Couldn't allocate host_voxels memory to save TSDF" << std::endl;
success = false;
}
// Copy weight data from device to host
size_t weight_data_size = num_voxels * sizeof( float);
if ( success ) {
host_weights = new float[ num_voxels ];
if ( host_weights) {
err = cudaMemcpy( host_weights, m_weights, weight_data_size, cudaMemcpyDeviceToHost);
if ( err != cudaSuccess ) {
success = false;
std::cout << "Failed to copy weight data from device memory [" << err << "] " << std::endl;
}
} else {
success = false;
std::cout << "Couldn't allocate host_weights memory to save TSDF" << std::endl;
}
}
// Copy colour data from device to host
size_t colour_data_size = num_voxels * sizeof( uchar3 );
if ( success ) {
host_colours = new uchar3[ num_voxels ];
if ( host_colours) {
err = cudaMemcpy( host_colours, m_colours, colour_data_size, cudaMemcpyDeviceToHost);
if ( err != cudaSuccess ) {
success = false;
std::cout << "Failed to copy colour data from device memory [" << err << "] " << std::endl;
}
} else {
success = false;
std::cout << "Couldn't allocate host_colours memory to save TSDF" << std::endl;
}
}
// Copy deformation data from device to host
size_t deformation_data_size = num_voxels * sizeof( DeformationNode );
if ( success ) {
host_deformation = new DeformationNode[ num_voxels ];
if ( host_deformation ) {
err = cudaMemcpy( host_deformation, m_deformation_nodes, deformation_data_size, cudaMemcpyDeviceToHost);
if ( err != cudaSuccess ) {
success = false;
std::cout << "Failed to copy deformation data from device memory [" << err << "] " << std::endl;
}
} else {
success = false;
std::cout << "Couldn't allocate host_weights memory to save TSDF" << std::endl;
}
}
// Now it's all local, write to file
if( success ) {
ofstream ofs { file_name, ios::out | ios::binary };
// Write dimesnions
size_t header_size = sizeof( m_size ) + sizeof( m_physical_size) + sizeof( m_offset) + sizeof( m_truncation_distance ) + sizeof( m_max_weight ) + sizeof( m_global_translation) + sizeof(m_global_rotation);
std::cout << " writing "<< header_size <<" bytes of header data" << std::endl;
ofs.write( (char *) &m_size, sizeof( m_size ) );
ofs.write( (char *)&m_physical_size, sizeof( m_physical_size));
ofs.write( (char *)&m_offset, sizeof( m_offset));
ofs.write( (char *)&m_truncation_distance, sizeof( m_truncation_distance));
ofs.write( (char *)&m_max_weight, sizeof( m_max_weight));
ofs.write( (char *)&m_global_translation, sizeof( m_global_translation));
ofs.write( (char *)&m_global_rotation, sizeof( m_global_rotation));
std::cout << " writing "<< distance_data_size <<" bytes of depth data" << std::endl;
ofs.write( (char *)host_distances, distance_data_size );
std::cout << " writing "<< weight_data_size <<" bytes of weight data" << std::endl;
ofs.write( (char *)host_weights, weight_data_size );
std::cout << " writing "<< colour_data_size <<" bytes of colour data" << std::endl;
ofs.write( (char *)host_colours, colour_data_size );
std::cout << " writing "<< deformation_data_size <<" bytes of deformation data" << std::endl;
ofs.write( (char *)host_deformation, deformation_data_size );
ofs.close();
} else {
std::cout << "Not saving file due to previous errors" << std::endl;
}
// Free up memory
if ( host_distances != nullptr ) { delete[] host_distances; }
if ( host_colours != nullptr ) { delete[] host_colours; }
if ( host_weights != nullptr ) { delete[] host_weights; }
if ( host_deformation != nullptr ) { delete[] host_deformation; }
return success;
}
/**
* Load the given TSDF file
* @param The filename
* @return true if the file saved OK otherwise false.
*/
bool TSDFVolume::load_from_file( const std::string & file_name) {
    // Stub: deserialisation is not implemented yet. The stream is opened but
    // never read, and the call always reports failure.
    std::ifstream ifs{ file_name, std::ios::in | std::ios::binary };
    // TODO: read header (sizes / offsets), then bulk data, then upload to device.
    std::cout << "Not yet implemented: load_from_file" << std::endl;
    return false;
}
#pragma mark - Rendering
/**
* Render this TSDF to a raycast image
*/
void TSDFVolume::raycast( uint16_t width, uint16_t height, const Camera& camera, Eigen::Matrix<float, 3, Eigen::Dynamic>& vertices, Eigen::Matrix<float, 3, Eigen::Dynamic>& normals ) const {
    // Delegate to the GPU raycaster, which fills the vertex and normal maps.
    GPURaycaster caster( width, height );
    caster.raycast( *this, camera, vertices, normals );
}
|
6cd71939dfd0029eeaa2349b497da51b9c6d9022.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/ndarray/ndarray_apply_binary_core.h"
#include "oneflow/core/ndarray/binary_func.h"
namespace oneflow {
namespace {
// Elementwise binary kernel: writes binary_func(a[i], b[i]) into y[i] for
// i in [0, n). Striding over n is delegated to NdarrayApplyBinaryCore::Apply.
template<typename T, template<typename> class binary_func>
__global__ void NdarrayApplyBinaryApplyGpu(size_t n,
typename BinaryFuncTrait<binary_func, T>::return_type* y,
const T* a, const T* b) {
NdarrayApplyBinaryCore<T, binary_func>::Apply(n, y, a, b);
}
// In-place variant: combines x into y elementwise over [0, n). The exact
// operand order is defined by NdarrayApplyBinaryCore::InplaceApply
// (defined elsewhere -- presumably y[i] = binary_func(y[i], x[i])).
template<typename T, template<typename> class binary_func>
__global__ void NdarrayApplyBinaryInplaceApplyGpu(size_t n, T* y, const T* x) {
NdarrayApplyBinaryCore<T, binary_func>::InplaceApply(n, y, x);
}
} // namespace
// GPU specialisation of the host-side launcher for the binary-apply kernels.
// RUN_CUDA_KERNEL (macro defined elsewhere) receives n twice: presumably once
// to size the launch and once as the kernel's element-count argument.
template<typename T, template<typename> class binary_func>
struct NdarrayApplyBinaryCoreWrapper<DeviceType::kGPU, T, binary_func> final {
// Launch y = binary_func(a, b) elementwise; no-op for empty ndarrays.
static void Apply(ep::Stream* stream,
const XpuVarNdarray<typename BinaryFuncTrait<binary_func, T>::return_type>& y,
const XpuVarNdarray<const T>& a, const XpuVarNdarray<const T>& b) {
size_t n = y.host_shape().HostElemNum();
if (n == 0) { return; }
RUN_CUDA_KERNEL((NdarrayApplyBinaryApplyGpu<T, binary_func>), stream, n, n, y.host_ptr(),
a.host_ptr(), b.host_ptr());
}
// Launch the in-place variant combining x into y; no-op for empty ndarrays.
static void InplaceApply(ep::Stream* stream, const XpuVarNdarray<T>& y,
const XpuVarNdarray<const T>& x) {
size_t n = y.host_shape().HostElemNum();
if (n == 0) { return; }
RUN_CUDA_KERNEL((NdarrayApplyBinaryInplaceApplyGpu<T, binary_func>), stream, n, n, y.host_ptr(),
x.host_ptr());
}
};
// Explicitly instantiate the GPU wrapper for every (dtype, op) combination:
// arithmetic + half + unsigned-int dtypes crossed with arithmetic ops, then
// the same dtypes crossed with logical (comparison) ops.
#define INSTANTIATE_NDARRAY_APPLY_BINARY_CORE(dtype_pair, binary_func) \
template struct NdarrayApplyBinaryCoreWrapper<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype_pair), \
binary_func>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_NDARRAY_APPLY_BINARY_CORE,
ARITHMETIC_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ
UNSIGNED_INT_DATA_TYPE_SEQ,
ARITHMETIC_BINARY_FUNC_SEQ);
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_NDARRAY_APPLY_BINARY_CORE,
ARITHMETIC_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ
UNSIGNED_INT_DATA_TYPE_SEQ,
LOGICAL_BINARY_FUNC_SEQ);
} // namespace oneflow
| 6cd71939dfd0029eeaa2349b497da51b9c6d9022.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/ndarray/ndarray_apply_binary_core.h"
#include "oneflow/core/ndarray/binary_func.h"
namespace oneflow {
namespace {
// Elementwise binary kernel: writes binary_func(a[i], b[i]) into y[i] for
// i in [0, n). Striding over n is delegated to NdarrayApplyBinaryCore::Apply.
template<typename T, template<typename> class binary_func>
__global__ void NdarrayApplyBinaryApplyGpu(size_t n,
typename BinaryFuncTrait<binary_func, T>::return_type* y,
const T* a, const T* b) {
NdarrayApplyBinaryCore<T, binary_func>::Apply(n, y, a, b);
}
// In-place variant: combines x into y elementwise over [0, n). The exact
// operand order is defined by NdarrayApplyBinaryCore::InplaceApply
// (defined elsewhere -- presumably y[i] = binary_func(y[i], x[i])).
template<typename T, template<typename> class binary_func>
__global__ void NdarrayApplyBinaryInplaceApplyGpu(size_t n, T* y, const T* x) {
NdarrayApplyBinaryCore<T, binary_func>::InplaceApply(n, y, x);
}
} // namespace
// GPU specialisation of the host-side launcher for the binary-apply kernels.
// RUN_CUDA_KERNEL (macro defined elsewhere) receives n twice: presumably once
// to size the launch and once as the kernel's element-count argument.
template<typename T, template<typename> class binary_func>
struct NdarrayApplyBinaryCoreWrapper<DeviceType::kGPU, T, binary_func> final {
// Launch y = binary_func(a, b) elementwise; no-op for empty ndarrays.
static void Apply(ep::Stream* stream,
const XpuVarNdarray<typename BinaryFuncTrait<binary_func, T>::return_type>& y,
const XpuVarNdarray<const T>& a, const XpuVarNdarray<const T>& b) {
size_t n = y.host_shape().HostElemNum();
if (n == 0) { return; }
RUN_CUDA_KERNEL((NdarrayApplyBinaryApplyGpu<T, binary_func>), stream, n, n, y.host_ptr(),
a.host_ptr(), b.host_ptr());
}
// Launch the in-place variant combining x into y; no-op for empty ndarrays.
static void InplaceApply(ep::Stream* stream, const XpuVarNdarray<T>& y,
const XpuVarNdarray<const T>& x) {
size_t n = y.host_shape().HostElemNum();
if (n == 0) { return; }
RUN_CUDA_KERNEL((NdarrayApplyBinaryInplaceApplyGpu<T, binary_func>), stream, n, n, y.host_ptr(),
x.host_ptr());
}
};
// Explicitly instantiate the GPU wrapper for every (dtype, op) combination:
// arithmetic + half + unsigned-int dtypes crossed with arithmetic ops, then
// the same dtypes crossed with logical (comparison) ops.
#define INSTANTIATE_NDARRAY_APPLY_BINARY_CORE(dtype_pair, binary_func) \
template struct NdarrayApplyBinaryCoreWrapper<DeviceType::kGPU, OF_PP_PAIR_FIRST(dtype_pair), \
binary_func>;
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_NDARRAY_APPLY_BINARY_CORE,
ARITHMETIC_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ
UNSIGNED_INT_DATA_TYPE_SEQ,
ARITHMETIC_BINARY_FUNC_SEQ);
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_NDARRAY_APPLY_BINARY_CORE,
ARITHMETIC_DATA_TYPE_SEQ HALF_DATA_TYPE_SEQ
UNSIGNED_INT_DATA_TYPE_SEQ,
LOGICAL_BINARY_FUNC_SEQ);
} // namespace oneflow
|
9570e12bd2ee35f2354c8a9ab9a03e1f52db622c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
const char* getfield(char* line, int num){
    // Return the num-th (1-based) comma-separated token of `line`, or NULL if
    // there are fewer than num tokens (or num <= 0).
    // NOTE: destructive -- strtok writes NUL terminators into `line`.
    const char* tok = strtok(line, ",");
    while (tok && *tok) {
        if (--num == 0) {
            return tok;
        }
        tok = strtok(NULL, ",\n");
    }
    return NULL;
}
// Leaky integrate-and-fire update for one neuron (one thread per neuron).
// u/unext hold the current/next membrane potentials of an N x N grid in
// row-major order; each neuron couples to a (2R+1)x(2R+1) square neighbourhood
// with toroidal wrap-around (the N + ... offsets followed by % N below).
// Scalar parameters arrive as device pointers and are copied to locals first.
// NOTE(review): there is no myId < N*N guard, so the launch must supply
// exactly N*N threads (the host launches <<<100,100>>> with N == 100).
__global__ void calcPot(double *u, double *unext, int *currRefracIter, double *currTime, int *d_it, double *w, double *lastTime, double *d_refracTime,
int *d_maxRefracIter, int *d_N, int *d_R, double *d_uth, int *d_minMPVIter, double *d_dt, double *d_mi, double *d_sumCoeff){
double pi=3.14159265359;
int it = *d_it;
double refracTime = *d_refracTime;
int maxRefracIter = *d_maxRefracIter;
int N = *d_N;
int R = *d_R;
double uth = *d_uth;
int minMPVIter = *d_minMPVIter;
double dt = *d_dt;
double mi = *d_mi;
double sumCoeff = *d_sumCoeff;
int myId = blockDim.x * blockIdx.x + threadIdx.x;
/*******Refractory Period*******/
// A neuron that just fired (u == 0) sits out maxRefracIter update steps.
if (*(u+myId)==0 && *(currRefracIter+myId)<maxRefracIter){
(*(currRefracIter+myId))++;
return;
}
else{
*(currRefracIter+myId)=0;
}
/*******Sum Calculation*******/
// Coupling term: sum of (u_self - u_neighbour) over the square neighbourhood.
double sumVar=0.0;
int k,l;
int iLeftCorner=N+myId/N-R;
int jLeftCorner=N+myId%N-R;
for (k=iLeftCorner; k<iLeftCorner+2*R+1; k++){
for (l=jLeftCorner; l<jLeftCorner+2*R+1; l++){
sumVar+=*(u+myId)-*(u+(k%N)*N+l%N);
}
}
// Forward-Euler step of du/dt = mi - u + sumCoeff * sumVar.
*(unext+myId)=*(u+myId)+dt*(mi-*(u+myId)+sumCoeff*sumVar);
*(currTime+myId)+=dt;
if(*(unext+myId)>=uth){ //Threshold crossed
*(unext+myId)=0.0;
if (it>=minMPVIter){
// Running mean phase velocity: each spike contributes 2*pi of phase
// accumulated over the inter-spike interval (including refractory time).
*(w+myId)=((*(w+myId))*(*(lastTime+myId))+2*pi)/((*(lastTime+myId))+(*(currTime+myId))+refracTime);
*(lastTime+myId)+=(*(currTime+myId))+refracTime;
}
*(currTime+myId)=0.0;
}
return;
}
// Simulates an N x N lattice of leaky integrate-and-fire neurons with nonlocal
// (square-neighbourhood) coupling on the GPU, writing periodic snapshots of
// the potential field and mean phase velocities to "ResultsCUDA<argv[2]>/".
// argv[1]: CSV file holding the initial N x N potential matrix.
// NOTE(review): fopen(argv[1]) is not checked for NULL, and the four
// allocations d_minMPVIter/d_dt/d_mi/d_sumCoeff are never hipFree'd.
int main(int argc, char** argv){
FILE *file1;
char filename[100];
/*******Parameter Declarations*******/
int N=100; //Grid dimension
double dt=0.001; //0.001
int totalTime=10000; //Simulation time
int it=0;
int totalIter=totalTime/dt; //Total iterations
int R=22; //Square radius
double sigma=0.7; //Coupling strength
double sumCoeff=sigma/((2*R+1)*(2*R+1)-1); //Potential sum coefficient
double mi=1.0; //Integrator floor
double uth=0.98;
double Ts=log(mi/(mi-uth));
double refracTime=0.22*Ts; //Refractory period time
int maxRefracIter=(int)ceil(refracTime/dt); //Refractory period iterations
int i,j;
double u[N*N];
double unext[N*N];
int currRefracIter[N*N]; //Current iterations already in refractory period
int maxMPVIter=30000;
int minMPVIter=2000000; // iteration after which mean phase velocities start accumulating
double currTime[N*N];
double lastTime[N*N];
double w[N*N];
double t=0.0;
for (i=0; i<N; i++){
for (j=0; j<N; j++){
(*(unext+i*N+j))=0.0;
(*(currTime+i*N+j))=0.0;
(*(lastTime+i*N+j))=0.0;
(*(currRefracIter+i*N+j))=0.0;
}
}
// Read the initial condition matrix from the CSV file given in argv[1].
// getfield/strtok is destructive, hence the strdup of each line per field.
file1=fopen(argv[1],"r"); //argv[1]
char line[2048];
i=0;
while(fgets(line, 2048, file1)){
for(j=1;j<=N;j++){
char* tmp = strdup(line);
(*(u+N*i+j-1))=atof(getfield(tmp,j));
free(tmp);
}
i++;
}
fclose(file1);
// Device-side mirrors of the state arrays and scalar parameters.
double *d_u, *d_unext, *d_currTime, *d_w, *d_lastTime, *d_refracTime, *d_uth, *d_dt, *d_mi, *d_sumCoeff;
int *d_currRefracIter, *d_it, *d_maxRefracIter, *d_N, *d_R, *d_minMPVIter;
hipMalloc(&d_u, N*N*sizeof(double));
hipMalloc(&d_unext, N*N*sizeof(double));
hipMalloc(&d_currRefracIter, N*N*sizeof(int));
hipMalloc(&d_currTime, N*N*sizeof(double));
hipMalloc(&d_it, sizeof(int));
hipMalloc(&d_w, N*N*sizeof(double));
hipMalloc(&d_lastTime, N*N*sizeof(double));
hipMalloc(&d_refracTime, sizeof(double));
hipMalloc(&d_maxRefracIter, sizeof(int));
hipMalloc(&d_N, sizeof(int));
hipMalloc(&d_R, sizeof(int));
hipMalloc(&d_uth, sizeof(double));
hipMalloc(&d_minMPVIter, sizeof(int));
hipMalloc(&d_dt, sizeof(double));
hipMalloc(&d_mi, sizeof(double));
hipMalloc(&d_sumCoeff, sizeof(double));
// Constant parameters are uploaded once, outside the simulation loop.
hipMemcpy(d_refracTime, &refracTime, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_maxRefracIter, &maxRefracIter, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_N, &N, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_R, &R, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_uth, &uth, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_minMPVIter, &minMPVIter, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_dt, &dt, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_mi, &mi, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_sumCoeff, &sumCoeff, sizeof(double), hipMemcpyHostToDevice);
time_t benchBegin = time(NULL);
/*******Simulation*******/
// NOTE(review): the full state round-trips host<->device every step; keeping
// it resident on the device would remove most of this traffic.
while (it<totalIter){
if (it%10000==0) printf("Iteration %d of %d\n", it, totalIter);
hipMemcpy(d_u, u, N*N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_unext, unext, N*N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_currRefracIter, currRefracIter, N*N*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_currTime, currTime, N*N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_it, &it, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_w, w, N*N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_lastTime, lastTime, N*N*sizeof(double), hipMemcpyHostToDevice);
//printf("STARTING\n");
// One update step: 100 blocks x 100 threads = one thread per neuron.
hipLaunchKernelGGL(( calcPot), dim3(100),dim3(100), 0, 0, d_u, d_unext, d_currRefracIter, d_currTime, d_it, d_w, d_lastTime, d_refracTime, d_maxRefracIter, d_N, d_R, d_uth, d_minMPVIter, d_dt, d_mi, d_sumCoeff);
hipDeviceSynchronize();
hipMemcpy(unext, d_unext, N*N*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(currRefracIter, d_currRefracIter, N*N*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(currTime, d_currTime, N*N*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(w, d_w, N*N*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(lastTime, d_lastTime, N*N*sizeof(double), hipMemcpyDeviceToHost);
// for(i=0; i<N; i++){
// for(j=0; j<N; j++){
// printf("%lf ", *(unext+N*i+j));
// }
// printf("\n");
// }
// printf("FINISHED\n");
// Snapshot of the potential field every 10000 iterations.
if(it%10000==0){
sprintf(filename, "ResultsCUDA%s/Results_POT_LIF_2D_Classic_sigma_%lf_R_%d_time_%lf_.dat",argv[2],sigma,R,t);
file1=fopen(filename,"w");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
fprintf(file1, "%lf,",*(unext+N*i+j));
}
fprintf(file1,"\n");
}
fclose(file1);
}
// Snapshot of the mean phase velocities once accumulation has started.
if (it>minMPVIter){
if ((it-minMPVIter)%maxMPVIter==0){
sprintf(filename, "ResultsCUDA%s/Results_MPV_LIF_2D_Classic_sigma_%lf_R_%d_time_%lf_.dat",argv[2],sigma,R,t);
file1=fopen(filename,"w");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
fprintf(file1,"%lf,",*(w+N*i+j));
}
fprintf(file1,"\n");
}
fclose(file1);
}
}
// One-off benchmark checkpoint after 2000 simulated time units.
if (it == 2000000){
time_t benchEnd = time(NULL);
sprintf(filename, "ResultsCUDA%s/execTime.dat",argv[2]);
file1=fopen(filename,"w");
fprintf(file1,"Execution time for 2000 time units: %ld seconds\n",benchEnd-benchBegin);
fclose(file1);
}
// Advance: next state becomes current state.
for (i=0; i<N; i++){
for (j=0; j<N; j++){
(*(u+N*i+j))=*(unext+N*i+j);
}
}
t+=dt;
it++;
} // the simulation while-loop closes here.
hipFree(d_u);
hipFree(d_unext);
hipFree(d_currRefracIter);
hipFree(d_currTime);
hipFree(d_it);
hipFree(d_w);
hipFree(d_lastTime);
hipFree(d_refracTime);
hipFree(d_maxRefracIter);
hipFree(d_N);
hipFree(d_R);
hipFree(d_uth);
return(0);
}
| 9570e12bd2ee35f2354c8a9ab9a03e1f52db622c.cu | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
const char* getfield(char* line, int num){
    // Return the num-th (1-based) comma-separated token of `line`, or NULL if
    // there are fewer than num tokens (or num <= 0).
    // NOTE: destructive -- strtok writes NUL terminators into `line`.
    const char* tok = strtok(line, ",");
    while (tok && *tok) {
        if (--num == 0) {
            return tok;
        }
        tok = strtok(NULL, ",\n");
    }
    return NULL;
}
// Leaky integrate-and-fire update for one neuron (one thread per neuron).
// u/unext hold the current/next membrane potentials of an N x N grid in
// row-major order; each neuron couples to a (2R+1)x(2R+1) square neighbourhood
// with toroidal wrap-around (the N + ... offsets followed by % N below).
// Scalar parameters arrive as device pointers and are copied to locals first.
// NOTE(review): there is no myId < N*N guard, so the launch must supply
// exactly N*N threads (the host launches <<<100,100>>> with N == 100).
__global__ void calcPot(double *u, double *unext, int *currRefracIter, double *currTime, int *d_it, double *w, double *lastTime, double *d_refracTime,
int *d_maxRefracIter, int *d_N, int *d_R, double *d_uth, int *d_minMPVIter, double *d_dt, double *d_mi, double *d_sumCoeff){
double pi=3.14159265359;
int it = *d_it;
double refracTime = *d_refracTime;
int maxRefracIter = *d_maxRefracIter;
int N = *d_N;
int R = *d_R;
double uth = *d_uth;
int minMPVIter = *d_minMPVIter;
double dt = *d_dt;
double mi = *d_mi;
double sumCoeff = *d_sumCoeff;
int myId = blockDim.x * blockIdx.x + threadIdx.x;
/*******Refractory Period*******/
// A neuron that just fired (u == 0) sits out maxRefracIter update steps.
if (*(u+myId)==0 && *(currRefracIter+myId)<maxRefracIter){
(*(currRefracIter+myId))++;
return;
}
else{
*(currRefracIter+myId)=0;
}
/*******Sum Calculation*******/
// Coupling term: sum of (u_self - u_neighbour) over the square neighbourhood.
double sumVar=0.0;
int k,l;
int iLeftCorner=N+myId/N-R;
int jLeftCorner=N+myId%N-R;
for (k=iLeftCorner; k<iLeftCorner+2*R+1; k++){
for (l=jLeftCorner; l<jLeftCorner+2*R+1; l++){
sumVar+=*(u+myId)-*(u+(k%N)*N+l%N);
}
}
// Forward-Euler step of du/dt = mi - u + sumCoeff * sumVar.
*(unext+myId)=*(u+myId)+dt*(mi-*(u+myId)+sumCoeff*sumVar);
*(currTime+myId)+=dt;
if(*(unext+myId)>=uth){ //Threshold crossed
*(unext+myId)=0.0;
if (it>=minMPVIter){
// Running mean phase velocity: each spike contributes 2*pi of phase
// accumulated over the inter-spike interval (including refractory time).
*(w+myId)=((*(w+myId))*(*(lastTime+myId))+2*pi)/((*(lastTime+myId))+(*(currTime+myId))+refracTime);
*(lastTime+myId)+=(*(currTime+myId))+refracTime;
}
*(currTime+myId)=0.0;
}
return;
}
// Simulates an N x N lattice of leaky integrate-and-fire neurons with nonlocal
// (square-neighbourhood) coupling on the GPU, writing periodic snapshots of
// the potential field and mean phase velocities to "ResultsCUDA<argv[2]>/".
//   argv[1]: CSV file holding the initial N x N potential matrix.
//   argv[2]: suffix of the output directory (directory must already exist).
// Fixes vs. previous revision: deprecated cudaThreadSynchronize() replaced by
// cudaDeviceSynchronize(); four leaked device allocations now freed; the input
// file handle is checked before use.
int main(int argc, char** argv){
    FILE *file1;
    char filename[100];
    /*******Parameter Declarations*******/
    int N=100;                                   // Grid dimension
    double dt=0.001;                             // Integration time step
    int totalTime=10000;                         // Simulation time
    int it=0;
    int totalIter=totalTime/dt;                  // Total iterations
    int R=22;                                    // Coupling square radius
    double sigma=0.7;                            // Coupling strength
    double sumCoeff=sigma/((2*R+1)*(2*R+1)-1);   // Potential sum coefficient
    double mi=1.0;                               // Integrator floor
    double uth=0.98;                             // Firing threshold
    double Ts=log(mi/(mi-uth));                  // Natural inter-spike period
    double refracTime=0.22*Ts;                   // Refractory period time
    int maxRefracIter=(int)ceil(refracTime/dt);  // Refractory period iterations
    int i,j;
    double u[N*N];
    double unext[N*N];
    int currRefracIter[N*N];                     // Iterations spent so far in refractory period
    int maxMPVIter=30000;
    int minMPVIter=2000000;                      // Iteration after which mean phase velocities accumulate
    double currTime[N*N];
    double lastTime[N*N];
    double w[N*N];                               // Mean phase velocity per neuron
    double t=0.0;
    for (i=0; i<N; i++){
        for (j=0; j<N; j++){
            (*(unext+i*N+j))=0.0;
            (*(currTime+i*N+j))=0.0;
            (*(lastTime+i*N+j))=0.0;
            (*(currRefracIter+i*N+j))=0.0;
        }
    }
    // Read the initial condition matrix from the CSV file given in argv[1].
    file1=fopen(argv[1],"r");
    if (!file1){                                 // robustness: fail cleanly instead of crashing in fgets
        fprintf(stderr,"Cannot open input file %s\n",argv[1]);
        return 1;
    }
    char line[2048];
    i=0;
    while(fgets(line, 2048, file1)){
        for(j=1;j<=N;j++){
            char* tmp = strdup(line);            // getfield/strtok is destructive, so parse a copy
            (*(u+N*i+j-1))=atof(getfield(tmp,j));
            free(tmp);
        }
        i++;
    }
    fclose(file1);
    // Device-side mirrors of the state arrays and scalar parameters.
    double *d_u, *d_unext, *d_currTime, *d_w, *d_lastTime, *d_refracTime, *d_uth, *d_dt, *d_mi, *d_sumCoeff;
    int *d_currRefracIter, *d_it, *d_maxRefracIter, *d_N, *d_R, *d_minMPVIter;
    cudaMalloc(&d_u, N*N*sizeof(double));
    cudaMalloc(&d_unext, N*N*sizeof(double));
    cudaMalloc(&d_currRefracIter, N*N*sizeof(int));
    cudaMalloc(&d_currTime, N*N*sizeof(double));
    cudaMalloc(&d_it, sizeof(int));
    cudaMalloc(&d_w, N*N*sizeof(double));
    cudaMalloc(&d_lastTime, N*N*sizeof(double));
    cudaMalloc(&d_refracTime, sizeof(double));
    cudaMalloc(&d_maxRefracIter, sizeof(int));
    cudaMalloc(&d_N, sizeof(int));
    cudaMalloc(&d_R, sizeof(int));
    cudaMalloc(&d_uth, sizeof(double));
    cudaMalloc(&d_minMPVIter, sizeof(int));
    cudaMalloc(&d_dt, sizeof(double));
    cudaMalloc(&d_mi, sizeof(double));
    cudaMalloc(&d_sumCoeff, sizeof(double));
    // Constant parameters are uploaded once, outside the simulation loop.
    cudaMemcpy(d_refracTime, &refracTime, sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_maxRefracIter, &maxRefracIter, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_N, &N, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_R, &R, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_uth, &uth, sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_minMPVIter, &minMPVIter, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_dt, &dt, sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mi, &mi, sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_sumCoeff, &sumCoeff, sizeof(double), cudaMemcpyHostToDevice);
    time_t benchBegin = time(NULL);
    /*******Simulation*******/
    // NOTE(review): the full state round-trips host<->device every step;
    // keeping it device-resident would remove most of this traffic.
    while (it<totalIter){
        if (it%10000==0) printf("Iteration %d of %d\n", it, totalIter);
        cudaMemcpy(d_u, u, N*N*sizeof(double), cudaMemcpyHostToDevice);
        cudaMemcpy(d_unext, unext, N*N*sizeof(double), cudaMemcpyHostToDevice);
        cudaMemcpy(d_currRefracIter, currRefracIter, N*N*sizeof(int),cudaMemcpyHostToDevice);
        cudaMemcpy(d_currTime, currTime, N*N*sizeof(double), cudaMemcpyHostToDevice);
        cudaMemcpy(d_it, &it, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_w, w, N*N*sizeof(double), cudaMemcpyHostToDevice);
        cudaMemcpy(d_lastTime, lastTime, N*N*sizeof(double), cudaMemcpyHostToDevice);
        // One update step: 100 blocks x 100 threads = one thread per neuron.
        calcPot<<<100,100>>>(d_u, d_unext, d_currRefracIter, d_currTime, d_it, d_w, d_lastTime, d_refracTime, d_maxRefracIter, d_N, d_R, d_uth, d_minMPVIter, d_dt, d_mi, d_sumCoeff);
        cudaDeviceSynchronize();    // was the deprecated cudaThreadSynchronize()
        cudaMemcpy(unext, d_unext, N*N*sizeof(double), cudaMemcpyDeviceToHost);
        cudaMemcpy(currRefracIter, d_currRefracIter, N*N*sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(currTime, d_currTime, N*N*sizeof(double), cudaMemcpyDeviceToHost);
        cudaMemcpy(w, d_w, N*N*sizeof(double), cudaMemcpyDeviceToHost);
        cudaMemcpy(lastTime, d_lastTime, N*N*sizeof(double), cudaMemcpyDeviceToHost);
        // Snapshot of the potential field every 10000 iterations.
        if(it%10000==0){
            sprintf(filename, "ResultsCUDA%s/Results_POT_LIF_2D_Classic_sigma_%lf_R_%d_time_%lf_.dat",argv[2],sigma,R,t);
            file1=fopen(filename,"w");
            for(i=0;i<N;i++){
                for(j=0;j<N;j++){
                    fprintf(file1, "%lf,",*(unext+N*i+j));
                }
                fprintf(file1,"\n");
            }
            fclose(file1);
        }
        // Snapshot of the mean phase velocities once accumulation has started.
        if (it>minMPVIter){
            if ((it-minMPVIter)%maxMPVIter==0){
                sprintf(filename, "ResultsCUDA%s/Results_MPV_LIF_2D_Classic_sigma_%lf_R_%d_time_%lf_.dat",argv[2],sigma,R,t);
                file1=fopen(filename,"w");
                for(i=0;i<N;i++){
                    for(j=0;j<N;j++){
                        fprintf(file1,"%lf,",*(w+N*i+j));
                    }
                    fprintf(file1,"\n");
                }
                fclose(file1);
            }
        }
        // One-off benchmark checkpoint after 2000 simulated time units.
        if (it == 2000000){
            time_t benchEnd = time(NULL);
            sprintf(filename, "ResultsCUDA%s/execTime.dat",argv[2]);
            file1=fopen(filename,"w");
            fprintf(file1,"Execution time for 2000 time units: %ld seconds\n",benchEnd-benchBegin);
            fclose(file1);
        }
        // Advance: next state becomes current state.
        for (i=0; i<N; i++){
            for (j=0; j<N; j++){
                (*(u+N*i+j))=*(unext+N*i+j);
            }
        }
        t+=dt;
        it++;
    }
    cudaFree(d_u);
    cudaFree(d_unext);
    cudaFree(d_currRefracIter);
    cudaFree(d_currTime);
    cudaFree(d_it);
    cudaFree(d_w);
    cudaFree(d_lastTime);
    cudaFree(d_refracTime);
    cudaFree(d_maxRefracIter);
    cudaFree(d_N);
    cudaFree(d_R);
    cudaFree(d_uth);
    cudaFree(d_minMPVIter);   // previously leaked
    cudaFree(d_dt);           // previously leaked
    cudaFree(d_mi);           // previously leaked
    cudaFree(d_sumCoeff);     // previously leaked
    return(0);
}
|
23d9e14ebf5c977714d091afed830d065430755c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2015 Patrick Putnam
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "square.h"
#include <stdio.h>
#include <iostream>
#include <cassert>
// from CUDA documentation
//const unsigned int maxThreadsPerState = 256;
// each thread initializes a random state
// Initialise one RNG state per thread: thread idx seeds its state from
// seeds[idx] and stores it into rngStates[idx].
// BUG FIX: the original placed __syncthreads() inside the divergent
// `if (idx < N)` branch; when N is not a multiple of the block size some
// threads skip the barrier, which is undefined behaviour (hang/corruption).
// The barrier is now hoisted so every thread in the block reaches it.
// Requires blockDim.x * blockDim.y <= 1024 (size of the shared staging array).
__global__ void initRNG( hiprandState_t * rngStates, Square::seed_type * seeds, unsigned int N ) {
    unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x;
    unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
    unsigned int tmax = blockDim.x * blockDim.y;
    unsigned int idx = bid * tmax + tid;
    __shared__ unsigned int tmp[ 1024 ];
    if( idx < N ) {
        tmp[ tid ] = seeds[ idx ];
    }
    __syncthreads();   // uniform barrier: reached by ALL threads of the block
    if( idx < N ) {
        hiprandState_t localState;
        // seed = per-thread value from seeds[]; sequence = tid within block.
        hiprand_init( tmp[ tid ], tid, 0, &localState );
        rngStates[ idx ] = localState;
    }
}
/*
__global__ void initRNG( hiprandStateMtgp32_t * rngStates, Square::seed_type * seeds, unsigned int N ) {
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
hiprand_init( seed, idx, 0, &rngStates[idx] );
}*/
// Write idx*idx into a[idx] for every index covered by the 1D launch.
__global__ void square( Square::int_type * a, int N ) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ( idx >= N ) return;
    a[ idx ] = idx * idx;
}
// Fill a[0..N) with squared random draws; one RNG state is indexed per
// (block, y-row): rngStates + (bid * blockDim.y + threadIdx.y).
// NOTE(review): all blockDim.x lanes of a row call hiprand() on that same
// state concurrently; hiprand mutates the state, so this looks like a data
// race -- confirm the per-row state sharing is intentional.
// NOTE(review): the shared-memory staging is not strictly needed, since each
// thread only reads back its own tmp[tid].
template < class State >
__global__ void squareRNG( Square::int_type * a, int N, State * rngStates ) {
unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x;
unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int idx = bid * (blockDim.x * blockDim.y) + tid;
__shared__ Square::int_type tmp[ 1024 ]; // assume max 1024 threads/block
// every thread generates a random number
if( idx < N ) {
tmp[tid] = hiprand(rngStates + (bid * blockDim.y + threadIdx.y) );
a[ idx ] = tmp[tid] * tmp[tid]; // copy square to global memory
}
}
template < class State >
__global__ void squareRNG( Square::int_type * a, int N, State * rngState, unsigned int rounds ) {
unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x;
unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int idx = bid * (blockDim.x * blockDim.y) + tid;
while( idx < N && rounds-- ) {
Square::int_type tmp = hiprand(rngState + bid );
tmp *= tmp;
a[idx] = tmp;
idx += blockDim.x;
}
}
Square::Square( boost::random::mt19937 & rng ) :
m_a(NULL)
, m_dest(NULL)
, m_size(0)
, m_capacity(0)
, m_maxBlocks(0)
, m_maxThreadsPerBlock(0)
, m_status(true)
, m_dStates( &rng )
{
init();
}
Square::Square( boost::random::mt19937 * rng ) :
m_a(NULL)
, m_dest(NULL)
, m_size(0)
, m_capacity(0)
, m_maxBlocks(0)
, m_maxThreadsPerBlock(0)
, m_status(true)
, m_dStates( rng )
{
init();
}
void Square::init() {
hipDeviceProp_t m_cdp;
hipError_t err = hipGetDeviceProperties( &m_cdp, 0 );
if( err != hipSuccess ) {
std::cerr << "Unable to get device properties" << std::endl;
m_status = false;
return;
} else {
std::cerr << "Maximum Threads Per Block: " << m_cdp.maxThreadsPerBlock << std::endl;
m_maxThreadsPerBlock = m_cdp.maxThreadsPerBlock;
m_maxBlocks = m_cdp.maxGridSize[0];
}
// initialize random states
// unsigned int state_count = m_maxThreadsPerBlock / maxThreadsPerState;
//
// if( m_maxThreadsPerBlock % maxThreadsPerState ) { ++state_count; }
// m_dStates.resize( state_count );
}
Square::~Square() {
if( m_a ) free(m_a);
if( m_dest ) hipFree( m_dest );
}
size_t Square::size() const { return m_size; }
bool Square::good() const {
return m_status && m_dStates.good();
}
void Square::operator()( unsigned int s ) {
if( !good() ) return;
resize( s );
assert( s == m_size );
int block_count = (m_size / m_maxThreadsPerBlock );
if( m_size % m_maxThreadsPerBlock ) { ++block_count; }
dim3 gdim( block_count, 1, 1),
bdim = curand_state_type::makeBlockDimension( m_maxThreadsPerBlock );
m_dStates.updateStates( gdim.x * gdim.y * bdim.y );
// std::cerr << "Block Dimensions: <" << bdim.x << ", " << bdim.y << ", " << bdim.z << ">" << std::endl;
// std::cerr << "Grid Dimensions: <" << gdim.x << ", " << gdim.y << ", " << gdim.z << ">" << std::endl;
//squareRNG<<< gdim, bdim >>>( m_dest, m_size, m_dStates.getStates() );
hipLaunchKernelGGL(( squareRNG), dim3(block_count), dim3(bdim.x) , 0, 0, m_dest, m_size, m_dStates.getStates(), bdim.y );
//int_type * d = m_dest;
//curand_state_type::pointer pS = m_dStates.getStates();
//while( s ) {
// if( s >= bdim.x ) {
// squareRNG<<< 1, bdim.x >>>( d, bdim.x, pS );
// d += bdim.x;
// s -= bdim.x;
// ++pS;
// } else {
// squareRNG<<< 1, bdim.x >>>( d, s, pS );
// s = 0;
// }
//}
hipMemcpy( m_a, m_dest, m_size * sizeof(int_type), hipMemcpyDeviceToHost );
}
void Square::resize( unsigned int s ) {
if( !good() ) return;
if ( s > m_capacity ) {
if( m_a ) {
free( m_a );
}
if( m_dest ) {
hipFree( m_dest );
}
size_t byte_size = s * sizeof(int_type );
m_a = (int_type *) malloc( byte_size );
hipError_t err = hipMalloc( (void **) &m_dest, byte_size );
if( err != hipSuccess ) {
std::cerr << "Unable to allocate device memory: " << std::endl;
}
m_capacity = s;
}
m_size = s;
}
/*
void Square::random_list() {
int block_count = (m_size / m_maxThreadsPerBlock);
if( m_size % m_maxThreadsPerBlock ) {
++block_count;
}
// max 256 threads/hiprandState_t
unsigned int state_count = block_count * ( m_maxThreadsPerBlock / 256 );
unsigned int seed_size = state_count * sizeof(unsigned int);
unsigned int state_size = state_count * sizeof( hiprandState_t );
unsigned int * seeds;
seeds = malloc( seed_size );
unsigned int * d_seeds;
hipMalloc( (void **) &d_seeds, seed_size );
unsigned int t = state_count;
while( t-- ) {
seeds[ t ] = m_dist( *m_rng );
}
hipMemcpy( d_seeds, seeds, seed_size, hipMemcpyHostToDevice );
hiprandState_t *d_rngStates = 0;
hipMalloc( (void **) &d_rngStates, state_size );
unsigned int bcount = state_count / m_maxThreadsPerBlock;
if( state_count % m_maxThreadsPerBlock ) { ++bcount; }
unsigned int bx = bcount, by = 1;
if( bcount > m_maxBlocks ) {
by = bcount / m_maxBlocks;
if( bcount % m_maxBlocks ) { ++by; }
bx = m_maxBlocks;
}
unsigned int tx = m_maxThreadsPerBlock, ty = 1;
initRNG<<< dim3(bx, by, 1), dim3(tx, ty, 1) >>>( d_rngStates, d_seeds, state_count );
squareRNG<<<1, block_count, m_maxThreadsPerBlock >>>( m_dest, m_size, d_rngStates );
free(seeds); // should be performed on host during squareRNG on device
hipMemcpy( m_a, m_dest, m_size * sizeof(int_type), hipMemcpyDeviceToHost );
hipFree( d_rngStates );
hipFree( d_seeds );
}*/
std::ostream & operator<<( std::ostream & out, const Square & rhs ) {
if( rhs.good() ) {
for( unsigned int i = 0; i < rhs.size(); ++i ) {
out << i << " -> " << rhs.m_a[i] << "\n";
}
} else {
out << "BAD STATE" << "\n";
}
return out;
}
| 23d9e14ebf5c977714d091afed830d065430755c.cu | // Copyright 2015 Patrick Putnam
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "square.h"
#include <stdio.h>
#include <iostream>
#include <cassert>
// from CUDA documentation
//const unsigned int maxThreadsPerState = 256;
// each thread initializes a random state
__global__ void initRNG( curandState * rngStates, Square::seed_type * seeds, unsigned int N ) {
unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x;
unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int tmax = blockDim.x * blockDim.y;
unsigned int idx = bid * tmax + tid;
__shared__ unsigned int tmp[ 1024 ];
if( idx < N ) {
tmp[ tid ] = seeds[ idx ];
__syncthreads();
curandState localState;
curand_init( tmp[tid], tid, 0, &localState );
rngStates[ idx ] = localState;
}
}
/*
__global__ void initRNG( curandStateMtgp32_t * rngStates, Square::seed_type * seeds, unsigned int N ) {
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
curand_init( seed, idx, 0, &rngStates[idx] );
}*/
__global__ void square( Square::int_type * a, int N ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx < N ) a[idx] = idx * idx;
}
template < class State >
__global__ void squareRNG( Square::int_type * a, int N, State * rngStates ) {
unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x;
unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int idx = bid * (blockDim.x * blockDim.y) + tid;
__shared__ Square::int_type tmp[ 1024 ]; // assume max 1024 threads/block
// every thread generates a random number
if( idx < N ) {
tmp[tid] = curand(rngStates + (bid * blockDim.y + threadIdx.y) );
a[ idx ] = tmp[tid] * tmp[tid]; // copy square to global memory
}
}
template < class State >
__global__ void squareRNG( Square::int_type * a, int N, State * rngState, unsigned int rounds ) {
unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x;
unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int idx = bid * (blockDim.x * blockDim.y) + tid;
while( idx < N && rounds-- ) {
Square::int_type tmp = curand(rngState + bid );
tmp *= tmp;
a[idx] = tmp;
idx += blockDim.x;
}
}
Square::Square( boost::random::mt19937 & rng ) :
m_a(NULL)
, m_dest(NULL)
, m_size(0)
, m_capacity(0)
, m_maxBlocks(0)
, m_maxThreadsPerBlock(0)
, m_status(true)
, m_dStates( &rng )
{
init();
}
Square::Square( boost::random::mt19937 * rng ) :
m_a(NULL)
, m_dest(NULL)
, m_size(0)
, m_capacity(0)
, m_maxBlocks(0)
, m_maxThreadsPerBlock(0)
, m_status(true)
, m_dStates( rng )
{
init();
}
void Square::init() {
cudaDeviceProp m_cdp;
cudaError_t err = cudaGetDeviceProperties( &m_cdp, 0 );
if( err != cudaSuccess ) {
std::cerr << "Unable to get device properties" << std::endl;
m_status = false;
return;
} else {
std::cerr << "Maximum Threads Per Block: " << m_cdp.maxThreadsPerBlock << std::endl;
m_maxThreadsPerBlock = m_cdp.maxThreadsPerBlock;
m_maxBlocks = m_cdp.maxGridSize[0];
}
// initialize random states
// unsigned int state_count = m_maxThreadsPerBlock / maxThreadsPerState;
//
// if( m_maxThreadsPerBlock % maxThreadsPerState ) { ++state_count; }
// m_dStates.resize( state_count );
}
Square::~Square() {
if( m_a ) free(m_a);
if( m_dest ) cudaFree( m_dest );
}
size_t Square::size() const { return m_size; }
bool Square::good() const {
return m_status && m_dStates.good();
}
void Square::operator()( unsigned int s ) {
if( !good() ) return;
resize( s );
assert( s == m_size );
int block_count = (m_size / m_maxThreadsPerBlock );
if( m_size % m_maxThreadsPerBlock ) { ++block_count; }
dim3 gdim( block_count, 1, 1),
bdim = curand_state_type::makeBlockDimension( m_maxThreadsPerBlock );
m_dStates.updateStates( gdim.x * gdim.y * bdim.y );
// std::cerr << "Block Dimensions: <" << bdim.x << ", " << bdim.y << ", " << bdim.z << ">" << std::endl;
// std::cerr << "Grid Dimensions: <" << gdim.x << ", " << gdim.y << ", " << gdim.z << ">" << std::endl;
//squareRNG<<< gdim, bdim >>>( m_dest, m_size, m_dStates.getStates() );
squareRNG<<< block_count, bdim.x >>>( m_dest, m_size, m_dStates.getStates(), bdim.y );
//int_type * d = m_dest;
//curand_state_type::pointer pS = m_dStates.getStates();
//while( s ) {
// if( s >= bdim.x ) {
// squareRNG<<< 1, bdim.x >>>( d, bdim.x, pS );
// d += bdim.x;
// s -= bdim.x;
// ++pS;
// } else {
// squareRNG<<< 1, bdim.x >>>( d, s, pS );
// s = 0;
// }
//}
cudaMemcpy( m_a, m_dest, m_size * sizeof(int_type), cudaMemcpyDeviceToHost );
}
void Square::resize( unsigned int s ) {
if( !good() ) return;
if ( s > m_capacity ) {
if( m_a ) {
free( m_a );
}
if( m_dest ) {
cudaFree( m_dest );
}
size_t byte_size = s * sizeof(int_type );
m_a = (int_type *) malloc( byte_size );
cudaError_t err = cudaMalloc( (void **) &m_dest, byte_size );
if( err != cudaSuccess ) {
std::cerr << "Unable to allocate device memory: " << std::endl;
}
m_capacity = s;
}
m_size = s;
}
/*
void Square::random_list() {
int block_count = (m_size / m_maxThreadsPerBlock);
if( m_size % m_maxThreadsPerBlock ) {
++block_count;
}
// max 256 threads/curandState
unsigned int state_count = block_count * ( m_maxThreadsPerBlock / 256 );
unsigned int seed_size = state_count * sizeof(unsigned int);
unsigned int state_size = state_count * sizeof( curandState );
unsigned int * seeds;
seeds = malloc( seed_size );
unsigned int * d_seeds;
cudaMalloc( (void **) &d_seeds, seed_size );
unsigned int t = state_count;
while( t-- ) {
seeds[ t ] = m_dist( *m_rng );
}
cudaMemcpy( d_seeds, seeds, seed_size, cudaMemcpyHostToDevice );
curandState *d_rngStates = 0;
cudaMalloc( (void **) &d_rngStates, state_size );
unsigned int bcount = state_count / m_maxThreadsPerBlock;
if( state_count % m_maxThreadsPerBlock ) { ++bcount; }
unsigned int bx = bcount, by = 1;
if( bcount > m_maxBlocks ) {
by = bcount / m_maxBlocks;
if( bcount % m_maxBlocks ) { ++by; }
bx = m_maxBlocks;
}
unsigned int tx = m_maxThreadsPerBlock, ty = 1;
initRNG<<< dim3(bx, by, 1), dim3(tx, ty, 1) >>>( d_rngStates, d_seeds, state_count );
squareRNG<<<1, block_count, m_maxThreadsPerBlock >>>( m_dest, m_size, d_rngStates );
free(seeds); // should be performed on host during squareRNG on device
cudaMemcpy( m_a, m_dest, m_size * sizeof(int_type), cudaMemcpyDeviceToHost );
cudaFree( d_rngStates );
cudaFree( d_seeds );
}*/
std::ostream & operator<<( std::ostream & out, const Square & rhs ) {
if( rhs.good() ) {
for( unsigned int i = 0; i < rhs.size(); ++i ) {
out << i << " -> " << rhs.m_a[i] << "\n";
}
} else {
out << "BAD STATE" << "\n";
}
return out;
}
|
921890d1baaa21443e9ddcc952f1b323cdb1e761.hip | // !!! This is a file automatically generated by hipify!!!
#include "PyC_types.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "stdio.h"
#include <sys/time.h>
#include <iostream>
#define DEBUG 1
#define T 6
extern "C" {
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1, PyInt* n_orb2, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
struct timeval start,stop;
gettimeofday(&start,0);
for(int n=0; n<n_elem; n++) {
int index = i[n]*dim[n]+j[n];
// Upper loop
for(int p1=0; p1<n_orb1[n]; p1++) {
for(int r1=0; r1<n_orb1[n]; r1++) {
for(int q1=0; q1<n_orb1[n]; q1++) {
for(int s2=0; s2<n_orb2[n]; s2++) {
H[n][index] += sign[n] * V1[n][((p1*n_orb1[n] + q1)*n_orb1[n] + r1)*n_orb2[n] + s2] * Rcca1[n][(q1*n_orb1[n] + p1)*n_orb1[n] + r1] * Ra2[n][s2];
}
}
}
}
// Middle loop
for(int p11=0; p11<n_orb1[n]; p11++) {
for(int q2=0; q2<n_orb2[n]; q2++) {
for(int r2=0; r2<n_orb2[n]; r2++) {
for(int s2=0; s2<n_orb2[n]; s2++) {
H[n][index] += sign[n] * V2[n][((p11*n_orb2[n] + q2)*n_orb2[n] + r2)*n_orb2[n] + s2] * Rc1[n][p1] * Rcaa2[n][(q2*n_orb2[n] + s2)*n_orb2[n] + r2];
}
}
}
}
H[n][index] *= 2;
// Bottom Loop
for(int p12=0; p12<n_orb1[n]; p12++) {
for(int q22=0; q22<n_orb2[n]; q22++) {
H[n][index] += h[n][p12*n_orb2[n]+q22] * Rc1[n][p12] * Ra2[q22];
}
}
}
gettimeofday(&stop,0);
if(DEBUG) {
double t = (double)(stop.tv_sec-start.tv_sec)*1000+(double)(stop.tv_usec-start.tv_usec)/1000;
printf("dimer_1min1pls_loop finished in %f ms\n", t);
}
}
}
/*
//std::vector<std::thread> hostThreads;
// lambda expression to create streams
auto createStreams = [streams]() {
for(int i=0; i<numStreams; i++) {
gpuErr(hipStreamCreate(&streams[i]));
}
};
//hostThreads.push_back(std::move(std::thread(createStreams)));
for(std::thread& t : hostThreads) {
if(t.joinable())
t.join();
}
*/
// not sure if hipMalloc is threadsafe if called from multiple host threads
// will experiment with this -- concurrent allocation would save significant time
// generalized lambda expression to perform a cudamalloc
/*
auto cudaPreMalloc = [](double *arr, unsigned long size) {
gpuErr(hipMalloc((void **) &arr, size));
};
threads.push(std::thread(cudaPreMalloc, &d_V1112, sizeof(double)*N4*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_Rcca1, sizeof(double)*N3*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_Ra2, sizeof(double)*n_orb2*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_V1222, sizeof(double)*n_orb1*n_orb2*n_orb2*n_orb2*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_Rcaa2, sizeof(double)*n_orb2*n_orb2*n_orb2*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_Rc1, sizeof(double)*n_orb1*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_H, sizeof(double)*n_orb1*n_orb1*n_orb1*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_h, sizeof(double)*n_orb1*n_orb2*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_Hr, sizeof(double)*blocks*n_elem/numChunks));
*/
| 921890d1baaa21443e9ddcc952f1b323cdb1e761.cu | #include "PyC_types.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "stdio.h"
#include <sys/time.h>
#include <iostream>
#define DEBUG 1
#define T 6
extern "C" {
void dimer_1min1pls_loop(PyInt n_elem, Double** H, PyInt* i, PyInt* j, PyInt* dim, PyFloat* sign, PyInt* n_orb1, PyInt* n_orb2, Double** Rc1, Double** Rcca1, Double** Ra2, Double** Rcaa2, Double** h, Double** V1, Double** V2)
{
struct timeval start,stop;
gettimeofday(&start,0);
for(int n=0; n<n_elem; n++) {
int index = i[n]*dim[n]+j[n];
// Upper loop
for(int p1=0; p1<n_orb1[n]; p1++) {
for(int r1=0; r1<n_orb1[n]; r1++) {
for(int q1=0; q1<n_orb1[n]; q1++) {
for(int s2=0; s2<n_orb2[n]; s2++) {
H[n][index] += sign[n] * V1[n][((p1*n_orb1[n] + q1)*n_orb1[n] + r1)*n_orb2[n] + s2] * Rcca1[n][(q1*n_orb1[n] + p1)*n_orb1[n] + r1] * Ra2[n][s2];
}
}
}
}
// Middle loop
for(int p11=0; p11<n_orb1[n]; p11++) {
for(int q2=0; q2<n_orb2[n]; q2++) {
for(int r2=0; r2<n_orb2[n]; r2++) {
for(int s2=0; s2<n_orb2[n]; s2++) {
H[n][index] += sign[n] * V2[n][((p11*n_orb2[n] + q2)*n_orb2[n] + r2)*n_orb2[n] + s2] * Rc1[n][p1] * Rcaa2[n][(q2*n_orb2[n] + s2)*n_orb2[n] + r2];
}
}
}
}
H[n][index] *= 2;
// Bottom Loop
for(int p12=0; p12<n_orb1[n]; p12++) {
for(int q22=0; q22<n_orb2[n]; q22++) {
H[n][index] += h[n][p12*n_orb2[n]+q22] * Rc1[n][p12] * Ra2[q22];
}
}
}
gettimeofday(&stop,0);
if(DEBUG) {
double t = (double)(stop.tv_sec-start.tv_sec)*1000+(double)(stop.tv_usec-start.tv_usec)/1000;
printf("dimer_1min1pls_loop finished in %f ms\n", t);
}
}
}
/*
//std::vector<std::thread> hostThreads;
// lambda expression to create streams
auto createStreams = [streams]() {
for(int i=0; i<numStreams; i++) {
gpuErr(cudaStreamCreate(&streams[i]));
}
};
//hostThreads.push_back(std::move(std::thread(createStreams)));
for(std::thread& t : hostThreads) {
if(t.joinable())
t.join();
}
*/
// not sure if cudaMalloc is threadsafe if called from multiple host threads
// will experiment with this -- concurrent allocation would save significant time
// generalized lambda expression to perform a cudamalloc
/*
auto cudaPreMalloc = [](double *arr, unsigned long size) {
gpuErr(cudaMalloc((void **) &arr, size));
};
threads.push(std::thread(cudaPreMalloc, &d_V1112, sizeof(double)*N4*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_Rcca1, sizeof(double)*N3*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_Ra2, sizeof(double)*n_orb2*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_V1222, sizeof(double)*n_orb1*n_orb2*n_orb2*n_orb2*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_Rcaa2, sizeof(double)*n_orb2*n_orb2*n_orb2*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_Rc1, sizeof(double)*n_orb1*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_H, sizeof(double)*n_orb1*n_orb1*n_orb1*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_h, sizeof(double)*n_orb1*n_orb2*n_elem/numChunks));
threads.push(std::thread(cudaPreMalloc, &d_Hr, sizeof(double)*blocks*n_elem/numChunks));
*/
|
a2e675c12d356fe50a860c9843b6d9b326b77114.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define TPB 16
#define PARTICLES 1000000
#define ITTERATIONS 10
struct Particle
{
float3 position;
float3 velocity;
};
__device__ float3 operator+(const float3& p1, const float3& p2)
{
return make_float3(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z);
}
__device__ float3 operator*(const float3& p1, const int& p2)
{
return make_float3(p1.x * p2, p1.y * p2, p1.z * p2);
}
__host__ int operator!=(const float3& p1, const float3& p2)
{
if (p1.x != p2.x)
return 1;
else if (p1.y != p2.y)
return 1;
else if (p1.z != p2.z)
return 1;
else
return 0;
}
__global__ void update_gpu(Particle* particles, int dt)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float3 delta_velocity = make_float3(1, 2, 3);
particles[i].velocity = particles[i].velocity + delta_velocity;
particles[i].position = particles[i].position + particles[i].velocity * dt;
}
void update_cpu(int n, Particle* particles, int dt)
{
for (int i = 0; i < n; i++)
{
particles[i].velocity.x += 1;
particles[i].velocity.y += 2;
particles[i].velocity.z += 3;
particles[i].position.x += particles[i].velocity.x * dt;
particles[i].position.y += particles[i].velocity.y * dt;
particles[i].position.z += particles[i].velocity.z * dt;
}
}
int particle_compare(int n, Particle* cpu, Particle* gpu)
{
for (int i = 0; i < n; i++)
{
if (cpu[i].position != gpu[i].position || cpu[i].velocity != gpu[i].velocity)
return 0;
}
return 1;
}
int main()
{
double time_diff = 0.0;
clock_t start, end;
Particle *particles = (Particle*)malloc(PARTICLES * sizeof(Particle));
for (int i = 0; i < PARTICLES; i++)
{
particles[i].position = make_float3(rand() % 100, rand() % 100, rand() % 100);
particles[i].velocity = make_float3(rand() % 100, rand() % 100, rand() % 100);
}
Particle* particles_gpu;
Particle* results_particles = (Particle*)malloc(PARTICLES * sizeof(Particle));
hipMalloc(&particles_gpu, PARTICLES * sizeof(Particle));
hipMemcpy(particles_gpu, particles, PARTICLES * sizeof(Particle), hipMemcpyHostToDevice);
for (int i = 0; i < ITTERATIONS; i++)
{
update_gpu << <(PARTICLES + TPB - 1) / TPB, TPB >> > (particles_gpu, 1);
}
start = clock();
for (int i = 0; i < ITTERATIONS; i++)
{
update_cpu(PARTICLES, particles, 1);
}
end = clock();
time_diff = (double)(end - start) / CLOCKS_PER_SEC;
printf("CPU execution time: %f seconds\n", time_diff);
hipMemcpy(results_particles, particles_gpu, PARTICLES * sizeof(Particle), hipMemcpyDeviceToHost);
if (particle_compare(PARTICLES, particles, results_particles))
printf("Comparison Successful\n");
else
printf("Error\n");
hipFree(particles_gpu);
free(particles);
free(results_particles);
return 0;
} | a2e675c12d356fe50a860c9843b6d9b326b77114.cu | #include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#define TPB 16
#define PARTICLES 1000000
#define ITTERATIONS 10
struct Particle
{
float3 position;
float3 velocity;
};
__device__ float3 operator+(const float3& p1, const float3& p2)
{
return make_float3(p1.x + p2.x, p1.y + p2.y, p1.z + p2.z);
}
__device__ float3 operator*(const float3& p1, const int& p2)
{
return make_float3(p1.x * p2, p1.y * p2, p1.z * p2);
}
__host__ int operator!=(const float3& p1, const float3& p2)
{
if (p1.x != p2.x)
return 1;
else if (p1.y != p2.y)
return 1;
else if (p1.z != p2.z)
return 1;
else
return 0;
}
__global__ void update_gpu(Particle* particles, int dt)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
float3 delta_velocity = make_float3(1, 2, 3);
particles[i].velocity = particles[i].velocity + delta_velocity;
particles[i].position = particles[i].position + particles[i].velocity * dt;
}
void update_cpu(int n, Particle* particles, int dt)
{
for (int i = 0; i < n; i++)
{
particles[i].velocity.x += 1;
particles[i].velocity.y += 2;
particles[i].velocity.z += 3;
particles[i].position.x += particles[i].velocity.x * dt;
particles[i].position.y += particles[i].velocity.y * dt;
particles[i].position.z += particles[i].velocity.z * dt;
}
}
int particle_compare(int n, Particle* cpu, Particle* gpu)
{
for (int i = 0; i < n; i++)
{
if (cpu[i].position != gpu[i].position || cpu[i].velocity != gpu[i].velocity)
return 0;
}
return 1;
}
int main()
{
double time_diff = 0.0;
clock_t start, end;
Particle *particles = (Particle*)malloc(PARTICLES * sizeof(Particle));
for (int i = 0; i < PARTICLES; i++)
{
particles[i].position = make_float3(rand() % 100, rand() % 100, rand() % 100);
particles[i].velocity = make_float3(rand() % 100, rand() % 100, rand() % 100);
}
Particle* particles_gpu;
Particle* results_particles = (Particle*)malloc(PARTICLES * sizeof(Particle));
cudaMalloc(&particles_gpu, PARTICLES * sizeof(Particle));
cudaMemcpy(particles_gpu, particles, PARTICLES * sizeof(Particle), cudaMemcpyHostToDevice);
for (int i = 0; i < ITTERATIONS; i++)
{
update_gpu << <(PARTICLES + TPB - 1) / TPB, TPB >> > (particles_gpu, 1);
}
start = clock();
for (int i = 0; i < ITTERATIONS; i++)
{
update_cpu(PARTICLES, particles, 1);
}
end = clock();
time_diff = (double)(end - start) / CLOCKS_PER_SEC;
printf("CPU execution time: %f seconds\n", time_diff);
cudaMemcpy(results_particles, particles_gpu, PARTICLES * sizeof(Particle), cudaMemcpyDeviceToHost);
if (particle_compare(PARTICLES, particles, results_particles))
printf("Comparison Successful\n");
else
printf("Error\n");
cudaFree(particles_gpu);
free(particles);
free(results_particles);
return 0;
} |
4f10f52129686cd220b86d08239f60047e485eae.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "unit_test.h"
#include "vector.h"
#include "matrix.h"
#include "norm.h"
#include "ctime"
namespace amgx
{
DECLARE_UNITTEST_BEGIN(NormTests);
void test_get_norm(Vector_h &block_nrm, const Vector_h &vec, const NormType norm_type, int bdim = 1, int offset = 0)
{
UNITTEST_ASSERT_TRUE_DESC("Only L1 and L2 are supported in this unit test", norm_type == L1 || norm_type == L2);
block_nrm.resize(bdim, ValueTypeB(0));
std::vector <typename Vector_h::value_type> norm(bdim, 0.l);
if (norm_type == L1)
{
for (int i = 0; i < (vec.size() / bdim); i++)
for (int j = 0; j < bdim; j++)
{
norm[j] += ::fabs(vec[(offset + i) * bdim + j]);
}
for (int j = 0; j < bdim; j++)
{
block_nrm[j] = norm[j];
}
}
else if (norm_type == L2)
{
for (int i = 0; i < (vec.size() / bdim); i++)
for (int j = 0; j < bdim; j++)
{
norm[j] += vec[(offset + i) * bdim + j] * vec[(offset + i) * bdim + j];
}
for (int j = 0; j < bdim; j++)
{
block_nrm[j] = sqrt(norm[j]);
}
}
}
void check_norm(const int size, const int bdim, const NormType norm_type)
{
Matrix_h A;
//Workaround to test large vector sizes:
A.set_initialized(0);
A.set_block_dimx(bdim);
A.set_block_dimy(bdim);
A.set_num_nz(size);
A.set_num_rows(size);
A.set_num_cols(size);
A.set_initialized(1);
//Matrix_h A(size,size,size, bdim, bdim, 0);
//generateMatrixRandomStruct<TConfig_h>::generateExact(A, size_vec, true , bdim, false);
int offset = 0;
Vector_h vec(size);
vec.set_block_dimx(bdim);
fillRandom<Vector_h>::fill(vec);
Matrix<TConfig> A_try(A);
Vector<TConfig> vec_try(vec);
Vector_h norm_ref(bdim), norm_try(bdim);
test_get_norm(norm_ref, vec, norm_type, bdim, offset);
get_norm( A_try, vec_try, bdim, norm_type, norm_try );
this->PrintOnFail(": error in checking norm %s, blocksize %d, size %d\n", norm_type == L1 ? "L1" : "L2", bdim, size);
// summing on gpu and host might produce different numbers due to order of summation for L1, tuning numbers a little bit
UNITTEST_ASSERT_EQUAL_TOL(norm_ref, norm_try, getTolerance<typename Vector_h::value_type>::get()*(norm_type == L1 ? size : 1.));
}
void run()
{
randomize( 10 );
for (int bsize = 1; bsize <= 10; bsize ++)
{
int size = 10000 * bsize;
check_norm(size, bsize, L1);
check_norm(size, bsize, L2);
}
}
DECLARE_UNITTEST_END(NormTests);
#define AMGX_CASE_LINE(CASE) NormTests <TemplateMode<CASE>::Type> NormTests_##CASE;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} //namespace amgx
| 4f10f52129686cd220b86d08239f60047e485eae.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "unit_test.h"
#include "vector.h"
#include "matrix.h"
#include "norm.h"
#include "ctime"
namespace amgx
{
DECLARE_UNITTEST_BEGIN(NormTests);
void test_get_norm(Vector_h &block_nrm, const Vector_h &vec, const NormType norm_type, int bdim = 1, int offset = 0)
{
UNITTEST_ASSERT_TRUE_DESC("Only L1 and L2 are supported in this unit test", norm_type == L1 || norm_type == L2);
block_nrm.resize(bdim, ValueTypeB(0));
std::vector <typename Vector_h::value_type> norm(bdim, 0.l);
if (norm_type == L1)
{
for (int i = 0; i < (vec.size() / bdim); i++)
for (int j = 0; j < bdim; j++)
{
norm[j] += std::fabs(vec[(offset + i) * bdim + j]);
}
for (int j = 0; j < bdim; j++)
{
block_nrm[j] = norm[j];
}
}
else if (norm_type == L2)
{
for (int i = 0; i < (vec.size() / bdim); i++)
for (int j = 0; j < bdim; j++)
{
norm[j] += vec[(offset + i) * bdim + j] * vec[(offset + i) * bdim + j];
}
for (int j = 0; j < bdim; j++)
{
block_nrm[j] = sqrt(norm[j]);
}
}
}
void check_norm(const int size, const int bdim, const NormType norm_type)
{
Matrix_h A;
//Workaround to test large vector sizes:
A.set_initialized(0);
A.set_block_dimx(bdim);
A.set_block_dimy(bdim);
A.set_num_nz(size);
A.set_num_rows(size);
A.set_num_cols(size);
A.set_initialized(1);
//Matrix_h A(size,size,size, bdim, bdim, 0);
//generateMatrixRandomStruct<TConfig_h>::generateExact(A, size_vec, true , bdim, false);
int offset = 0;
Vector_h vec(size);
vec.set_block_dimx(bdim);
fillRandom<Vector_h>::fill(vec);
Matrix<TConfig> A_try(A);
Vector<TConfig> vec_try(vec);
Vector_h norm_ref(bdim), norm_try(bdim);
test_get_norm(norm_ref, vec, norm_type, bdim, offset);
get_norm( A_try, vec_try, bdim, norm_type, norm_try );
this->PrintOnFail(": error in checking norm %s, blocksize %d, size %d\n", norm_type == L1 ? "L1" : "L2", bdim, size);
// summing on gpu and host might produce different numbers due to order of summation for L1, tuning numbers a little bit
UNITTEST_ASSERT_EQUAL_TOL(norm_ref, norm_try, getTolerance<typename Vector_h::value_type>::get()*(norm_type == L1 ? size : 1.));
}
void run()
{
randomize( 10 );
for (int bsize = 1; bsize <= 10; bsize ++)
{
int size = 10000 * bsize;
check_norm(size, bsize, L1);
check_norm(size, bsize, L2);
}
}
DECLARE_UNITTEST_END(NormTests);
#define AMGX_CASE_LINE(CASE) NormTests <TemplateMode<CASE>::Type> NormTests_##CASE;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} //namespace amgx
|
5e5f5bea5292ae99fd7ad8a2210823a6760a5f47.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
#ifndef BLOCKSIZE
#define BLOCKSIZE 128 // number of threads per block
#endif
#define NUMBLOCKS 256
// One thread per lag: dSums[gid] = sum_i dA[i] * dA[i + gid].
// dA must hold the signal duplicated back-to-back (2*kSignalLength
// samples) so dA[i + gid] never reads out of bounds for any lag the
// launch produces; dSums needs one float per launched thread.
__global__ void AutoCorr( float *dA, float *dSums )
{
	// Signal length is baked in: it must match the `Size` value read from
	// signal.txt and the total thread count (NUMBLOCKS*BLOCKSIZE) in main.
	const int kSignalLength = 32768;
	int gid = blockIdx.x*blockDim.x + threadIdx.x;
	int shift = gid;
	float sum = 0.f;	// float literal for a float accumulator
	for( int i = 0; i < kSignalLength; i++ )
	{
		sum += dA[i] * dA[i + shift];
	}
	dSums[shift] = sum;
}
// Host driver: reads the signal from signal.txt, duplicates it so the
// kernel can index past the end, times one AutoCorr launch with HIP
// events, copies the lag sums back, and reports throughput.
// NOTE: the kernel hard-codes the signal length to 32768, so this program
// assumes Size == NUMBLOCKS*BLOCKSIZE == 32768.
int
main( int argc, char *argv[ ] )
{
	// Read the signal: first the sample count, then the samples.
	FILE *fp = fopen( "signal.txt", "r" );
	if( fp == NULL )
	{
		fprintf( stderr, "Cannot open file 'signal.txt'\n" );
		exit( 1 );
	}
	int Size;
	if( fscanf( fp, "%d", &Size ) != 1 || Size <= 0 )
	{
		fprintf( stderr, "Bad or missing size in 'signal.txt'\n" );
		fclose( fp );
		exit( 1 );
	}
	float *hA = new float[ 2*Size ];
	float *hSums = new float[ 1*Size ];
	for( int i = 0; i < Size; i++ )
	{
		fscanf( fp, "%f", &hA[i] );
		hA[i+Size] = hA[i]; // duplicate the array
	}
	fclose( fp );

	// Allocate device memory.
	hipError_t status;
	float *dA, *dSums;
	status = hipMalloc( (void **)(&dA), 2*Size*sizeof(float) );
	checkCudaErrors( status );
	status = hipMalloc( (void **)(&dSums), Size*sizeof(float) );
	checkCudaErrors( status );

	// Copy the signal down; the sums buffer is produced by the kernel, so
	// it only needs clearing (the old code copied the *uninitialized*
	// hSums array to the device instead).
	status = hipMemcpy( dA, hA, 2*Size*sizeof(float), hipMemcpyHostToDevice );
	checkCudaErrors( status );
	status = hipMemset( dSums, 0, Size*sizeof(float) );
	checkCudaErrors( status );

	// setup the execution parameters: one thread per lag
	dim3 threads(BLOCKSIZE, 1, 1 );
	dim3 grid( NUMBLOCKS, 1, 1 );

	hipDeviceSynchronize( );

	// allocate CUDA events that we'll use for timing:
	hipEvent_t start, stop;
	status = hipEventCreate( &start );
	checkCudaErrors( status );
	status = hipEventCreate( &stop );
	checkCudaErrors( status );

	status = hipEventRecord( start, NULL );
	checkCudaErrors( status );

	// execute the kernel:
	hipLaunchKernelGGL(( AutoCorr), dim3(grid),dim3(threads) , 0, 0, dA,dSums);
	status = hipGetLastError( );	// catch bad launch configurations
	checkCudaErrors( status );

	status = hipEventRecord( stop, NULL );
	checkCudaErrors( status );
	status = hipEventSynchronize( stop );
	checkCudaErrors( status );

	float msecTotal = 0.0f;
	status = hipEventElapsedTime( &msecTotal, start, stop );
	checkCudaErrors( status );
	double secondsTotal = 0.001 * (double)msecTotal;

	// Retrieve the autocorrelation results (previously they were never
	// copied back, leaving hSums uninitialized).
	status = hipMemcpy( hSums, dSums, Size*sizeof(float), hipMemcpyDeviceToHost );
	checkCudaErrors( status );

	// Throughput: Size lags x Size samples, one multiply + one add each.
	double Performance = 2*(double)Size*(double)Size/ ( secondsTotal ) / 1000000.;
	fprintf( stderr,"Performance = %8.2lf MegaOperations/Sec\n", Performance );

	//Saving the Sums array in a matlab m file
	// std::ofstream FileTemp;
	// FileTemp.open("FSums.m");
	// FileTemp << "Sums = [";
	// FileTemp << hSums[0];
	// for( int k = 1; k < Size; k++)
	// {
	// FileTemp << "," << hSums[k];
	// }
	// FileTemp<<"];";
	// FileTemp.close();

	// Cleanup (events, device buffers, host buffers).
	status = hipEventDestroy( start );
	checkCudaErrors( status );
	status = hipEventDestroy( stop );
	checkCudaErrors( status );
	hipFree( dA );
	hipFree( dSums );
	delete [] hA;
	delete [] hSums;
	return 0;
} | 5e5f5bea5292ae99fd7ad8a2210823a6760a5f47.cu | // System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
#ifndef BLOCKSIZE
#define BLOCKSIZE 128 // number of threads per block
#endif
#define NUMBLOCKS 256
// One thread per lag: dSums[gid] = sum_i dA[i] * dA[i + gid].
// dA must hold the signal duplicated back-to-back (2*kSignalLength
// samples) so dA[i + gid] never reads out of bounds for any lag the
// launch produces; dSums needs one float per launched thread.
__global__ void AutoCorr( float *dA, float *dSums )
{
	// Signal length is baked in: it must match the `Size` value read from
	// signal.txt and the total thread count (NUMBLOCKS*BLOCKSIZE) in main.
	const int kSignalLength = 32768;
	int gid = blockIdx.x*blockDim.x + threadIdx.x;
	int shift = gid;
	float sum = 0.f;	// float literal for a float accumulator
	for( int i = 0; i < kSignalLength; i++ )
	{
		sum += dA[i] * dA[i + shift];
	}
	dSums[shift] = sum;
}
// Host driver: reads the signal from signal.txt, duplicates it so the
// kernel can index past the end, times one AutoCorr launch with CUDA
// events, copies the lag sums back, and reports throughput.
// NOTE: the kernel hard-codes the signal length to 32768, so this program
// assumes Size == NUMBLOCKS*BLOCKSIZE == 32768.
int
main( int argc, char *argv[ ] )
{
	// Read the signal: first the sample count, then the samples.
	FILE *fp = fopen( "signal.txt", "r" );
	if( fp == NULL )
	{
		fprintf( stderr, "Cannot open file 'signal.txt'\n" );
		exit( 1 );
	}
	int Size;
	if( fscanf( fp, "%d", &Size ) != 1 || Size <= 0 )
	{
		fprintf( stderr, "Bad or missing size in 'signal.txt'\n" );
		fclose( fp );
		exit( 1 );
	}
	float *hA = new float[ 2*Size ];
	float *hSums = new float[ 1*Size ];
	for( int i = 0; i < Size; i++ )
	{
		fscanf( fp, "%f", &hA[i] );
		hA[i+Size] = hA[i]; // duplicate the array
	}
	fclose( fp );

	// Allocate device memory.
	cudaError_t status;
	float *dA, *dSums;
	status = cudaMalloc( (void **)(&dA), 2*Size*sizeof(float) );
	checkCudaErrors( status );
	status = cudaMalloc( (void **)(&dSums), Size*sizeof(float) );
	checkCudaErrors( status );

	// Copy the signal down; the sums buffer is produced by the kernel, so
	// it only needs clearing (the old code copied the *uninitialized*
	// hSums array to the device instead).
	status = cudaMemcpy( dA, hA, 2*Size*sizeof(float), cudaMemcpyHostToDevice );
	checkCudaErrors( status );
	status = cudaMemset( dSums, 0, Size*sizeof(float) );
	checkCudaErrors( status );

	// setup the execution parameters: one thread per lag
	dim3 threads(BLOCKSIZE, 1, 1 );
	dim3 grid( NUMBLOCKS, 1, 1 );

	cudaDeviceSynchronize( );

	// allocate CUDA events that we'll use for timing:
	cudaEvent_t start, stop;
	status = cudaEventCreate( &start );
	checkCudaErrors( status );
	status = cudaEventCreate( &stop );
	checkCudaErrors( status );

	status = cudaEventRecord( start, NULL );
	checkCudaErrors( status );

	// execute the kernel:
	AutoCorr<<< grid,threads >>>(dA,dSums);
	status = cudaGetLastError( );	// catch bad launch configurations
	checkCudaErrors( status );

	status = cudaEventRecord( stop, NULL );
	checkCudaErrors( status );
	status = cudaEventSynchronize( stop );
	checkCudaErrors( status );

	float msecTotal = 0.0f;
	status = cudaEventElapsedTime( &msecTotal, start, stop );
	checkCudaErrors( status );
	double secondsTotal = 0.001 * (double)msecTotal;

	// Retrieve the autocorrelation results (previously they were never
	// copied back, leaving hSums uninitialized).
	status = cudaMemcpy( hSums, dSums, Size*sizeof(float), cudaMemcpyDeviceToHost );
	checkCudaErrors( status );

	// Throughput: Size lags x Size samples, one multiply + one add each.
	double Performance = 2*(double)Size*(double)Size/ ( secondsTotal ) / 1000000.;
	fprintf( stderr,"Performance = %8.2lf MegaOperations/Sec\n", Performance );

	//Saving the Sums array in a matlab m file
	// std::ofstream FileTemp;
	// FileTemp.open("FSums.m");
	// FileTemp << "Sums = [";
	// FileTemp << hSums[0];
	// for( int k = 1; k < Size; k++)
	// {
	// FileTemp << "," << hSums[k];
	// }
	// FileTemp<<"];";
	// FileTemp.close();

	// Cleanup (events, device buffers, host buffers).
	status = cudaEventDestroy( start );
	checkCudaErrors( status );
	status = cudaEventDestroy( stop );
	checkCudaErrors( status );
	cudaFree( dA );
	cudaFree( dSums );
	delete [] hA;
	delete [] hSums;
	return 0;
} |
6ffd871ed824d00ff0c4324e231978f82ef90e00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------*-CUDA-*----------------------------------//
// Copyright 2021-2023 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file orange/univ/SimpleUnitTracker.test.cu
//---------------------------------------------------------------------------//
#include "SimpleUnitTracker.test.hh"
#include "corecel/sys/KernelParamCalculator.device.hh"
namespace celeritas
{
namespace test
{
namespace
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
// One thread per track slot: builds an InitializingExecutor over the
// params/states views and runs it for this thread's TrackSlotId.
// Threads beyond states.size() exit early.
__global__ void initialize_kernel(ParamsRef<MemSpace::device> const params,
                                  StateRef<MemSpace::device> const states)
{
    auto tid = KernelParamCalculator::thread_id();
    if (tid.get() >= states.size())
        return;
    InitializingExecutor<> calc_thread{params, states};
    calc_thread(TrackSlotId{tid.unchecked_get()});
}
} // namespace
//---------------------------------------------------------------------------//
// TESTING INTERFACE
//---------------------------------------------------------------------------//
//! Run on device and return results
// Launch initialize_kernel with one thread per state slot (block size
// taken from the device defaults) and block until the device finishes.
void test_initialize(ParamsRef<MemSpace::device> const& params,
                     StateRef<MemSpace::device> const& state)
{
    CELER_LAUNCH_KERNEL(initialize,
                        device().default_block_size(),
                        state.size(),
                        0,
                        params,
                        state);
    // Synchronize so execution errors surface here rather than later.
    CELER_DEVICE_CALL_PREFIX(DeviceSynchronize());
}
//---------------------------------------------------------------------------//
} // namespace test
} // namespace celeritas
| 6ffd871ed824d00ff0c4324e231978f82ef90e00.cu | //---------------------------------*-CUDA-*----------------------------------//
// Copyright 2021-2023 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file orange/univ/SimpleUnitTracker.test.cu
//---------------------------------------------------------------------------//
#include "SimpleUnitTracker.test.hh"
#include "corecel/sys/KernelParamCalculator.device.hh"
namespace celeritas
{
namespace test
{
namespace
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
// One thread per track slot: builds an InitializingExecutor over the
// params/states views and runs it for this thread's TrackSlotId.
// Threads beyond states.size() exit early.
__global__ void initialize_kernel(ParamsRef<MemSpace::device> const params,
                                  StateRef<MemSpace::device> const states)
{
    auto tid = KernelParamCalculator::thread_id();
    if (tid.get() >= states.size())
        return;
    InitializingExecutor<> calc_thread{params, states};
    calc_thread(TrackSlotId{tid.unchecked_get()});
}
} // namespace
//---------------------------------------------------------------------------//
// TESTING INTERFACE
//---------------------------------------------------------------------------//
//! Run on device and return results
// Launch initialize_kernel with one thread per state slot (block size
// taken from the device defaults) and block until the device finishes.
void test_initialize(ParamsRef<MemSpace::device> const& params,
                     StateRef<MemSpace::device> const& state)
{
    CELER_LAUNCH_KERNEL(initialize,
                        device().default_block_size(),
                        state.size(),
                        0,
                        params,
                        state);
    // Synchronize so execution errors surface here rather than later.
    CELER_DEVICE_CALL_PREFIX(DeviceSynchronize());
}
//---------------------------------------------------------------------------//
} // namespace test
} // namespace celeritas
|
6ce4dd5a182da9d229ddeaf2e86c420dbdc58f0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Stub for the blocked L1/Minkowski distance kernel: the real
// implementation needs compute capability > 2.0, so this fallback only
// emits a warning.  Parameters mirror the real kernel (A: lda-strided
// d x nrows, B: ldb-strided d x ncols, C: ldc-strided output, p: norm
// exponent) but are intentionally unused here.
__global__ void __l1dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
  printf("Warning, l1dist not supported on arch <= 200\n");
} | 6ce4dd5a182da9d229ddeaf2e86c420dbdc58f0e.cu | #include "includes.h"
// Stub for the blocked L1/Minkowski distance kernel: the real
// implementation needs compute capability > 2.0, so this fallback only
// emits a warning.  Parameters mirror the real kernel (A: lda-strided
// d x nrows, B: ldb-strided d x ncols, C: ldc-strided output, p: norm
// exponent) but are intentionally unused here.
__global__ void __l1dist(float *A, int lda, float *B, int ldb, float *C, int ldc, int d, int nrows, int ncols, float p) {
  printf("Warning, l1dist not supported on arch <= 200\n");
} |
20336cec992f5005e750f79f3479708aee920ac4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/sgd/fp32_momentum_sgd_op.h"
namespace caffe2 {
namespace {
// Fused momentum-SGD step on FP32 data, vectorized as float2 (each loop
// iteration updates two consecutive scalars, so the grid-stride loop runs
// over n = N/2 pairs).  Per pair:
//   ng = g + wd * param              (weight decay folded into the grad)
//   nm = mom * m + lr * ng           (new momentum buffer)
// then classic: ng = nm, param -= nm; or Nesterov:
//   ng = (1 + mom) * nm - mom * m,  param -= ng.
// All arithmetic uses round-to-nearest intrinsics so results are
// reproducible.  Requires compute capability >= 5.3 (see the #if guard);
// otherwise the kernel asserts.
// NOTE(review): an odd N leaves the final scalar untouched (N/2
// truncates) -- the host wrapper's trailing comment suggests this is
// accepted; confirm callers guarantee even N.
__global__ void FP32MomentumSGDKernel(
    int N,
    const float2* g,
    const float2* m,
    float2* ng,
    float2* nm,
    const float* lr,
    const float mom,
    bool nesterov,
    const float wd,
    float2* param) {
#if __CUDA_ARCH__ >= 530
  // Learning rate is read from device memory (single-element buffer).
  const float lr2 = lr[0];
  const float LR = lr2;
  const float momentum = mom;
  const float weight_decay = wd;
  int n = N / 2;
  if (!nesterov) {
    CUDA_1D_KERNEL_LOOP(i, n) {
      // ng = g + wd * param, per component.
      ng[i].x = __fmaf_rn(weight_decay, param[i].x, g[i].x);
      ng[i].y = __fmaf_rn(weight_decay, param[i].y, g[i].y);
      float2 mi_float2 = m[i];
      float2 adjusted_gradient_float2;
      adjusted_gradient_float2.x =
          __fmaf_rn(LR, ng[i].x, __fmul_rn(momentum, mi_float2.x));
      adjusted_gradient_float2.y =
          __fmaf_rn(LR, ng[i].y, __fmul_rn(momentum, mi_float2.y));
      nm[i] = adjusted_gradient_float2;
      ng[i] = adjusted_gradient_float2;
      // Null-check kept from the original, though param was already
      // dereferenced above for the weight-decay term.
      if (param) {
        param[i].x = __fsub_rn(param[i].x, adjusted_gradient_float2.x);
        param[i].y = __fsub_rn(param[i].y, adjusted_gradient_float2.y);
      }
    }
  } else {
    CUDA_1D_KERNEL_LOOP(i, n) {
      // computing the term (grad + lambda*weight)
      // might need to change in case of denormalization
      ng[i].x = __fmaf_rn(weight_decay, param[i].x, g[i].x);
      ng[i].y = __fmaf_rn(weight_decay, param[i].y, g[i].y);
      const float2 mi_float2 = m[i];
      float2 mom_mi_float2;
      mom_mi_float2.x = __fmul_rn(momentum, mi_float2.x);
      mom_mi_float2.y = __fmul_rn(momentum, mi_float2.y);
      float2 mi_new_float2;
      mi_new_float2.x = __fmaf_rn(LR, ng[i].x, mom_mi_float2.x);
      mi_new_float2.y = __fmaf_rn(LR, ng[i].y, mom_mi_float2.y);
      nm[i] = mi_new_float2;
      // Nesterov look-ahead: ng = (1 + momentum)*nm - momentum*m.
      ng[i].x = __fsub_rn(
          __fmaf_rn(mi_new_float2.x, momentum, mi_new_float2.x),
          mom_mi_float2.x);
      ng[i].y = __fsub_rn(
          __fmaf_rn(mi_new_float2.y, momentum, mi_new_float2.y),
          mom_mi_float2.y);
      if (param) {
        param[i].x = __fsub_rn(param[i].x, ng[i].x);
        param[i].y = __fsub_rn(param[i].y, ng[i].y);
      }
    }
  }
#else
  CUDA_KERNEL_ASSERT(false);
#endif // CAFFE_HAS_CUDA_FP16
}
}
// Host launcher: reinterprets the flat float buffers as float2 so the
// kernel handles two scalars per work item, and sizes the grid for N/2
// items.  `lr` is a device pointer to a single learning-rate value; the
// update runs on the context's stream.
template <>
void fp32_momentum_sgd_update<CUDAContext>(
    int N,
    const float* g,
    const float* m,
    float* ng,
    float* nm,
    const float* lr,
    float momentum,
    bool nesterov,
    float weight_decay,
    float* param,
    CUDAContext* context) {
  hipLaunchKernelGGL(( FP32MomentumSGDKernel),
      dim3(CAFFE_GET_BLOCKS(N / 2)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context->cuda_stream(),
      N,
      reinterpret_cast<const float2*>(g),
      reinterpret_cast<const float2*>(m),
      reinterpret_cast<float2*>(ng),
      reinterpret_cast<float2*>(nm),
      lr,
      momentum,
      nesterov,
      weight_decay,
      reinterpret_cast<float2*>(param));
  // not setting N to N/2
  // TODO_ check float performance vs float2
}
// Register the operator for CUDA and describe its schema: 4 inputs,
// 3 outputs; inputs 0, 1 and 3 may alias outputs 0, 1 and 2 (in-place
// update), and output shapes mirror those inputs.
REGISTER_CUDA_OPERATOR(
    FP32MomentumSGDUpdate,
    FP32MomentumSGDUpdateOp<float, CUDAContext>);
OPERATOR_SCHEMA(FP32MomentumSGDUpdate)
    .NumInputs(4)
    .NumOutputs(3)
    .AllowInplace({{0, 0}, {1, 1}, {3, 2}})
    .TensorInferenceFunction([](const OperatorDef& /* unused */,
                                const vector<TensorShape>& in) {
      vector<TensorShape> out(3);
      out[0] = in[0];
      out[1] = in[1];
      out[2] = in[3];
      return out;
    })
    .SetDoc(R"DOC(
Computes the momentum SGD update similarly to the MomentumSGDUpdateOp,
however this op also performs the weight decay update at the same time, thus
making it more efficient.
This op is also functionally equivalent to the FP16MomentumSGDUpdateOp, however
it expects FP32 data and performs its updates in FP32 precision.
)DOC");
}
| 20336cec992f5005e750f79f3479708aee920ac4.cu | #include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/sgd/fp32_momentum_sgd_op.h"
namespace caffe2 {
namespace {
// Fused momentum-SGD step on FP32 data, vectorized as float2 (each loop
// iteration updates two consecutive scalars, so the grid-stride loop runs
// over n = N/2 pairs).  Per pair:
//   ng = g + wd * param              (weight decay folded into the grad)
//   nm = mom * m + lr * ng           (new momentum buffer)
// then classic: ng = nm, param -= nm; or Nesterov:
//   ng = (1 + mom) * nm - mom * m,  param -= ng.
// All arithmetic uses round-to-nearest intrinsics so results are
// reproducible.  Requires compute capability >= 5.3 (see the #if guard);
// otherwise the kernel asserts.
// NOTE(review): an odd N leaves the final scalar untouched (N/2
// truncates) -- the host wrapper's trailing comment suggests this is
// accepted; confirm callers guarantee even N.
__global__ void FP32MomentumSGDKernel(
    int N,
    const float2* g,
    const float2* m,
    float2* ng,
    float2* nm,
    const float* lr,
    const float mom,
    bool nesterov,
    const float wd,
    float2* param) {
#if __CUDA_ARCH__ >= 530
  // Learning rate is read from device memory (single-element buffer).
  const float lr2 = lr[0];
  const float LR = lr2;
  const float momentum = mom;
  const float weight_decay = wd;
  int n = N / 2;
  if (!nesterov) {
    CUDA_1D_KERNEL_LOOP(i, n) {
      // ng = g + wd * param, per component.
      ng[i].x = __fmaf_rn(weight_decay, param[i].x, g[i].x);
      ng[i].y = __fmaf_rn(weight_decay, param[i].y, g[i].y);
      float2 mi_float2 = m[i];
      float2 adjusted_gradient_float2;
      adjusted_gradient_float2.x =
          __fmaf_rn(LR, ng[i].x, __fmul_rn(momentum, mi_float2.x));
      adjusted_gradient_float2.y =
          __fmaf_rn(LR, ng[i].y, __fmul_rn(momentum, mi_float2.y));
      nm[i] = adjusted_gradient_float2;
      ng[i] = adjusted_gradient_float2;
      // Null-check kept from the original, though param was already
      // dereferenced above for the weight-decay term.
      if (param) {
        param[i].x = __fsub_rn(param[i].x, adjusted_gradient_float2.x);
        param[i].y = __fsub_rn(param[i].y, adjusted_gradient_float2.y);
      }
    }
  } else {
    CUDA_1D_KERNEL_LOOP(i, n) {
      // computing the term (grad + lambda*weight)
      // might need to change in case of denormalization
      ng[i].x = __fmaf_rn(weight_decay, param[i].x, g[i].x);
      ng[i].y = __fmaf_rn(weight_decay, param[i].y, g[i].y);
      const float2 mi_float2 = m[i];
      float2 mom_mi_float2;
      mom_mi_float2.x = __fmul_rn(momentum, mi_float2.x);
      mom_mi_float2.y = __fmul_rn(momentum, mi_float2.y);
      float2 mi_new_float2;
      mi_new_float2.x = __fmaf_rn(LR, ng[i].x, mom_mi_float2.x);
      mi_new_float2.y = __fmaf_rn(LR, ng[i].y, mom_mi_float2.y);
      nm[i] = mi_new_float2;
      // Nesterov look-ahead: ng = (1 + momentum)*nm - momentum*m.
      ng[i].x = __fsub_rn(
          __fmaf_rn(mi_new_float2.x, momentum, mi_new_float2.x),
          mom_mi_float2.x);
      ng[i].y = __fsub_rn(
          __fmaf_rn(mi_new_float2.y, momentum, mi_new_float2.y),
          mom_mi_float2.y);
      if (param) {
        param[i].x = __fsub_rn(param[i].x, ng[i].x);
        param[i].y = __fsub_rn(param[i].y, ng[i].y);
      }
    }
  }
#else
  CUDA_KERNEL_ASSERT(false);
#endif // CAFFE_HAS_CUDA_FP16
}
}
// Host launcher: reinterprets the flat float buffers as float2 so the
// kernel handles two scalars per work item, and sizes the grid for N/2
// items.  `lr` is a device pointer to a single learning-rate value; the
// update runs on the context's stream.
template <>
void fp32_momentum_sgd_update<CUDAContext>(
    int N,
    const float* g,
    const float* m,
    float* ng,
    float* nm,
    const float* lr,
    float momentum,
    bool nesterov,
    float weight_decay,
    float* param,
    CUDAContext* context) {
  FP32MomentumSGDKernel<<<
      CAFFE_GET_BLOCKS(N / 2),
      CAFFE_CUDA_NUM_THREADS,
      0,
      context->cuda_stream()>>>(
      N,
      reinterpret_cast<const float2*>(g),
      reinterpret_cast<const float2*>(m),
      reinterpret_cast<float2*>(ng),
      reinterpret_cast<float2*>(nm),
      lr,
      momentum,
      nesterov,
      weight_decay,
      reinterpret_cast<float2*>(param));
  // not setting N to N/2
  // TODO_ check float performance vs float2
}
FP32MomentumSGDUpdate,
FP32MomentumSGDUpdateOp<float, CUDAContext>);
OPERATOR_SCHEMA(FP32MomentumSGDUpdate)
.NumInputs(4)
.NumOutputs(3)
.AllowInplace({{0, 0}, {1, 1}, {3, 2}})
.TensorInferenceFunction([](const OperatorDef& /* unused */,
const vector<TensorShape>& in) {
vector<TensorShape> out(3);
out[0] = in[0];
out[1] = in[1];
out[2] = in[3];
return out;
})
.SetDoc(R"DOC(
Computes the momentum SGD update similarly to the MomentumSGDUpdateOp,
however this op also performs the weight decay update at the same time, thus
making it more efficient.
This op is also functionally equivalent to the FP16MomentumSGDUpdateOp, however
it expects FP32 data and performs its updates in FP32 precision.
)DOC");
}
|
8836afef22267ff642fdc41647c6ad1deaf7a5f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
// Maps an output index to the start offset of its pooling window.
// `sample` jitters the placement (presumably a value in [0, 1) drawn by
// the caller's randomSamples -- confirm); the last output index is pinned
// so the final window ends flush with the input edge.  The value is an
// integer offset computed with the same float->int truncations as the CPU
// reference, returned through a float.
__device__ inline float getInterval(float sample,
                                    int index,
                                    int inputSize,
                                    int outputSize,
                                    int poolSize) {
  float scale = (float)(inputSize - poolSize) / (float) (outputSize - 1);
  if (index == outputSize - 1) {
    // Clamp the last window against the input boundary.
    return inputSize - poolSize;
  }
  return (int) ((index + sample) * scale) - (int) (sample * scale);
}
// We template on poolSizeW to allow the innermost loop to be unrolled
// Forward pass: one thread per output (h, w) point; blockIdx.y selects the
// plane and blockIdx.z the batch element.  Each thread derives its
// pseudo-random pooling window origin from `samples` via getInterval,
// scans the window for the maximum, and stores both the value and its
// 1-based (Lua-style) flattened input index.  PoolSizeWStatic == -1
// selects the dynamic-width path; otherwise the inner loop is unrolled.
template <int PoolSizeWStatic>
__global__ void SpatialFractionalMaxPooling_updateOutput(
    THCDeviceTensor<float, 4> input,
    THCDeviceTensor<float, 4> output,
    THCDeviceTensor<float, 4> indices,
    THCDeviceTensor<float, 3> samples,
    int poolSizeW, int poolSizeH) {
  // Output (h, w) point that this thread is responsible for
  int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Each thread generates a specific output point
  if (ourOutputPoint < output.getSize(2) * output.getSize(3)) {
    int outputW = ourOutputPoint % output.getSize(3);
    int outputH = ourOutputPoint / output.getSize(3);
    // Window origin from the per-(batch, plane) random samples.
    int poolW = getInterval(samples[batch][plane][0], outputW,
                            input.getSize(3), output.getSize(3), poolSizeW);
    int poolH = getInterval(samples[batch][plane][1], outputH,
                            input.getSize(2), output.getSize(2), poolSizeH);
    float maxVal = -THInf;
    int maxIndex = -1;
    for (int h = poolH; h < poolH + poolSizeH; ++h) {
      if (PoolSizeWStatic == -1) {
        for (int w = poolW; w < poolW + poolSizeW; ++w) {
          float val = input[batch][plane][h][w];
          maxVal = fmaxf(val, maxVal);
          maxIndex = (maxVal == val) ? (h * input.getSize(3) + w) : maxIndex;
        }
      } else {
#pragma unroll
        for (int i = 0; i < PoolSizeWStatic; ++i) {
          int w = i + poolW;
          float val = input[batch][plane][h][w];
          maxVal = fmaxf(val, maxVal);
          maxIndex = (maxVal == val) ? (h * input.getSize(3) + w) : maxIndex;
        }
      }
    }
    assert(maxVal != -THInf);
    assert(maxIndex != -1);
    // +1 for Lua index
    indices[batch][plane][outputH][outputW] = maxIndex + 1;
    output[batch][plane][outputH][outputW] = maxVal;
  }
}
// Host wrapper for the forward pass: validates the 3D/4D input, resizes
// `output` and `indices`, upcasts 3D tensors to a 4D view with batch 1,
// and dispatches the kernel -- specialized (unrolled) for pool widths
// 2..7, dynamic (-1) otherwise.
void THNN_CudaSpatialFractionalMaxPooling_updateOutput(
    THCState *state,
    THCudaTensor *input,
    THCudaTensor *output,
    int outputW, int outputH,
    int poolSizeW, int poolSizeH,
    THCudaTensor *indices,
    THCudaTensor *randomSamples)
{
  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;
  long numBatch = 1;
  long numInputDims = THCudaTensor_nDimension(state, input);
  THArgCheck(numInputDims == 3 || numInputDims == 4, 2,
             "3D or 4D (batch mode) tensor expected");
  // In batch mode dimension 0 is the batch, shifting plane/h/w by one.
  if (numInputDims == 4) {
    numBatch = THCudaTensor_size(state, input, 0);
    planeDim++;
    dimh++;
    dimw++;
  }
  /* sizes */
  long numPlanes = THCudaTensor_size(state, input, planeDim);
  long inputH = THCudaTensor_size(state, input, dimh);
  long inputW = THCudaTensor_size(state, input, dimw);
  THArgCheck(outputH + poolSizeH - 1 < inputH, 6,
             "poolSizeH too large relative to input height");
  THArgCheck(outputW + poolSizeW - 1 < inputW, 5,
             "poolSizeW too large relative to input width");
  THCDeviceTensor<float, 4> devInput;
  THCDeviceTensor<float, 4> devOutput;
  THCDeviceTensor<float, 4> devIndices;
  THCDeviceTensor<float, 3> devSamples =
    toDeviceTensor<float, 3>(state, randomSamples);
  if (numInputDims == 3) {
    /* resize output */
    THCudaTensor_resize3d(state, output, numPlanes, outputH, outputW);
    /* indices will contain the locations for each output point */
    THCudaTensor_resize3d(state, indices, numPlanes, outputH, outputW);
    devInput = toDeviceTensor<float, 3>(state, input).upcastOuter<4>();
    devOutput = toDeviceTensor<float, 3>(state, output).upcastOuter<4>();
    devIndices = toDeviceTensor<float, 3>(state, indices).upcastOuter<4>();
  } else {
    THCudaTensor_resize4d(state, output, numBatch, numPlanes, outputH, outputW);
    /* indices will contain the locations for each output point */
    THCudaTensor_resize4d(state, indices, numBatch, numPlanes, outputH, outputW);
    devInput = toDeviceTensor<float, 4>(state, input);
    devOutput = toDeviceTensor<float, 4>(state, output);
    devIndices = toDeviceTensor<float, 4>(state, indices);
  }
  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
  dim3 grid(THCCeilDiv(outputPlaneSize, 128),
            devInput.getSize(1),
            devInput.getSize(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
#define SFMP_UPDATE_OUTPUT(POOL_W)                                      \
  hipLaunchKernelGGL(( SpatialFractionalMaxPooling_updateOutput<POOL_W>) \
      , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),  \
      devInput, devOutput, devIndices, devSamples, poolSizeW, poolSizeH);
#define SFMP_UPDATE_OUTPUT_CASE(POOL_W)                 \
  case POOL_W: SFMP_UPDATE_OUTPUT(POOL_W); break
  switch (poolSizeW) {
    SFMP_UPDATE_OUTPUT_CASE(2);
    SFMP_UPDATE_OUTPUT_CASE(3);
    SFMP_UPDATE_OUTPUT_CASE(4);
    SFMP_UPDATE_OUTPUT_CASE(5);
    SFMP_UPDATE_OUTPUT_CASE(6);
    SFMP_UPDATE_OUTPUT_CASE(7);
    default:
      // dynamic pool width
      SFMP_UPDATE_OUTPUT_CASE(-1);
  }
}
// Backward pass: one thread per output point scatters its gradOutput
// value back to the input location recorded (1-based) in `indices` during
// the forward pass.  atomicAdd is used because distinct output points may
// map to the same input cell.
__global__ void SpatialFractionalMaxPooling_updateGradInput(
  THCDeviceTensor<float, 4> gradInput,
  THCDeviceTensor<float, 4> gradOutput,
  THCDeviceTensor<float, 4> indices) {
  // Output (h, w) point that this thread is responsible for
  int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Each thread generates a specific output point
  if (ourOutputPoint < gradOutput.getSize(2) * gradOutput.getSize(3)) {
    int outputW = ourOutputPoint % gradOutput.getSize(3);
    int outputH = ourOutputPoint / gradOutput.getSize(3);
    // Undo the +1 Lua-style offset stored by the forward kernel.
    int index = indices[batch][plane][outputH][outputW] - 1;
    assert(index >= 0);
    int inputW = index % gradInput.getSize(3);
    int inputH = index / gradInput.getSize(3);
    assert(inputH < gradInput.getSize(2));
    atomicAdd(gradInput[batch][plane][inputH][inputW].data(),
              gradOutput[batch][plane][outputH][outputW]);
  }
}
// Host wrapper for the backward pass: checks gradOutput's spatial sizes,
// zero-fills gradInput (the kernel accumulates with atomicAdd), upcasts
// 3D tensors to 4D views, and launches the scatter kernel.
void THNN_CudaSpatialFractionalMaxPooling_updateGradInput(
    THCState *state,
    THCudaTensor *input,
    THCudaTensor *gradOutput,
    THCudaTensor *gradInput,
    int outputW, int outputH,
    int poolSizeW, int poolSizeH,
    THCudaTensor *indices)
{
  int dimh = 1;
  int dimw = 2;
  long numInputDims = THCudaTensor_nDimension(state, input);
  // In batch mode dimension 0 is the batch, shifting h/w by one.
  if (numInputDims == 4) {
    dimh++;
    dimw++;
  }
  /* sizes */
  long inputH = THCudaTensor_size(state, input, dimh);
  long inputW = THCudaTensor_size(state, input, dimw);
  THArgCheck(outputH == THCudaTensor_size(state, gradOutput, dimh), 3,
             "gradOutput height unexpected");
  THArgCheck(outputW == THCudaTensor_size(state, gradOutput, dimw), 3,
             "gradOutput width unexpected");
  /* resize */
  THCudaTensor_resizeAs(state, gradInput, input);
  THCudaTensor_zero(state, gradInput);
  THCDeviceTensor<float, 4> devGradInput;
  THCDeviceTensor<float, 4> devGradOutput;
  THCDeviceTensor<float, 4> devIndices;
  /* backprop */
  if (numInputDims == 3) {
    devGradInput = toDeviceTensor<float, 3>(state, gradInput).upcastOuter<4>();
    devGradOutput = toDeviceTensor<float, 3>(state, gradOutput).upcastOuter<4>();
    devIndices = toDeviceTensor<float, 3>(state, indices).upcastOuter<4>();
  } else {
    devGradInput = toDeviceTensor<float, 4>(state, gradInput);
    devGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
    devIndices = toDeviceTensor<float, 4>(state, indices);
  }
  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
  dim3 grid(THCCeilDiv(outputPlaneSize, 128),
            devGradInput.getSize(1),
            devGradInput.getSize(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
  hipLaunchKernelGGL(( SpatialFractionalMaxPooling_updateGradInput)
    , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
      devGradInput, devGradOutput, devIndices);
}
| 8836afef22267ff642fdc41647c6ad1deaf7a5f9.cu | #include "THCUNN.h"
#include "common.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
// Maps an output index to the start offset of its pooling window.
// `sample` jitters the placement (presumably a value in [0, 1) drawn by
// the caller's randomSamples -- confirm); the last output index is pinned
// so the final window ends flush with the input edge.  The value is an
// integer offset computed with the same float->int truncations as the CPU
// reference, returned through a float.
__device__ inline float getInterval(float sample,
                                    int index,
                                    int inputSize,
                                    int outputSize,
                                    int poolSize) {
  float scale = (float)(inputSize - poolSize) / (float) (outputSize - 1);
  if (index == outputSize - 1) {
    // Clamp the last window against the input boundary.
    return inputSize - poolSize;
  }
  return (int) ((index + sample) * scale) - (int) (sample * scale);
}
// We template on poolSizeW to allow the innermost loop to be unrolled
// Forward pass: one thread per output (h, w) point; blockIdx.y selects the
// plane and blockIdx.z the batch element.  Each thread derives its
// pseudo-random pooling window origin from `samples` via getInterval,
// scans the window for the maximum, and stores both the value and its
// 1-based (Lua-style) flattened input index.  PoolSizeWStatic == -1
// selects the dynamic-width path; otherwise the inner loop is unrolled.
template <int PoolSizeWStatic>
__global__ void SpatialFractionalMaxPooling_updateOutput(
    THCDeviceTensor<float, 4> input,
    THCDeviceTensor<float, 4> output,
    THCDeviceTensor<float, 4> indices,
    THCDeviceTensor<float, 3> samples,
    int poolSizeW, int poolSizeH) {
  // Output (h, w) point that this thread is responsible for
  int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Each thread generates a specific output point
  if (ourOutputPoint < output.getSize(2) * output.getSize(3)) {
    int outputW = ourOutputPoint % output.getSize(3);
    int outputH = ourOutputPoint / output.getSize(3);
    // Window origin from the per-(batch, plane) random samples.
    int poolW = getInterval(samples[batch][plane][0], outputW,
                            input.getSize(3), output.getSize(3), poolSizeW);
    int poolH = getInterval(samples[batch][plane][1], outputH,
                            input.getSize(2), output.getSize(2), poolSizeH);
    float maxVal = -THInf;
    int maxIndex = -1;
    for (int h = poolH; h < poolH + poolSizeH; ++h) {
      if (PoolSizeWStatic == -1) {
        for (int w = poolW; w < poolW + poolSizeW; ++w) {
          float val = input[batch][plane][h][w];
          maxVal = fmaxf(val, maxVal);
          maxIndex = (maxVal == val) ? (h * input.getSize(3) + w) : maxIndex;
        }
      } else {
#pragma unroll
        for (int i = 0; i < PoolSizeWStatic; ++i) {
          int w = i + poolW;
          float val = input[batch][plane][h][w];
          maxVal = fmaxf(val, maxVal);
          maxIndex = (maxVal == val) ? (h * input.getSize(3) + w) : maxIndex;
        }
      }
    }
    assert(maxVal != -THInf);
    assert(maxIndex != -1);
    // +1 for Lua index
    indices[batch][plane][outputH][outputW] = maxIndex + 1;
    output[batch][plane][outputH][outputW] = maxVal;
  }
}
// Host wrapper for the forward pass: validates the 3D/4D input, resizes
// `output` and `indices`, upcasts 3D tensors to a 4D view with batch 1,
// and dispatches the kernel -- specialized (unrolled) for pool widths
// 2..7, dynamic (-1) otherwise.
void THNN_CudaSpatialFractionalMaxPooling_updateOutput(
    THCState *state,
    THCudaTensor *input,
    THCudaTensor *output,
    int outputW, int outputH,
    int poolSizeW, int poolSizeH,
    THCudaTensor *indices,
    THCudaTensor *randomSamples)
{
  int planeDim = 0;
  int dimh = 1;
  int dimw = 2;
  long numBatch = 1;
  long numInputDims = THCudaTensor_nDimension(state, input);
  THArgCheck(numInputDims == 3 || numInputDims == 4, 2,
             "3D or 4D (batch mode) tensor expected");
  // In batch mode dimension 0 is the batch, shifting plane/h/w by one.
  if (numInputDims == 4) {
    numBatch = THCudaTensor_size(state, input, 0);
    planeDim++;
    dimh++;
    dimw++;
  }
  /* sizes */
  long numPlanes = THCudaTensor_size(state, input, planeDim);
  long inputH = THCudaTensor_size(state, input, dimh);
  long inputW = THCudaTensor_size(state, input, dimw);
  THArgCheck(outputH + poolSizeH - 1 < inputH, 6,
             "poolSizeH too large relative to input height");
  THArgCheck(outputW + poolSizeW - 1 < inputW, 5,
             "poolSizeW too large relative to input width");
  THCDeviceTensor<float, 4> devInput;
  THCDeviceTensor<float, 4> devOutput;
  THCDeviceTensor<float, 4> devIndices;
  THCDeviceTensor<float, 3> devSamples =
    toDeviceTensor<float, 3>(state, randomSamples);
  if (numInputDims == 3) {
    /* resize output */
    THCudaTensor_resize3d(state, output, numPlanes, outputH, outputW);
    /* indices will contain the locations for each output point */
    THCudaTensor_resize3d(state, indices, numPlanes, outputH, outputW);
    devInput = toDeviceTensor<float, 3>(state, input).upcastOuter<4>();
    devOutput = toDeviceTensor<float, 3>(state, output).upcastOuter<4>();
    devIndices = toDeviceTensor<float, 3>(state, indices).upcastOuter<4>();
  } else {
    THCudaTensor_resize4d(state, output, numBatch, numPlanes, outputH, outputW);
    /* indices will contain the locations for each output point */
    THCudaTensor_resize4d(state, indices, numBatch, numPlanes, outputH, outputW);
    devInput = toDeviceTensor<float, 4>(state, input);
    devOutput = toDeviceTensor<float, 4>(state, output);
    devIndices = toDeviceTensor<float, 4>(state, indices);
  }
  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3);
  dim3 grid(THCCeilDiv(outputPlaneSize, 128),
            devInput.getSize(1),
            devInput.getSize(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
#define SFMP_UPDATE_OUTPUT(POOL_W)                                      \
  SpatialFractionalMaxPooling_updateOutput<POOL_W>                      \
    <<<grid, block, 0, THCState_getCurrentStream(state)>>>(             \
      devInput, devOutput, devIndices, devSamples, poolSizeW, poolSizeH);
#define SFMP_UPDATE_OUTPUT_CASE(POOL_W)                 \
  case POOL_W: SFMP_UPDATE_OUTPUT(POOL_W); break
  switch (poolSizeW) {
    SFMP_UPDATE_OUTPUT_CASE(2);
    SFMP_UPDATE_OUTPUT_CASE(3);
    SFMP_UPDATE_OUTPUT_CASE(4);
    SFMP_UPDATE_OUTPUT_CASE(5);
    SFMP_UPDATE_OUTPUT_CASE(6);
    SFMP_UPDATE_OUTPUT_CASE(7);
    default:
      // dynamic pool width
      SFMP_UPDATE_OUTPUT_CASE(-1);
  }
}
// Backward pass: one thread per output point scatters its gradOutput
// value back to the input location recorded (1-based) in `indices` during
// the forward pass.  atomicAdd is used because distinct output points may
// map to the same input cell.
__global__ void SpatialFractionalMaxPooling_updateGradInput(
  THCDeviceTensor<float, 4> gradInput,
  THCDeviceTensor<float, 4> gradOutput,
  THCDeviceTensor<float, 4> indices) {
  // Output (h, w) point that this thread is responsible for
  int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Each thread generates a specific output point
  if (ourOutputPoint < gradOutput.getSize(2) * gradOutput.getSize(3)) {
    int outputW = ourOutputPoint % gradOutput.getSize(3);
    int outputH = ourOutputPoint / gradOutput.getSize(3);
    // Undo the +1 Lua-style offset stored by the forward kernel.
    int index = indices[batch][plane][outputH][outputW] - 1;
    assert(index >= 0);
    int inputW = index % gradInput.getSize(3);
    int inputH = index / gradInput.getSize(3);
    assert(inputH < gradInput.getSize(2));
    atomicAdd(gradInput[batch][plane][inputH][inputW].data(),
              gradOutput[batch][plane][outputH][outputW]);
  }
}
// Host wrapper for the backward pass: checks gradOutput's spatial sizes,
// zero-fills gradInput (the kernel accumulates with atomicAdd), upcasts
// 3D tensors to 4D views, and launches the scatter kernel.
void THNN_CudaSpatialFractionalMaxPooling_updateGradInput(
    THCState *state,
    THCudaTensor *input,
    THCudaTensor *gradOutput,
    THCudaTensor *gradInput,
    int outputW, int outputH,
    int poolSizeW, int poolSizeH,
    THCudaTensor *indices)
{
  int dimh = 1;
  int dimw = 2;
  long numInputDims = THCudaTensor_nDimension(state, input);
  // In batch mode dimension 0 is the batch, shifting h/w by one.
  if (numInputDims == 4) {
    dimh++;
    dimw++;
  }
  /* sizes */
  long inputH = THCudaTensor_size(state, input, dimh);
  long inputW = THCudaTensor_size(state, input, dimw);
  THArgCheck(outputH == THCudaTensor_size(state, gradOutput, dimh), 3,
             "gradOutput height unexpected");
  THArgCheck(outputW == THCudaTensor_size(state, gradOutput, dimw), 3,
             "gradOutput width unexpected");
  /* resize */
  THCudaTensor_resizeAs(state, gradInput, input);
  THCudaTensor_zero(state, gradInput);
  THCDeviceTensor<float, 4> devGradInput;
  THCDeviceTensor<float, 4> devGradOutput;
  THCDeviceTensor<float, 4> devIndices;
  /* backprop */
  if (numInputDims == 3) {
    devGradInput = toDeviceTensor<float, 3>(state, gradInput).upcastOuter<4>();
    devGradOutput = toDeviceTensor<float, 3>(state, gradOutput).upcastOuter<4>();
    devIndices = toDeviceTensor<float, 3>(state, indices).upcastOuter<4>();
  } else {
    devGradInput = toDeviceTensor<float, 4>(state, gradInput);
    devGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
    devIndices = toDeviceTensor<float, 4>(state, indices);
  }
  // block is limited to 4 warps
  // grid handles overflow per each plane
  int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3);
  dim3 grid(THCCeilDiv(outputPlaneSize, 128),
            devGradInput.getSize(1),
            devGradInput.getSize(0));
  dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
  SpatialFractionalMaxPooling_updateGradInput
    <<<grid, block, 0, THCState_getCurrentStream(state)>>>(
      devGradInput, devGradOutput, devIndices);
}
|
834edf8a16463282c9b96ee0388580b1a583b74c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
#define P (1<<14)
// Row-parallel copy: one thread per row iy walks all P columns of that
// row.  Within a warp, neighboring threads touch addresses m apart, so
// global accesses are NOT coalesced (the slow variant of this demo).
// NOTE(review): the loop bound is the compile-time P, not m -- this
// assumes m == P (true for the launch in main; confirm for other uses).
__global__ void copymat_x(int m, int n, int* A, int *B)
{
    int idx, ix;
    int iy = threadIdx.y + blockIdx.y*blockDim.y;
    if (iy < n)
        for(ix = 0; ix < P; ix++) {
            idx = iy*m + ix;
            B[idx] = A[idx];
        }
}
// Copy matrix A into B, one thread per column: thread ix walks every row,
// so for each iteration a warp's threads access adjacent columns of the
// same row (coalesced global accesses).
// NOTE(review): the loop bound is the macro P rather than the runtime
// height n — equal in this benchmark (both 1<<14); confirm before reuse.
__global__ void copymat_y(int m, int n, int* A, int *B)
{
int ix = threadIdx.x + blockIdx.x*blockDim.x;
int idx, iy;
if (ix < m)
for(iy = 0; iy < P; iy++) {
idx = iy*m + ix;
B[idx] = A[idx];
}
}
// Host wall-clock timestamp in seconds (gettimeofday, ~1 us resolution).
double cpuSecond()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    double secs = (double)now.tv_sec;
    double frac = (double)now.tv_usec * 1e-6;
    return secs + frac;
}
// Benchmark driver: times one matrix-copy kernel (row-per-thread vs
// column-per-thread, selected by the #if below) on a P x P int matrix.
// NOTE(review): allocation results (malloc/hipMalloc) are unchecked and
// B is never copied back to the host — acceptable for a throughput
// benchmark, not for reuse as a correctness test.
int main(int argc, char** argv)
{
int *A, *B;
int *dev_A, *dev_B;
size_t m, n, nbytes;
double etime, start;
m = 1 << 14;
n = 1 << 14;
nbytes = m*n*sizeof(int);
printf("P = %d\n",P);
A = (int*) malloc(nbytes);
B = (int*) malloc(nbytes);
memset(A,0,nbytes);
hipMalloc((void**) &dev_A, nbytes);
hipMalloc((void**) &dev_B, nbytes);
hipMemcpy(dev_A, A, nbytes, hipMemcpyHostToDevice);
#if 0
/* One thread per row */
dim3 block(1,32);
dim3 grid(1,(n+block.y-1)/block.y);
start = cpuSecond();
hipLaunchKernelGGL(( copymat_x), dim3(grid),dim3(block), 0, 0, m,n,dev_A, dev_B);
#else
/* One thread per column */
dim3 block(32,1);
dim3 grid((m+block.x-1)/block.x,1);
start = cpuSecond();
hipLaunchKernelGGL(( copymat_y), dim3(grid),dim3(block), 0, 0, m,n,dev_A, dev_B);
#endif
// Kernel launch is asynchronous; synchronize before reading the timer.
hipDeviceSynchronize();
etime = cpuSecond() - start;
printf("GPU Kernel %10.3g (s)\n",etime);
hipFree(dev_A);
hipFree(dev_B);
free(A);
free(B);
hipDeviceReset();
}
| 834edf8a16463282c9b96ee0388580b1a583b74c.cu | #include <stdio.h>
#include <sys/time.h>
#define P (1<<14)
// Copy matrix A into B, one thread per row: thread iy walks every column
// of its row (idx = iy*m + ix) — per-thread sequential, warp-uncoalesced.
// NOTE(review): the inner loop bound is the compile-time macro P rather
// than the runtime width m — they coincide here (both 1<<14); confirm
// before reusing this kernel with other sizes.
__global__ void copymat_x(int m, int n, int* A, int *B)
{
int idx, ix;
int iy = threadIdx.y + blockIdx.y*blockDim.y;
if (iy < n)
for(ix = 0; ix < P; ix++) {
idx = iy*m + ix;
B[idx] = A[idx];
}
}
// Copy matrix A into B, one thread per column: each iteration a warp's
// threads read adjacent columns of one row (coalesced global accesses).
// NOTE(review): the loop bound is the macro P rather than the runtime
// height n — equal in this benchmark (both 1<<14); confirm before reuse.
__global__ void copymat_y(int m, int n, int* A, int *B)
{
int ix = threadIdx.x + blockIdx.x*blockDim.x;
int idx, iy;
if (ix < m)
for(iy = 0; iy < P; iy++) {
idx = iy*m + ix;
B[idx] = A[idx];
}
}
// Host wall-clock timestamp in seconds (gettimeofday, ~1 us resolution).
double cpuSecond()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    double secs = (double)now.tv_sec;
    double frac = (double)now.tv_usec * 1e-6;
    return secs + frac;
}
// Benchmark driver: times one matrix-copy kernel (row-per-thread vs
// column-per-thread, selected by the #if below) on a P x P int matrix.
// NOTE(review): allocation results (malloc/cudaMalloc) are unchecked and
// B is never copied back to the host — acceptable for a throughput
// benchmark, not for reuse as a correctness test.
int main(int argc, char** argv)
{
int *A, *B;
int *dev_A, *dev_B;
size_t m, n, nbytes;
double etime, start;
m = 1 << 14;
n = 1 << 14;
nbytes = m*n*sizeof(int);
printf("P = %d\n",P);
A = (int*) malloc(nbytes);
B = (int*) malloc(nbytes);
memset(A,0,nbytes);
cudaMalloc((void**) &dev_A, nbytes);
cudaMalloc((void**) &dev_B, nbytes);
cudaMemcpy(dev_A, A, nbytes, cudaMemcpyHostToDevice);
#if 0
/* One thread per row */
dim3 block(1,32);
dim3 grid(1,(n+block.y-1)/block.y);
start = cpuSecond();
copymat_x<<<grid,block>>>(m,n,dev_A, dev_B);
#else
/* One thread per column */
dim3 block(32,1);
dim3 grid((m+block.x-1)/block.x,1);
start = cpuSecond();
copymat_y<<<grid,block>>>(m,n,dev_A, dev_B);
#endif
// Kernel launch is asynchronous; synchronize before reading the timer.
cudaDeviceSynchronize();
etime = cpuSecond() - start;
printf("GPU Kernel %10.3g (s)\n",etime);
cudaFree(dev_A);
cudaFree(dev_B);
free(A);
free(B);
cudaDeviceReset();
}
cfe59cd65f10d5fef1933af2ba3b9ca3d0d4c48e.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#define MEGABYTE (1024 * 1024)
// Time host malloc/free for a user-specified buffer size and round-trip
// the buffer through the device.
// usage: <prog> <size-in-mbs>
// Fixes vs. original: nbytes is now size_t (the original `unsigned int`
// silently overflowed for sizes >= 4096 MB), the argument is validated,
// and the malloc result is checked before it is written to.
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    CHECK(hipSetDevice(dev));

    if (argc != 2) {
        printf("usage: %s <size-in-mbs>\n", argv[0]);
        return 1;
    }

    // memory size: size_t arithmetic so >= 4 GB does not wrap a 32-bit int
    int n_mbs = atoi(argv[1]);
    if (n_mbs <= 0) {
        printf("usage: %s <size-in-mbs>\n", argv[0]);
        return 1;
    }
    size_t nbytes = (size_t)n_mbs * MEGABYTE;

    // get device information
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("%s starting at ", argv[0]);
    printf("device %d: %s nbyte %5.2fMB\n", dev,
           deviceProp.name, nbytes / (1024.0f * 1024.0f));

    // allocate the host memory (the allocation itself is what is timed)
    double start = seconds();
    float *h_a = (float *)malloc(nbytes);
    double elapsed = seconds() - start;
    if (h_a == NULL) {
        printf("host allocation of %zu bytes failed\n", nbytes);
        return 1;
    }
    printf("Host memory allocation took %2.10f us\n", elapsed * 1000000.0);

    // allocate the device memory
    float *d_a;
    CHECK(hipMalloc((float **)&d_a, nbytes));

    // initialize the host memory
    for (size_t i = 0; i < nbytes / sizeof(float); i++) h_a[i] = 0.5f;

    // transfer data from the host to the device
    CHECK(hipMemcpy(d_a, h_a, nbytes, hipMemcpyHostToDevice));

    // transfer data from the device to the host
    CHECK(hipMemcpy(h_a, d_a, nbytes, hipMemcpyDeviceToHost));

    // free memory
    CHECK(hipFree(d_a));

    start = seconds();
    free(h_a);
    elapsed = seconds() - start;
    printf("Host memory deallocation took %2.10f us\n", elapsed * 1000000.0);

    // reset device
    CHECK(hipDeviceReset());
    return EXIT_SUCCESS;
}
| cfe59cd65f10d5fef1933af2ba3b9ca3d0d4c48e.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#define MEGABYTE (1024 * 1024)
// Time host malloc/free for a user-specified buffer size and round-trip
// the buffer through the device.
// usage: <prog> <size-in-mbs>
// Fixes vs. original: nbytes is now size_t (the original `unsigned int`
// silently overflowed for sizes >= 4096 MB), the argument is validated,
// and the malloc result is checked before it is written to.
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    CHECK(cudaSetDevice(dev));

    if (argc != 2) {
        printf("usage: %s <size-in-mbs>\n", argv[0]);
        return 1;
    }

    // memory size: size_t arithmetic so >= 4 GB does not wrap a 32-bit int
    int n_mbs = atoi(argv[1]);
    if (n_mbs <= 0) {
        printf("usage: %s <size-in-mbs>\n", argv[0]);
        return 1;
    }
    size_t nbytes = (size_t)n_mbs * MEGABYTE;

    // get device information
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting at ", argv[0]);
    printf("device %d: %s nbyte %5.2fMB\n", dev,
           deviceProp.name, nbytes / (1024.0f * 1024.0f));

    // allocate the host memory (the allocation itself is what is timed)
    double start = seconds();
    float *h_a = (float *)malloc(nbytes);
    double elapsed = seconds() - start;
    if (h_a == NULL) {
        printf("host allocation of %zu bytes failed\n", nbytes);
        return 1;
    }
    printf("Host memory allocation took %2.10f us\n", elapsed * 1000000.0);

    // allocate the device memory
    float *d_a;
    CHECK(cudaMalloc((float **)&d_a, nbytes));

    // initialize the host memory
    for (size_t i = 0; i < nbytes / sizeof(float); i++) h_a[i] = 0.5f;

    // transfer data from the host to the device
    CHECK(cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice));

    // transfer data from the device to the host
    CHECK(cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDeviceToHost));

    // free memory
    CHECK(cudaFree(d_a));

    start = seconds();
    free(h_a);
    elapsed = seconds() - start;
    printf("Host memory deallocation took %2.10f us\n", elapsed * 1000000.0);

    // reset device
    CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}
|
371beb49e2b201857195d19bdfe7f15e8108f0dc.hip | // !!! This is a file automatically generated by hipify!!!
/********************************************************************
euler3d.cpp
: parallelized code of CFD
- original code from the AIAA-2009-4001 by Andrew Corrigan, acorriga@gmu.edu
- parallelization with OpenCL API has been applied by
Jianbin Fang - j.fang@tudelft.nl
Delft University of Technology
Faculty of Electrical Engineering, Mathematics and Computer Science
Department of Software Technology
Parallel and Distributed Systems Group
on 24/03/2011
********************************************************************/
#include <iostream>
#include <fstream>
#include <math.h>
#include <hip/hip_runtime.h>
#include "util.h"
/*
* Options
*
*/
#define GAMMA 1.4f
#define iterations 2000
#ifndef block_length
#define block_length 192
#endif
#define NDIM 3
#define NNB 4
#define RK 3 // 3rd order RK
#define ff_mach 1.2f
#define deg_angle_of_attack 0.0f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
#if block_length > 128
#warning "the kernels may fail too launch on some systems if the block length is too large"
#endif
// Current wall-clock time in seconds since the epoch (gettimeofday-based,
// microsecond resolution); used for coarse host-side timing.
double get_time() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double whole = (double)now.tv_sec;
    return whole + now.tv_usec * 1e-6;
}
// Self-defined user type: a plain 3-component float vector (x, y, z).
// Used throughout for momenta, velocities and per-direction flux
// contributions on both host and device.
typedef struct{
float x;
float y;
float z;
} Float3;
// Primitive velocity from conserved state: v = momentum / density.
__device__
inline void compute_velocity(const float rho, const Float3 mom, Float3* vel){
    vel->x = mom.x / rho;
    vel->y = mom.y / rho;
    vel->z = mom.z / rho;
}
// Squared magnitude of a velocity vector: vx^2 + vy^2 + vz^2.
__device__
inline float compute_speed_sqd(const Float3 vel){
    float acc = vel.x*vel.x;
    acc += vel.y*vel.y;
    acc += vel.z*vel.z;
    return acc;
}
// Ideal-gas pressure from conserved state:
// p = (GAMMA - 1) * (E - 0.5 * rho * |v|^2).
__device__
inline float compute_pressure(const float rho, const float total_energy, const float v_sqd){
    const float kinetic = (float)(0.5f)*rho*v_sqd;
    return ((float)(GAMMA) - (float)(1.0f))*(total_energy - kinetic);
}
// Local speed of sound c = sqrt(GAMMA * p / rho).
// (sqrt resolves to the device overload in this __device__ context.)
__device__
inline float compute_speed_of_sound(const float rho, const float p){
    const float c_sqd = (float)(GAMMA)*p/rho;
    return sqrt(c_sqd);
}
// Inviscid (Euler) flux contribution of one flow state. For each spatial
// direction d, writes the momentum-flux tensor rows (rho*v_d*v + p on the
// diagonal) into fc_momentum_* and (E + p)*v into fc_density_energy.
// Runs on host (far-field state in main) and device (per-cell states).
// NOTE(review): the `density` parameter is unused by the visible body;
// kept for interface stability.
__device__ __host__
inline void compute_flux_contribution(const float density,
Float3 momentum,
const float density_energy,
const float pressure,
const Float3 velocity,
Float3* fc_momentum_x,
Float3* fc_momentum_y,
Float3* fc_momentum_z,
Float3* fc_density_energy)
{
fc_momentum_x->x = velocity.x*momentum.x + pressure;
fc_momentum_x->y = velocity.x*momentum.y;
fc_momentum_x->z = velocity.x*momentum.z;
// The momentum-flux tensor is symmetric; reuse the off-diagonal terms.
fc_momentum_y->x = fc_momentum_x->y;
fc_momentum_y->y = velocity.y*momentum.y + pressure;
fc_momentum_y->z = velocity.y*momentum.z;
fc_momentum_z->x = fc_momentum_x->z;
fc_momentum_z->y = fc_momentum_y->z;
fc_momentum_z->z = velocity.z*momentum.z + pressure;
const float de_p = density_energy+pressure;
fc_density_energy->x = velocity.x*de_p;
fc_density_energy->y = velocity.y*de_p;
fc_density_energy->z = velocity.z*de_p;
}
// Device-to-device copy of N floats; both pointers must be device memory.
void copy(float* dst, const float* src, const int N){
hipMemcpy(dst, src, N*sizeof(float), hipMemcpyDeviceToDevice);
}
// Write the solution (host copy, SoA layout with stride nelr) to three
// text files in the current directory: "density", "momentum" and
// "density_energy". Each file starts with "nel nelr"; only the first nel
// (non-padding) elements are written.
void dump(const float *h_variables, const int nel, const int nelr){
{
std::ofstream file("density");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
}
{
std::ofstream file("momentum");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++)
{
for(int j = 0; j != NDIM; j++)
file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
file << std::endl;
}
}
{
std::ofstream file("density_energy");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
}
}
// Fill the first nelr entries of device array d with val. The ceil-div
// launch may spawn threads past nelr, hence the bounds guard.
__global__ void initialize_buffer(float *d, const float val, const int nelr)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
if (i < nelr) d[i] = val;
}
// Set all NVAR conserved variables of element i to the far-field state
// (SoA layout: variable j of element i lives at [i + j*nelr]).
// Fix vs. original: added the i >= nelr guard. The ceil-div launch can
// spawn threads past nelr, and without the guard those threads write past
// the end of the nelr*NVAR buffer. Every sibling kernel in this file
// already carries this guard.
__global__ void initialize_variables(const int nelr, float* variables, const float* ff_variable)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
if (i >= nelr) return;
for(int j = 0; j < NVAR; j++)
variables[i + j*nelr] = ff_variable[j];
}
// Per-element CFL-style time-step factor:
//   step_factors[i] = 0.5 / (sqrt(area_i) * (|v_i| + c_i))
// computed from the conserved variables in SoA layout (stride nelr).
__global__ void compute_step_factor(const int nelr,
float* variables,
float* areas,
float* step_factors){
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
if( i >= nelr) return;
float density = variables[i + VAR_DENSITY*nelr];
Float3 momentum;
momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];
Float3 velocity; compute_velocity(density, momentum, &velocity);
float speed_sqd = compute_speed_sqd(velocity);
float pressure = compute_pressure(density, density_energy, speed_sqd);
float speed_of_sound = compute_speed_of_sound(density, pressure);
step_factors[i] = (float)(0.5f) / (sqrt(areas[i]) * (sqrt(speed_sqd) + speed_of_sound));
}
// Accumulate the inviscid flux of element i from its NNB face neighbors
// (one thread per element, SoA layout with stride nelr). Neighbor codes in
// elements_surrounding_elements:
//   nb >= 0 : interior neighbor; nb == -1 : wing (solid-wall) boundary;
//   nb == -2 : far-field boundary (uses the precomputed ff_* state).
// A smoothing (artificial viscosity) term proportional to the face-normal
// length is added for interior faces.
__global__ void
compute_flux(
int nelr,
int* elements_surrounding_elements,
float* normals,
float* variables,
float* ff_variable,
float* fluxes,
Float3* ff_flux_contribution_density_energy,
Float3* ff_flux_contribution_momentum_x,
Float3* ff_flux_contribution_momentum_y,
Float3* ff_flux_contribution_momentum_z){
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
if( i >= nelr) return;
const float smoothing_coefficient = (float)(0.2f);
int j, nb;
Float3 normal;
float normal_len;
float factor;
// Load this element's conserved state and derive primitives once.
float density_i = variables[i + VAR_DENSITY*nelr];
Float3 momentum_i;
momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
Float3 velocity_i;
compute_velocity(density_i, momentum_i, &velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
//float speed_sqd_i;
//compute_speed_sqd(velocity_i, speed_sqd_i);
float speed_i = sqrt(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
Float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
Float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i,
&flux_contribution_i_momentum_x, &flux_contribution_i_momentum_y,
&flux_contribution_i_momentum_z, &flux_contribution_i_density_energy);
// Per-element flux accumulators, one term per neighbor face below.
float flux_i_density = (float)(0.0f);
Float3 flux_i_momentum;
flux_i_momentum.x = (float)(0.0f);
flux_i_momentum.y = (float)(0.0f);
flux_i_momentum.z = (float)(0.0f);
float flux_i_density_energy = (float)(0.0f);
Float3 velocity_nb;
float density_nb, density_energy_nb;
Float3 momentum_nb;
Float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
Float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < NNB; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
normal.x = normals[i + (j + 0*NNB)*nelr];
normal.y = normals[i + (j + 1*NNB)*nelr];
normal.z = normals[i + (j + 2*NNB)*nelr];
normal_len = sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
density_nb = variables[nb + VAR_DENSITY*nelr];
momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
compute_velocity(density_nb, momentum_nb, &velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb,
&flux_contribution_nb_momentum_x, &flux_contribution_nb_momentum_y, &flux_contribution_nb_momentum_z,
&flux_contribution_nb_density_energy);
// artificial viscosity
factor = -normal_len*smoothing_coefficient*(float)(0.5f)*(speed_i + sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = (float)(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = (float)(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = (float)(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
else if(nb == -1) // a wing boundary
{
// Solid wall: only the pressure term contributes to momentum flux.
flux_i_momentum.x += normal.x*pressure_i;
flux_i_momentum.y += normal.y*pressure_i;
flux_i_momentum.z += normal.z*pressure_i;
}
else if(nb == -2) // a far field boundary
{
factor = (float)(0.5f)*normal.x;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = (float)(0.5f)*normal.y;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = (float)(0.5f)*normal.z;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
// Write the accumulated fluxes back in SoA layout.
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
// One Runge-Kutta stage: variables = old_variables + factor * fluxes,
// where factor = step_factor / (RK + 1 - j) for stage j of the 3rd-order
// scheme. All NVAR conserved variables (density, 3 momenta, energy, i.e.
// indices 0..NVAR-1) update independently with the same rule.
__global__ void
time_step(int j, int nelr,
const float* old_variables,
float* variables,
const float* step_factors,
const float* fluxes) {
    const int i = (blockDim.x*blockIdx.x + threadIdx.x);
    if (i >= nelr) return;

    const float factor = step_factors[i] / (float)(RK + 1 - j);

    for (int v = 0; v < NVAR; v++)
        variables[i + v*nelr] = old_variables[i + v*nelr] + factor*fluxes[i + v*nelr];
}
/*
* Main function
*/
// Driver: read the mesh, set up far-field conditions, upload everything,
// run `iterations` outer steps of a 3-stage RK scheme, and copy back the
// solution.
// Fixes vs. original:
//  1) The padding loop that duplicates the last mesh element assigned
//     h_normals[last + ...] to itself, leaving the padded rows' normals
//     uninitialized; it now writes h_normals[i + ...].
//  2) The DEBUG copy of step factors used the nonexistent identifier
//     `cudaMemDeviceToHost`; it now uses hipMemcpyDeviceToHost.
int main(int argc, char** argv){
printf("WG size of kernel:initialize = %d\nWG size of kernel:compute_step_factor = %d\nWG size of kernel:compute_flux = %d\nWG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);
if (argc < 2){
std::cout << "Please specify data file name" << std::endl;
return 0;
}
const char* data_file_name = argv[1];
float h_ff_variable[NVAR];
// set far field conditions and load them into constant memory on the gpu
const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = float(1.4);
float ff_pressure = float(1.0f);
float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
float ff_speed = float(ff_mach)*ff_speed_of_sound;
Float3 ff_velocity;
ff_velocity.x = ff_speed*float(cos((float)angle_of_attack));
ff_velocity.y = ff_speed*float(sin((float)angle_of_attack));
ff_velocity.z = 0.0f;
h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f));
Float3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
Float3 h_ff_flux_contribution_momentum_x;
Float3 h_ff_flux_contribution_momentum_y;
Float3 h_ff_flux_contribution_momentum_z;
Float3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY],
h_ff_momentum,
h_ff_variable[VAR_DENSITY_ENERGY],
ff_pressure,
ff_velocity,
&h_ff_flux_contribution_momentum_x,
&h_ff_flux_contribution_momentum_y,
&h_ff_flux_contribution_momentum_z,
&h_ff_flux_contribution_density_energy);
int nel;
int nelr;
// read mesh sizes; nelr pads nel up to a multiple of block_length
std::ifstream file(data_file_name, std::ifstream::in);
if(!file.good()){
throw(std::string("can not find/open file! ")+data_file_name);
}
file >> nel;
nelr = block_length*((nel / block_length )+ ::min(1, nel % block_length));
std::cout<<"--cambine: nel="<<nel<<", nelr="<<nelr<<std::endl;
float* h_areas = new float[nelr];
int* h_elements_surrounding_elements = new int[nelr*NNB];
float* h_normals = new float[nelr*NDIM*NNB];
// read in data
for(int i = 0; i < nel; i++)
{
file >> h_areas[i];
for(int j = 0; j < NNB; j++)
{
file >> h_elements_surrounding_elements[i + j*nelr];
if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering
for(int k = 0; k < NDIM; k++)
{
file >> h_normals[i + (j + k*NNB)*nelr];
h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
}
}
}
// fill in remaining (padding) data by duplicating the last element
int last = nel-1;
for(int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for(int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
// FIX: the original assigned h_normals[last + ...] to itself here,
// leaving rows nel..nelr-1 of h_normals uninitialized.
for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
}
}
float* h_variables = new float[nelr*NVAR];
#ifdef DEBUG
float* h_step_factors = new float[nelr];
#endif
double offload_start = get_time();
// upload far-field state and its precomputed flux contributions
float *d_ff_variable;
Float3 *d_ff_flux_contribution_momentum_x;
Float3 *d_ff_flux_contribution_momentum_y;
Float3 *d_ff_flux_contribution_momentum_z;
Float3 *d_ff_flux_contribution_density_energy;
hipMalloc((void**)&d_ff_variable, sizeof(float)*NVAR);
hipMemcpy(d_ff_variable, h_ff_variable, sizeof(float)*NVAR, hipMemcpyHostToDevice);
hipMalloc((void**)&d_ff_flux_contribution_momentum_x, sizeof(Float3));
hipMemcpy(d_ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(Float3), hipMemcpyHostToDevice);
hipMalloc((void**)&d_ff_flux_contribution_momentum_y, sizeof(Float3));
hipMemcpy(d_ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(Float3), hipMemcpyHostToDevice);
hipMalloc((void**)&d_ff_flux_contribution_momentum_z, sizeof(Float3));
hipMemcpy(d_ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(Float3), hipMemcpyHostToDevice);
hipMalloc((void**)&d_ff_flux_contribution_density_energy, sizeof(Float3));
hipMemcpy(d_ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(Float3), hipMemcpyHostToDevice);
// upload the mesh
float* d_areas;
hipMalloc((void**)&d_areas, sizeof(float)*nelr);
hipMemcpy(d_areas, h_areas, sizeof(float)*nelr, hipMemcpyHostToDevice);
float* d_normals;
hipMalloc((void**)&d_normals, sizeof(float)*nelr*NDIM*NNB);
hipMemcpy(d_normals, h_normals, sizeof(float)*nelr*NDIM*NNB, hipMemcpyHostToDevice);
int* d_elements_surrounding_elements;
hipMalloc((void**)&d_elements_surrounding_elements, sizeof(int)*nelr*NNB);
hipMemcpy(d_elements_surrounding_elements, h_elements_surrounding_elements, sizeof(int)*nelr*NNB, hipMemcpyHostToDevice);
// Create arrays and set initial conditions
float* d_variables;
hipMalloc((void**)&d_variables, sizeof(float)*nelr*NVAR);
float* d_old_variables;
hipMalloc((void**)&d_old_variables, sizeof(float)*nelr*NVAR);
float* d_fluxes;
hipMalloc((void**)&d_fluxes, sizeof(float)*nelr*NVAR);
float* d_step_factors;
hipMalloc((void**)&d_step_factors, sizeof(float)*nelr);
dim3 gridDim1 ((nelr + BLOCK_SIZE_1 - 1)/BLOCK_SIZE_1);
dim3 gridDim2 ((nelr + BLOCK_SIZE_2 - 1)/BLOCK_SIZE_2);
dim3 gridDim3 ((nelr + BLOCK_SIZE_3 - 1)/BLOCK_SIZE_3);
dim3 gridDim4 ((nelr + BLOCK_SIZE_4 - 1)/BLOCK_SIZE_4);
hipDeviceSynchronize();
double kernel_start = get_time();
hipLaunchKernelGGL(( initialize_variables), dim3(gridDim1), dim3(BLOCK_SIZE_1), 0, 0, nelr, d_variables, d_ff_variable);
hipLaunchKernelGGL(( initialize_variables), dim3(gridDim1), dim3(BLOCK_SIZE_1), 0, 0, nelr, d_old_variables, d_ff_variable);
hipLaunchKernelGGL(( initialize_variables), dim3(gridDim1), dim3(BLOCK_SIZE_1), 0, 0, nelr, d_fluxes, d_ff_variable);
hipLaunchKernelGGL(( initialize_buffer), dim3(gridDim1), dim3(BLOCK_SIZE_1), 0, 0, d_step_factors, 0, nelr);
// Begin iterations
for(int n = 0; n < iterations; n++){
copy(d_old_variables, d_variables, nelr*NVAR);
// for the first iteration we compute the time step
hipLaunchKernelGGL(( compute_step_factor), dim3(gridDim2), dim3(BLOCK_SIZE_2), 0, 0, nelr, d_variables, d_areas, d_step_factors);
#ifdef DEBUG
// FIX: the original used the nonexistent enum `cudaMemDeviceToHost`.
hipMemcpy(h_step_factors, d_step_factors, sizeof(float)*nelr, hipMemcpyDeviceToHost);
for (int i = 0; i < 16; i++) printf("step factor: i=%d %f\n", i, h_step_factors[i]);
#endif
for(int j = 0; j < RK; j++){
hipLaunchKernelGGL(( compute_flux), dim3(gridDim3), dim3(BLOCK_SIZE_3), 0, 0, nelr, d_elements_surrounding_elements, d_normals,
d_variables, d_ff_variable, d_fluxes, d_ff_flux_contribution_density_energy,
d_ff_flux_contribution_momentum_x, d_ff_flux_contribution_momentum_y,
d_ff_flux_contribution_momentum_z);
hipLaunchKernelGGL(( time_step), dim3(gridDim4), dim3(BLOCK_SIZE_4), 0, 0, j, nelr, d_old_variables, d_variables, d_step_factors, d_fluxes);
}
}
hipDeviceSynchronize();
double kernel_end = get_time();
hipMemcpy(h_variables, d_variables, sizeof(float)*nelr*NVAR, hipMemcpyDeviceToHost);
hipFree(d_ff_variable);
hipFree(d_ff_flux_contribution_momentum_x);
hipFree(d_ff_flux_contribution_momentum_y);
hipFree(d_ff_flux_contribution_momentum_z);
hipFree(d_ff_flux_contribution_density_energy);
hipFree(d_areas);
hipFree(d_normals);
hipFree(d_elements_surrounding_elements);
hipFree(d_variables);
hipFree(d_old_variables);
hipFree(d_fluxes);
hipFree(d_step_factors);
double offload_end = get_time();
printf("Device offloading time = %lf(s)\n", offload_end - offload_start);
printf("Total execution time of kernels = %lf(s)\n", kernel_end - kernel_start);
#ifdef OUTPUT
std::cout << "Saving solution..." << std::endl;
dump(h_variables, nel, nelr);
#endif
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
delete[] h_variables;
#ifdef DEBUG
delete[] h_step_factors;
#endif
std::cout << "Done..." << std::endl;
return 0;
}
| 371beb49e2b201857195d19bdfe7f15e8108f0dc.cu | /********************************************************************
euler3d.cpp
: parallelized code of CFD
- original code from the AIAA-2009-4001 by Andrew Corrigan, acorriga@gmu.edu
- parallelization with OpenCL API has been applied by
Jianbin Fang - j.fang@tudelft.nl
Delft University of Technology
Faculty of Electrical Engineering, Mathematics and Computer Science
Department of Software Technology
Parallel and Distributed Systems Group
on 24/03/2011
********************************************************************/
#include <iostream>
#include <fstream>
#include <math.h>
#include <cuda.h>
#include "util.h"
/*
* Options
*
*/
#define GAMMA 1.4f
#define iterations 2000
#ifndef block_length
#define block_length 192
#endif
#define NDIM 3
#define NNB 4
#define RK 3 // 3rd order RK
#define ff_mach 1.2f
#define deg_angle_of_attack 0.0f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
#if block_length > 128
#warning "the kernels may fail too launch on some systems if the block length is too large"
#endif
// Current wall-clock time in seconds since the epoch (gettimeofday-based,
// microsecond resolution); used for coarse host-side timing.
double get_time() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double whole = (double)now.tv_sec;
    return whole + now.tv_usec * 1e-6;
}
// Self-defined user type: a plain 3-component float vector (x, y, z).
// Used throughout for momenta, velocities and per-direction flux
// contributions on both host and device.
typedef struct{
float x;
float y;
float z;
} Float3;
// Primitive velocity from conserved state: v = momentum / density.
__device__
inline void compute_velocity(const float rho, const Float3 mom, Float3* vel){
    vel->x = mom.x / rho;
    vel->y = mom.y / rho;
    vel->z = mom.z / rho;
}
// Squared magnitude of a velocity vector: vx^2 + vy^2 + vz^2.
__device__
inline float compute_speed_sqd(const Float3 vel){
    float acc = vel.x*vel.x;
    acc += vel.y*vel.y;
    acc += vel.z*vel.z;
    return acc;
}
// Ideal-gas pressure from conserved state:
// p = (GAMMA - 1) * (E - 0.5 * rho * |v|^2).
__device__
inline float compute_pressure(const float rho, const float total_energy, const float v_sqd){
    const float kinetic = (float)(0.5f)*rho*v_sqd;
    return ((float)(GAMMA) - (float)(1.0f))*(total_energy - kinetic);
}
// Local speed of sound c = sqrt(GAMMA * p / rho).
// (sqrt resolves to the device overload in this __device__ context.)
__device__
inline float compute_speed_of_sound(const float rho, const float p){
    const float c_sqd = (float)(GAMMA)*p/rho;
    return sqrt(c_sqd);
}
// Inviscid (Euler) flux contribution of one flow state. For each spatial
// direction d, writes the momentum-flux tensor rows (rho*v_d*v + p on the
// diagonal) into fc_momentum_* and (E + p)*v into fc_density_energy.
// Runs on host (far-field state in main) and device (per-cell states).
// NOTE(review): the `density` parameter is unused by the visible body;
// kept for interface stability.
__device__ __host__
inline void compute_flux_contribution(const float density,
Float3 momentum,
const float density_energy,
const float pressure,
const Float3 velocity,
Float3* fc_momentum_x,
Float3* fc_momentum_y,
Float3* fc_momentum_z,
Float3* fc_density_energy)
{
fc_momentum_x->x = velocity.x*momentum.x + pressure;
fc_momentum_x->y = velocity.x*momentum.y;
fc_momentum_x->z = velocity.x*momentum.z;
// The momentum-flux tensor is symmetric; reuse the off-diagonal terms.
fc_momentum_y->x = fc_momentum_x->y;
fc_momentum_y->y = velocity.y*momentum.y + pressure;
fc_momentum_y->z = velocity.y*momentum.z;
fc_momentum_z->x = fc_momentum_x->z;
fc_momentum_z->y = fc_momentum_y->z;
fc_momentum_z->z = velocity.z*momentum.z + pressure;
const float de_p = density_energy+pressure;
fc_density_energy->x = velocity.x*de_p;
fc_density_energy->y = velocity.y*de_p;
fc_density_energy->z = velocity.z*de_p;
}
// Device-to-device copy of N floats; both pointers must be device memory.
void copy(float* dst, const float* src, const int N){
cudaMemcpy(dst, src, N*sizeof(float), cudaMemcpyDeviceToDevice);
}
// Write the solution (host copy, SoA layout with stride nelr) to three
// text files in the current directory: "density", "momentum" and
// "density_energy". Each file starts with "nel nelr"; only the first nel
// (non-padding) elements are written.
void dump(const float *h_variables, const int nel, const int nelr){
{
std::ofstream file("density");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
}
{
std::ofstream file("momentum");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++)
{
for(int j = 0; j != NDIM; j++)
file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
file << std::endl;
}
}
{
std::ofstream file("density_energy");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
}
}
// Fill the first nelr entries of device array d with val. The ceil-div
// launch may spawn threads past nelr, hence the bounds guard.
__global__ void initialize_buffer(float *d, const float val, const int nelr)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
if (i < nelr) d[i] = val;
}
// Set all NVAR conserved variables of element i to the far-field state
// (SoA layout: variable j of element i lives at [i + j*nelr]).
// Fix vs. original: added the i >= nelr guard. The ceil-div launch can
// spawn threads past nelr, and without the guard those threads write past
// the end of the nelr*NVAR buffer. Every sibling kernel in this file
// already carries this guard.
__global__ void initialize_variables(const int nelr, float* variables, const float* ff_variable)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
if (i >= nelr) return;
for(int j = 0; j < NVAR; j++)
variables[i + j*nelr] = ff_variable[j];
}
// Per-element CFL-style time-step factor:
//   step_factors[i] = 0.5 / (sqrt(area_i) * (|v_i| + c_i))
// computed from the conserved variables in SoA layout (stride nelr).
__global__ void compute_step_factor(const int nelr,
float* variables,
float* areas,
float* step_factors){
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
if( i >= nelr) return;
float density = variables[i + VAR_DENSITY*nelr];
Float3 momentum;
momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];
Float3 velocity; compute_velocity(density, momentum, &velocity);
float speed_sqd = compute_speed_sqd(velocity);
float pressure = compute_pressure(density, density_energy, speed_sqd);
float speed_of_sound = compute_speed_of_sound(density, pressure);
step_factors[i] = (float)(0.5f) / (sqrt(areas[i]) * (sqrt(speed_sqd) + speed_of_sound));
}
// Accumulate the inviscid flux for each mesh element (one thread per element)
// over its NNB face neighbors, plus an artificial-viscosity smoothing term,
// then store the NVAR flux components into `fluxes` (SoA layout: component
// v of element i at i + v*nelr).
// Neighbor encoding in elements_surrounding_elements (see branches below):
//   nb >= 0  real neighbor element index
//   nb == -1 wing (solid wall) boundary face
//   nb == -2 far-field boundary face (uses the precomputed ff_* state)
__global__ void
compute_flux(
int nelr,
int* elements_surrounding_elements,
float* normals,
float* variables,
float* ff_variable,
float* fluxes,
Float3* ff_flux_contribution_density_energy,
Float3* ff_flux_contribution_momentum_x,
Float3* ff_flux_contribution_momentum_y,
Float3* ff_flux_contribution_momentum_z){
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
if( i >= nelr) return;
const float smoothing_coefficient = (float)(0.2f);
int j, nb;
Float3 normal;
float normal_len;
float factor;
// Load this element's conserved state and derive velocity / pressure /
// speed of sound once, outside the neighbor loop.
float density_i = variables[i + VAR_DENSITY*nelr];
Float3 momentum_i;
momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
Float3 velocity_i;
compute_velocity(density_i, momentum_i, &velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
//float speed_sqd_i;
//compute_speed_sqd(velocity_i, speed_sqd_i);
// NOTE(review): sqrt on a float promotes to double here; sqrtf would avoid
// the round trip — left unchanged in this documentation-only pass.
float speed_i = sqrt(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
Float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
Float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i,
&flux_contribution_i_momentum_x, &flux_contribution_i_momentum_y,
&flux_contribution_i_momentum_z, &flux_contribution_i_density_energy);
// Per-element flux accumulators, zeroed before the neighbor sweep.
float flux_i_density = (float)(0.0f);
Float3 flux_i_momentum;
flux_i_momentum.x = (float)(0.0f);
flux_i_momentum.y = (float)(0.0f);
flux_i_momentum.z = (float)(0.0f);
float flux_i_density_energy = (float)(0.0f);
Float3 velocity_nb;
float density_nb, density_energy_nb;
Float3 momentum_nb;
Float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
Float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < NNB; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
// Face normal for neighbor j; component k at i + (j + k*NNB)*nelr.
normal.x = normals[i + (j + 0*NNB)*nelr];
normal.y = normals[i + (j + 1*NNB)*nelr];
normal.z = normals[i + (j + 2*NNB)*nelr];
normal_len = sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
// Derive the neighbor's primitive state the same way as element i.
density_nb = variables[nb + VAR_DENSITY*nelr];
momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
compute_velocity(density_nb, momentum_nb, &velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb,
&flux_contribution_nb_momentum_x, &flux_contribution_nb_momentum_y, &flux_contribution_nb_momentum_z,
&flux_contribution_nb_density_energy);
// artificial viscosity: dissipative term scaled by face length and the
// combined wave speeds of the two elements
factor = -normal_len*smoothing_coefficient*(float)(0.5f)*(speed_i + sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes: average of the two elements'
// contributions projected on each normal component
factor = (float)(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = (float)(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = (float)(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
else if(nb == -1) // a wing boundary
{
// Solid wall: only the pressure force along the face normal contributes.
flux_i_momentum.x += normal.x*pressure_i;
flux_i_momentum.y += normal.y*pressure_i;
flux_i_momentum.z += normal.z*pressure_i;
}
else if(nb == -2) // a far field boundary
{
// Use the precomputed free-stream state and its flux contributions in
// place of a neighbor element.
factor = (float)(0.5f)*normal.x;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = (float)(0.5f)*normal.y;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = (float)(0.5f)*normal.z;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
// Write back all NVAR accumulated flux components for element i.
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
// One Runge-Kutta sub-step j: for every element i,
//   variables = old_variables + (step_factor / (RK+1-j)) * fluxes
// applied to all five conserved variables. One thread per element.
__global__ void
time_step(int j, int nelr,
const float* old_variables,
float* variables,
const float* step_factors,
const float* fluxes) {
const int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= nelr) return;
const float factor = step_factors[i] / (float)(RK + 1 - j);
// Precompute the flattened SoA offset of each conserved variable once.
const int d  = i + VAR_DENSITY * nelr;
const int de = i + VAR_DENSITY_ENERGY * nelr;
const int mx = i + (VAR_MOMENTUM + 0) * nelr;
const int my = i + (VAR_MOMENTUM + 1) * nelr;
const int mz = i + (VAR_MOMENTUM + 2) * nelr;
variables[d]  = old_variables[d]  + factor * fluxes[d];
variables[de] = old_variables[de] + factor * fluxes[de];
variables[mx] = old_variables[mx] + factor * fluxes[mx];
variables[my] = old_variables[my] + factor * fluxes[my];
variables[mz] = old_variables[mz] + factor * fluxes[mz];
}
/*
* Main function
*/
// Entry point: read the mesh from argv[1], set far-field conditions, copy
// everything to the GPU, run `iterations` RK time steps, then copy the
// solution back and report timings.
// Two fixes vs. the original:
//  * padded-row fill copied h_normals[last] onto itself instead of into row
//    i, leaving rows [nel, nelr) uninitialized;
//  * the DEBUG copy used the nonexistent `cudaMemDeviceToHost` kind.
int main(int argc, char** argv){
printf("WG size of kernel:initialize = %d\nWG size of kernel:compute_step_factor = %d\nWG size of kernel:compute_flux = %d\nWG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);
if (argc < 2){
std::cout << "Please specify data file name" << std::endl;
return 0;
}
const char* data_file_name = argv[1];
float h_ff_variable[NVAR];
// Set far-field conditions; they serve both as the initial state and as
// the boundary state for far-field (nb == -2) faces in compute_flux.
const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = float(1.4);
float ff_pressure = float(1.0f);
float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
float ff_speed = float(ff_mach)*ff_speed_of_sound;
Float3 ff_velocity;
ff_velocity.x = ff_speed*float(cos((float)angle_of_attack));
ff_velocity.y = ff_speed*float(sin((float)angle_of_attack));
ff_velocity.z = 0.0f;
h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f));
Float3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
Float3 h_ff_flux_contribution_momentum_x;
Float3 h_ff_flux_contribution_momentum_y;
Float3 h_ff_flux_contribution_momentum_z;
Float3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY],
h_ff_momentum,
h_ff_variable[VAR_DENSITY_ENERGY],
ff_pressure,
ff_velocity,
&h_ff_flux_contribution_momentum_x,
&h_ff_flux_contribution_momentum_y,
&h_ff_flux_contribution_momentum_z,
&h_ff_flux_contribution_density_energy);
int nel;
int nelr;
std::ifstream file(data_file_name, std::ifstream::in);
if(!file.good()){
throw(std::string("can not find/open file! ")+data_file_name);
}
// nelr pads nel up to a multiple of block_length so kernels see full tiles.
file >> nel;
nelr = block_length*((nel / block_length )+ std::min(1, nel % block_length));
std::cout<<"--cambine: nel="<<nel<<", nelr="<<nelr<<std::endl;
float* h_areas = new float[nelr];
int* h_elements_surrounding_elements = new int[nelr*NNB];
float* h_normals = new float[nelr*NDIM*NNB];
// read in data
for(int i = 0; i < nel; i++)
{
file >> h_areas[i];
for(int j = 0; j < NNB; j++)
{
file >> h_elements_surrounding_elements[i + j*nelr];
// Clamp negative markers to -1, then shift the Fortran (1-based)
// numbering down: 0 -> -1 (wing boundary), negatives -> -2 (far field).
if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering
for(int k = 0; k < NDIM; k++)
{
file >> h_normals[i + (j + k*NNB)*nelr];
h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
}
}
}
// fill in remaining (padding) rows by duplicating the last real element
int last = nel-1;
for(int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for(int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
// BUGFIX: the original assigned h_normals[last + ...] to itself, leaving
// rows [nel, nelr) uninitialized; copy into row i instead.
for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr];
}
}
float* h_variables = new float[nelr*NVAR];
#ifdef DEBUG
float* h_step_factors = new float[nelr];
#endif
double offload_start = get_time();
// Upload far-field state and its precomputed flux contributions.
float *d_ff_variable;
Float3 *d_ff_flux_contribution_momentum_x;
Float3 *d_ff_flux_contribution_momentum_y;
Float3 *d_ff_flux_contribution_momentum_z;
Float3 *d_ff_flux_contribution_density_energy;
cudaMalloc((void**)&d_ff_variable, sizeof(float)*NVAR);
cudaMemcpy(d_ff_variable, h_ff_variable, sizeof(float)*NVAR, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_ff_flux_contribution_momentum_x, sizeof(Float3));
cudaMemcpy(d_ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(Float3), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_ff_flux_contribution_momentum_y, sizeof(Float3));
cudaMemcpy(d_ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(Float3), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_ff_flux_contribution_momentum_z, sizeof(Float3));
cudaMemcpy(d_ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(Float3), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_ff_flux_contribution_density_energy, sizeof(Float3));
cudaMemcpy(d_ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(Float3), cudaMemcpyHostToDevice);
// Upload the mesh (areas, normals, neighbor indices).
float* d_areas;
cudaMalloc((void**)&d_areas, sizeof(float)*nelr);
cudaMemcpy(d_areas, h_areas, sizeof(float)*nelr, cudaMemcpyHostToDevice);
float* d_normals;
cudaMalloc((void**)&d_normals, sizeof(float)*nelr*NDIM*NNB);
cudaMemcpy(d_normals, h_normals, sizeof(float)*nelr*NDIM*NNB, cudaMemcpyHostToDevice);
int* d_elements_surrounding_elements;
cudaMalloc((void**)&d_elements_surrounding_elements, sizeof(int)*nelr*NNB);
cudaMemcpy(d_elements_surrounding_elements, h_elements_surrounding_elements, sizeof(int)*nelr*NNB, cudaMemcpyHostToDevice);
// Create arrays and set initial conditions
float* d_variables;
cudaMalloc((void**)&d_variables, sizeof(float)*nelr*NVAR);
float* d_old_variables;
cudaMalloc((void**)&d_old_variables, sizeof(float)*nelr*NVAR);
float* d_fluxes;
cudaMalloc((void**)&d_fluxes, sizeof(float)*nelr*NVAR);
float* d_step_factors;
cudaMalloc((void**)&d_step_factors, sizeof(float)*nelr);
// One grid per kernel, each sized to cover nelr with its own block size.
dim3 gridDim1 ((nelr + BLOCK_SIZE_1 - 1)/BLOCK_SIZE_1);
dim3 gridDim2 ((nelr + BLOCK_SIZE_2 - 1)/BLOCK_SIZE_2);
dim3 gridDim3 ((nelr + BLOCK_SIZE_3 - 1)/BLOCK_SIZE_3);
dim3 gridDim4 ((nelr + BLOCK_SIZE_4 - 1)/BLOCK_SIZE_4);
cudaDeviceSynchronize();
double kernel_start = get_time();
initialize_variables<<<gridDim1, BLOCK_SIZE_1>>>(nelr, d_variables, d_ff_variable);
initialize_variables<<<gridDim1, BLOCK_SIZE_1>>>(nelr, d_old_variables, d_ff_variable);
initialize_variables<<<gridDim1, BLOCK_SIZE_1>>>(nelr, d_fluxes, d_ff_variable);
initialize_buffer<<<gridDim1, BLOCK_SIZE_1>>>(d_step_factors, 0, nelr);
// Begin iterations
for(int n = 0; n < iterations; n++){
copy(d_old_variables, d_variables, nelr*NVAR);
// for the first iteration we compute the time step
compute_step_factor<<<gridDim2, BLOCK_SIZE_2>>>(nelr, d_variables, d_areas, d_step_factors);
#ifdef DEBUG
// BUGFIX: `cudaMemDeviceToHost` is not a valid cudaMemcpyKind; the DEBUG
// build did not compile. Use cudaMemcpyDeviceToHost.
cudaMemcpy(h_step_factors, d_step_factors, sizeof(float)*nelr, cudaMemcpyDeviceToHost);
for (int i = 0; i < 16; i++) printf("step factor: i=%d %f\n", i, h_step_factors[i]);
#endif
for(int j = 0; j < RK; j++){
compute_flux<<<gridDim3, BLOCK_SIZE_3>>>(nelr, d_elements_surrounding_elements, d_normals,
d_variables, d_ff_variable, d_fluxes, d_ff_flux_contribution_density_energy, \
d_ff_flux_contribution_momentum_x, d_ff_flux_contribution_momentum_y,
d_ff_flux_contribution_momentum_z);
time_step<<<gridDim4, BLOCK_SIZE_4>>>(j, nelr, d_old_variables, d_variables, d_step_factors, d_fluxes);
}
}
cudaDeviceSynchronize();
double kernel_end = get_time();
cudaMemcpy(h_variables, d_variables, sizeof(float)*nelr*NVAR, cudaMemcpyDeviceToHost);
cudaFree(d_ff_variable);
cudaFree(d_ff_flux_contribution_momentum_x);
cudaFree(d_ff_flux_contribution_momentum_y);
cudaFree(d_ff_flux_contribution_momentum_z);
cudaFree(d_ff_flux_contribution_density_energy);
cudaFree(d_areas);
cudaFree(d_normals);
cudaFree(d_elements_surrounding_elements);
cudaFree(d_variables);
cudaFree(d_old_variables);
cudaFree(d_fluxes);
cudaFree(d_step_factors);
double offload_end = get_time();
printf("Device offloading time = %lf(s)\n", offload_end - offload_start);
printf("Total execution time of kernels = %lf(s)\n", kernel_end - kernel_start);
#ifdef OUTPUT
std::cout << "Saving solution..." << std::endl;
dump(h_variables, nel, nelr);
#endif
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
delete[] h_variables;
#ifdef DEBUG
delete[] h_step_factors;
#endif
std::cout << "Done..." << std::endl;
return 0;
}
|
45531a8ed35d0d2d562aa9be299d742625213094.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <roctracer/roctx.h>
#include <argparse/argparse.hpp>
#include "common.hpp"
#define TILE_SZ_A 64
#define TILE_SZ_B 16
#define TILE_SZ_RATIO (TILE_SZ_A / TILE_SZ_B)
/* NOTE: A and C are column major, B is row major
*/
// Register-tiled GEMM: C = A * B with A, C column-major and B row-major.
// Each thread owns one row of A/C and accumulates TILE_SZ_B output columns
// in registers; a TILE_SZ_RATIO x TILE_SZ_B slice of B is staged in shared
// memory per iteration.
// Assumes 1-D thread blocks with blockDim.x == TILE_SZ_A — TODO confirm:
// the i/j decomposition below covers exactly TILE_SZ_RATIO*TILE_SZ_B
// (= TILE_SZ_A) shared slots, one per thread.
__global__ void mygemm(float * __restrict__ c, //<! [out] and MxN matrix
const float *a, //<! [in] an MxK matrix
const float *b, //<! [in] an KxN matrix
const int M, const int N, const int K) {
// Macros for accessing flattened matrices
#define A(_i, _j) a[(_i) + (_j)*M]
#define B(_i, _j) b[(_i)*N + (_j)]
#define C(_i, _j) c[(_i) + (_j)*M]
// Shared memory for tiling input B array
__shared__ float B_s[TILE_SZ_RATIO][TILE_SZ_B];
// Index variables
const unsigned int row = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned int col = blockIdx.y * TILE_SZ_B;
// Privatization of output variables
float c_reg[TILE_SZ_B];
// Initialize output values
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
c_reg[outIdx] = 0;
}
// Loop over the input tiles
for (unsigned int tileIdx = 0; tileIdx < (K - 1) / TILE_SZ_RATIO + 1;
++tileIdx) {
// Load the tile of B into shared memory; each thread fills exactly one
// slot, zero-padding slots that fall outside the K x N extent.
const unsigned int i = threadIdx.x / TILE_SZ_B;
const unsigned int j = threadIdx.x % TILE_SZ_B;
if (tileIdx * TILE_SZ_RATIO + i < K && col + j < N) {
B_s[i][j] = B(tileIdx * TILE_SZ_RATIO + i, col + j);
} else {
B_s[i][j] = 0;
}
// Barrier: the whole tile must be staged before any thread reads it.
__syncthreads();
// Loop over elements inside the tile
for (unsigned int idx = 0; idx < TILE_SZ_RATIO; ++idx) {
// Load tile of A matrix into register (zero beyond the M x K extent)
float a_reg;
if (row < M && tileIdx * TILE_SZ_RATIO + idx < K) {
a_reg = A(row, tileIdx * TILE_SZ_RATIO + idx);
} else {
a_reg = 0;
}
// Loop over and update the output elements assigned to the thread
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
c_reg[outIdx] += a_reg * B_s[idx][outIdx];
}
}
// Barrier: finish reading B_s before the next iteration overwrites it.
__syncthreads();
}
// Write back the accumulated outputs, guarding the M x N boundary.
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
if (row < M && col + outIdx < N) {
C(row, col + outIdx) = c_reg[outIdx];
}
}
#undef A
#undef B
#undef C
}
// Benchmark driver: parse sizes, run mygemm nWarmup + nIters times, verify
// against a CPU reference on the first run if --check was given, and report
// average GFLOPS over the timed (post-warmup) runs.
// Fixes: check the kernel launch with hipGetLastError (launch-config errors
// were silently ignored), un-shadow the verification loop index, and size
// the element count as size_t to avoid int overflow for large m*n.
int main(int argc, char **argv) {
argparse::Parser parser;
// default matrix sizes:
// A: 1489 x 1493
// B: 1493 x 1499
// C: 1489 x 1499
int m = 1489;
int n = 1499;
int k = 1493;
int nIters = 5;
int nWarmup = 5;
bool check = false;
parser.add_positional(m);
parser.add_positional(n);
parser.add_positional(k);
parser.add_option(nIters, "--iters");
parser.add_option(nWarmup, "--warmup");
parser.add_flag(check, "--check");
if (!parser.parse(argc, argv)) {
parser.help();
exit(EXIT_FAILURE);
}
const int64_t flop = int64_t(m) * int64_t(n) * int64_t(k) * 2;
// initialize host data (pinned, so async-capable copies get full bandwidth)
std::cout << "generate data\n";
roctxRangePush("generate data");
float *aHost, *bHost, *cHost, *cExpected;
CUDA_RUNTIME(hipHostMalloc(&aHost, m * k * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&bHost, k * n * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cHost, m * n * sizeof(float), 0));
CUDA_RUNTIME(hipHostMalloc(&cExpected, m * n * sizeof(float), 0));
std::generate(aHost, aHost + m * k, random_int);
std::generate(bHost, bHost + k * n, random_int);
roctxRangePop();
// allocate device data
float *aDev, *bDev, *cDev;
CUDA_RUNTIME(hipMalloc(&aDev, m * k * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&bDev, k * n * sizeof(float)));
CUDA_RUNTIME(hipMalloc(&cDev, m * n * sizeof(float)));
// copy data to device
std::cout << "transfer to GPU\n";
roctxRangePush("host-to-device");
CUDA_RUNTIME(
hipMemcpy(aDev, aHost, m * k * sizeof(float), hipMemcpyDefault));
CUDA_RUNTIME(
hipMemcpy(bDev, bHost, k * n * sizeof(float), hipMemcpyDefault));
roctxRangePop();
// create events to time GPU kernel
hipEvent_t start, stop;
CUDA_RUNTIME(hipEventCreate(&start));
CUDA_RUNTIME(hipEventCreate(&stop));
// GPU kernel launch parameters: one thread per row, TILE_SZ_B cols/block
dim3 dimGrid((m + TILE_SZ_A - 1) / TILE_SZ_A, (n +TILE_SZ_B - 1) / TILE_SZ_B);
dim3 dimBlock(TILE_SZ_A, 1);
// total elapsed time
float elapsed = 0;
/* Launch the kernel nIters + nWarmup times
Check for correctness on the first time.
Record the time after nWarmup runs complete.
*/
for (int i = 0; i < nIters + nWarmup; ++i) {
CUDA_RUNTIME(hipEventRecord(start));
hipLaunchKernelGGL(( mygemm), dim3(dimGrid), dim3(dimBlock), 0, 0, cDev, aDev, bDev, m, n, k);
// surface bad launch configurations immediately instead of at the sync
CUDA_RUNTIME(hipGetLastError());
CUDA_RUNTIME(hipEventRecord(stop));
CUDA_RUNTIME(hipEventSynchronize(stop));
// check result once
if (check && 0 == i) {
// copy result to host
CUDA_RUNTIME(
hipMemcpy(cHost, cDev, m * n * sizeof(float), hipMemcpyDefault));
// check result on host (size_t index, no shadowing of the outer i)
cpu_gemm(cExpected, aHost, bHost, m, n, k);
for (size_t e = 0; e < size_t(m) * size_t(n); ++e) {
if (!equal(cExpected[e], cHost[e], 1e-6)) {
std::cout << "Error!\n";
exit(EXIT_FAILURE);
}
}
}
float millis;
CUDA_RUNTIME(hipEventElapsedTime(&millis, start, stop));
std::cout << i << ": " << millis << (i >= nWarmup ? " *" : " ") << "\n";
// record time after warmup runs
if (i >= nWarmup) {
elapsed += millis;
}
}
// print results
double gflops = flop / ((elapsed / nIters) / 1000) / 1e9;
std::cout << "kernel " << gflops << "GFLOPS (" << flop << " flop, "
<< (elapsed / nIters) / 1000 << "s)\n";
// release resources
CUDA_RUNTIME(hipEventDestroy(start));
CUDA_RUNTIME(hipEventDestroy(stop));
CUDA_RUNTIME(hipFree(aDev));
CUDA_RUNTIME(hipFree(bDev));
CUDA_RUNTIME(hipFree(cDev));
CUDA_RUNTIME(hipHostFree(aHost));
CUDA_RUNTIME(hipHostFree(bHost));
CUDA_RUNTIME(hipHostFree(cHost));
CUDA_RUNTIME(hipHostFree(cExpected));
return 0;
}
| 45531a8ed35d0d2d562aa9be299d742625213094.cu | #include <algorithm>
#include <nvToolsExt.h>
#include <argparse/argparse.hpp>
#include "common.hpp"
#define TILE_SZ_A 64
#define TILE_SZ_B 16
#define TILE_SZ_RATIO (TILE_SZ_A / TILE_SZ_B)
/* NOTE: A and C are column major, B is row major
*/
// Register-tiled GEMM: C = A * B with A, C column-major and B row-major.
// Each thread owns one row of A/C and accumulates TILE_SZ_B output columns
// in registers; a TILE_SZ_RATIO x TILE_SZ_B slice of B is staged in shared
// memory per iteration.
// Assumes 1-D thread blocks with blockDim.x == TILE_SZ_A — TODO confirm:
// the i/j decomposition below covers exactly TILE_SZ_RATIO*TILE_SZ_B
// (= TILE_SZ_A) shared slots, one per thread.
__global__ void mygemm(float * __restrict__ c, //<! [out] and MxN matrix
const float *a, //<! [in] an MxK matrix
const float *b, //<! [in] an KxN matrix
const int M, const int N, const int K) {
// Macros for accessing flattened matrices
#define A(_i, _j) a[(_i) + (_j)*M]
#define B(_i, _j) b[(_i)*N + (_j)]
#define C(_i, _j) c[(_i) + (_j)*M]
// Shared memory for tiling input B array
__shared__ float B_s[TILE_SZ_RATIO][TILE_SZ_B];
// Index variables
const unsigned int row = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned int col = blockIdx.y * TILE_SZ_B;
// Privatization of output variables
float c_reg[TILE_SZ_B];
// Initialize output values
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
c_reg[outIdx] = 0;
}
// Loop over the input tiles
for (unsigned int tileIdx = 0; tileIdx < (K - 1) / TILE_SZ_RATIO + 1;
++tileIdx) {
// Load the tile of B into shared memory; each thread fills exactly one
// slot, zero-padding slots that fall outside the K x N extent.
const unsigned int i = threadIdx.x / TILE_SZ_B;
const unsigned int j = threadIdx.x % TILE_SZ_B;
if (tileIdx * TILE_SZ_RATIO + i < K && col + j < N) {
B_s[i][j] = B(tileIdx * TILE_SZ_RATIO + i, col + j);
} else {
B_s[i][j] = 0;
}
// Barrier: the whole tile must be staged before any thread reads it.
__syncthreads();
// Loop over elements inside the tile
for (unsigned int idx = 0; idx < TILE_SZ_RATIO; ++idx) {
// Load tile of A matrix into register (zero beyond the M x K extent)
float a_reg;
if (row < M && tileIdx * TILE_SZ_RATIO + idx < K) {
a_reg = A(row, tileIdx * TILE_SZ_RATIO + idx);
} else {
a_reg = 0;
}
// Loop over and update the output elements assigned to the thread
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
c_reg[outIdx] += a_reg * B_s[idx][outIdx];
}
}
// Barrier: finish reading B_s before the next iteration overwrites it.
__syncthreads();
}
// Write back the accumulated outputs, guarding the M x N boundary.
for (unsigned int outIdx = 0; outIdx < TILE_SZ_B; ++outIdx) {
if (row < M && col + outIdx < N) {
C(row, col + outIdx) = c_reg[outIdx];
}
}
#undef A
#undef B
#undef C
}
// Benchmark driver: parse sizes, run mygemm nWarmup + nIters times, verify
// against a CPU reference on the first run if --check was given, and report
// average GFLOPS over the timed (post-warmup) runs.
// Fixes: check the kernel launch with cudaGetLastError (launch-config errors
// were silently ignored), un-shadow the verification loop index, and size
// the element count as size_t to avoid int overflow for large m*n.
int main(int argc, char **argv) {
argparse::Parser parser;
// default matrix sizes:
// A: 1489 x 1493
// B: 1493 x 1499
// C: 1489 x 1499
int m = 1489;
int n = 1499;
int k = 1493;
int nIters = 5;
int nWarmup = 5;
bool check = false;
parser.add_positional(m);
parser.add_positional(n);
parser.add_positional(k);
parser.add_option(nIters, "--iters");
parser.add_option(nWarmup, "--warmup");
parser.add_flag(check, "--check");
if (!parser.parse(argc, argv)) {
parser.help();
exit(EXIT_FAILURE);
}
const int64_t flop = int64_t(m) * int64_t(n) * int64_t(k) * 2;
// initialize host data (pinned, so async-capable copies get full bandwidth)
std::cout << "generate data\n";
nvtxRangePush("generate data");
float *aHost, *bHost, *cHost, *cExpected;
CUDA_RUNTIME(cudaHostAlloc(&aHost, m * k * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&bHost, k * n * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cHost, m * n * sizeof(float), 0));
CUDA_RUNTIME(cudaHostAlloc(&cExpected, m * n * sizeof(float), 0));
std::generate(aHost, aHost + m * k, random_int);
std::generate(bHost, bHost + k * n, random_int);
nvtxRangePop();
// allocate device data
float *aDev, *bDev, *cDev;
CUDA_RUNTIME(cudaMalloc(&aDev, m * k * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&bDev, k * n * sizeof(float)));
CUDA_RUNTIME(cudaMalloc(&cDev, m * n * sizeof(float)));
// copy data to device
std::cout << "transfer to GPU\n";
nvtxRangePush("host-to-device");
CUDA_RUNTIME(
cudaMemcpy(aDev, aHost, m * k * sizeof(float), cudaMemcpyDefault));
CUDA_RUNTIME(
cudaMemcpy(bDev, bHost, k * n * sizeof(float), cudaMemcpyDefault));
nvtxRangePop();
// create events to time GPU kernel
cudaEvent_t start, stop;
CUDA_RUNTIME(cudaEventCreate(&start));
CUDA_RUNTIME(cudaEventCreate(&stop));
// GPU kernel launch parameters: one thread per row, TILE_SZ_B cols/block
dim3 dimGrid((m + TILE_SZ_A - 1) / TILE_SZ_A, (n +TILE_SZ_B - 1) / TILE_SZ_B);
dim3 dimBlock(TILE_SZ_A, 1);
// total elapsed time
float elapsed = 0;
/* Launch the kernel nIters + nWarmup times
Check for correctness on the first time.
Record the time after nWarmup runs complete.
*/
for (int i = 0; i < nIters + nWarmup; ++i) {
CUDA_RUNTIME(cudaEventRecord(start));
mygemm<<<dimGrid, dimBlock>>>(cDev, aDev, bDev, m, n, k);
// surface bad launch configurations immediately instead of at the sync
CUDA_RUNTIME(cudaGetLastError());
CUDA_RUNTIME(cudaEventRecord(stop));
CUDA_RUNTIME(cudaEventSynchronize(stop));
// check result once
if (check && 0 == i) {
// copy result to host
CUDA_RUNTIME(
cudaMemcpy(cHost, cDev, m * n * sizeof(float), cudaMemcpyDefault));
// check result on host (size_t index, no shadowing of the outer i)
cpu_gemm(cExpected, aHost, bHost, m, n, k);
for (size_t e = 0; e < size_t(m) * size_t(n); ++e) {
if (!equal(cExpected[e], cHost[e], 1e-6)) {
std::cout << "Error!\n";
exit(EXIT_FAILURE);
}
}
}
float millis;
CUDA_RUNTIME(cudaEventElapsedTime(&millis, start, stop));
std::cout << i << ": " << millis << (i >= nWarmup ? " *" : " ") << "\n";
// record time after warmup runs
if (i >= nWarmup) {
elapsed += millis;
}
}
// print results
double gflops = flop / ((elapsed / nIters) / 1000) / 1e9;
std::cout << "kernel " << gflops << "GFLOPS (" << flop << " flop, "
<< (elapsed / nIters) / 1000 << "s)\n";
// release resources
CUDA_RUNTIME(cudaEventDestroy(start));
CUDA_RUNTIME(cudaEventDestroy(stop));
CUDA_RUNTIME(cudaFree(aDev));
CUDA_RUNTIME(cudaFree(bDev));
CUDA_RUNTIME(cudaFree(cDev));
CUDA_RUNTIME(cudaFreeHost(aHost));
CUDA_RUNTIME(cudaFreeHost(bHost));
CUDA_RUNTIME(cudaFreeHost(cHost));
CUDA_RUNTIME(cudaFreeHost(cExpected));
return 0;
}
|
d9dd13b6258e0827142bece5fdf1f11e28290db0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cutlass/aligned_buffer.h>
#include <cutlass/core_io.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
#include <cutlass/transform/threadblock/predicated_tile_iterator.h>
#include <cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h>
#include <cutlass/util/debug.h>
#include <cutlass/util/device_dump.h>
#include <cutlass/util/host_tensor.h>
#include <cutlass/util/reference/host/gemm.h>
#include <cutlass/util/reference/host/tensor_compare.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/tensor_view_io.h>
#include <iomanip>
#include <iostream>
#include "cuda_utils.cuh"
#include "tile_loader.h"
#define CEIL_DIV(m, n) ((m) + (n)-1) / (n)
// Test harness kernel: flatten the 2-D thread index and forward it to the
// tile loader, which copies `src` into the dynamically-sized shared buffer
// (size passed as the third launch argument, in Elements).
template <typename Element, typename LOAD>
__global__ void TestTileLoader(LOAD load, Element* src) {
extern __shared__ Element shared_storage[];
// Linearize (x, y) thread coordinates into a single block-local id.
int tid = threadIdx.y * blockDim.x + threadIdx.x;
load.template load(src, shared_storage, tid);
}
// Exercise TileLoader on a row-major 64x64 half tile, staging it into shared
// memory in both swizzled column-major and swizzled row-major layouts.
// Returns 0 on success, -1 if any launch failed.
// Fix: the original never freed `src`, leaking the device buffer each call.
int TestRowMajor() {
// const int M = 1024;
// const int N = 512;
const int row = 64;
const int col = 64;
int numel = row * col;
using Element = cutlass::half_t;
using Layout = cutlass::layout::RowMajor;
int threads = 128;
int blocks = CEIL_DIV(numel, threads);
__half* src;
hipMalloc(&src, numel * sizeof(__half));
hipLaunchKernelGGL(( InitHalfs), dim3(blocks), dim3(threads), 0, 0, src, numel);
// PrintHalfs(src, numel);
// using Element = float;
// Element* src;
// CudaCheck(hipMalloc(&src, numel * sizeof(Element)));
int smem_size = int(sizeof(Element) * row * col);
const int kThreads = 32;
dim3 grid(1, 1);
dim3 block(kThreads, 1, 1);
// row-major to column-major
TileLoader<row, col, Element, kThreads, TileLayout::RowMajor,
TileLayout::SwizzledColumnMajor>
load1(row, col);
hipLaunchKernelGGL(( TestTileLoader<Element, decltype(load1)>)
, dim3(grid), dim3(block), smem_size, 0, load1, reinterpret_cast<Element*>(src));
// row-major to row-major
TileLoader<row, col, Element, kThreads, TileLayout::RowMajor,
TileLayout::SwizzledRowMajor>
load2(row, col);
hipLaunchKernelGGL(( TestTileLoader<Element, decltype(load2)>)
, dim3(grid), dim3(block), smem_size, 0, load2, reinterpret_cast<Element*>(src));
hipError_t result = hipDeviceSynchronize();
if (result != hipSuccess) {
std::cout << "Failed" << std::endl;
}
// BUGFIX: release the device buffer — the original leaked it on every call.
hipFree(src);
return (result == hipSuccess ? 0 : -1);
}
// Exercise TileLoader on a column-major 32x8 half tile, staging it into
// shared memory in both swizzled column-major and swizzled row-major
// layouts. Returns 0 on success, -1 if any launch failed.
// Host/device buffers are owned by cutlass::HostTensor (RAII), so no
// explicit frees are needed here.
int TestColumnMajor() {
using Element = cutlass::half_t;
const int row = 32;
const int col = 8;
using Layout = cutlass::layout::ColumnMajor;
cutlass::HostTensor<Element, Layout> matrix({row, col});
// Fill with 0, 1, 2, ... so layout transpositions are easy to eyeball.
cutlass::reference::host::BlockFillSequential(matrix.host_data(),
matrix.capacity());
// Dump the matrix.
// std::cout << "Matrix:\n" << matrix.host_view() << "\n";
// Copy the matrix to the device.
matrix.sync_device();
// Dynamic shared memory: one Element per tile slot.
int smem_size = int(sizeof(Element) * row * col);
const int kThreads = 32;
dim3 grid(1, 1);
dim3 block(kThreads, 1, 1);
// column-major to column-major
TileLoader<row, col, Element, kThreads, TileLayout::ColumnMajor,
TileLayout::SwizzledColumnMajor>
load1(row, col);
hipLaunchKernelGGL(( TestTileLoader<Element, decltype(load1)>)
, dim3(grid), dim3(block), smem_size, 0, load1, matrix.device_ref().data());
// column-major to column-major
TileLoader<row, col, Element, kThreads, TileLayout::ColumnMajor,
TileLayout::SwizzledRowMajor>
load2(row, col);
hipLaunchKernelGGL(( TestTileLoader<Element, decltype(load2)>)
, dim3(grid), dim3(block), smem_size, 0, load2, matrix.device_ref().data());
hipError_t result = hipDeviceSynchronize();
if (result != hipSuccess) {
std::cout << "Failed" << std::endl;
}
return (result == hipSuccess ? 0 : -1);
}
// Run both tile-loader tests and propagate failure through the process exit
// code. Fix: the original discarded the 0 / -1 status returned by each test,
// so the program always exited 0 even when a launch failed.
int main() {
int status = 0;
status |= TestRowMajor();
status |= TestColumnMajor();
return (status == 0) ? 0 : 1;
}
| d9dd13b6258e0827142bece5fdf1f11e28290db0.cu |
#include <cutlass/aligned_buffer.h>
#include <cutlass/core_io.h>
#include <cutlass/gemm/gemm.h>
#include <cutlass/layout/matrix.h>
#include <cutlass/matrix_shape.h>
#include <cutlass/numeric_types.h>
#include <cutlass/transform/pitch_linear_thread_map.h>
#include <cutlass/transform/threadblock/predicated_tile_iterator.h>
#include <cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h>
#include <cutlass/util/debug.h>
#include <cutlass/util/device_dump.h>
#include <cutlass/util/host_tensor.h>
#include <cutlass/util/reference/host/gemm.h>
#include <cutlass/util/reference/host/tensor_compare.h>
#include <cutlass/util/reference/host/tensor_fill.h>
#include <cutlass/util/tensor_view_io.h>
#include <iomanip>
#include <iostream>
#include "cuda_utils.cuh"
#include "tile_loader.h"
#define CEIL_DIV(m, n) ((m) + (n)-1) / (n)
// Test harness kernel: flatten the 2-D thread index and forward it to the
// tile loader, which copies `src` into the dynamically-sized shared buffer
// (size passed as the third launch argument, in Elements).
template <typename Element, typename LOAD>
__global__ void TestTileLoader(LOAD load, Element* src) {
extern __shared__ Element shared_storage[];
// Linearize (x, y) thread coordinates into a single block-local id.
int tid = threadIdx.y * blockDim.x + threadIdx.x;
load.template load(src, shared_storage, tid);
}
// Exercise TileLoader on a row-major 64x64 half tile, staging it into shared
// memory in both swizzled column-major and swizzled row-major layouts.
// Returns 0 on success, -1 if any launch failed.
// Fix: the original never freed `src`, leaking the device buffer each call.
int TestRowMajor() {
// const int M = 1024;
// const int N = 512;
const int row = 64;
const int col = 64;
int numel = row * col;
using Element = cutlass::half_t;
using Layout = cutlass::layout::RowMajor;
int threads = 128;
int blocks = CEIL_DIV(numel, threads);
__half* src;
cudaMalloc(&src, numel * sizeof(__half));
InitHalfs<<<blocks, threads>>>(src, numel);
// PrintHalfs(src, numel);
// using Element = float;
// Element* src;
// CudaCheck(cudaMalloc(&src, numel * sizeof(Element)));
int smem_size = int(sizeof(Element) * row * col);
const int kThreads = 32;
dim3 grid(1, 1);
dim3 block(kThreads, 1, 1);
// FillRandomFloats(src, numel);
// PrintFloats(src, numel);
// return 0;
// row-major to column-major
TileLoader<row, col, Element, kThreads, TileLayout::RowMajor,
TileLayout::SwizzledColumnMajor>
load1(row, col);
TestTileLoader<Element, decltype(load1)>
<<<grid, block, smem_size, 0>>>(load1, reinterpret_cast<Element*>(src));
// row-major to row-major
TileLoader<row, col, Element, kThreads, TileLayout::RowMajor,
TileLayout::SwizzledRowMajor>
load2(row, col);
TestTileLoader<Element, decltype(load2)>
<<<grid, block, smem_size, 0>>>(load2, reinterpret_cast<Element*>(src));
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
std::cout << "Failed" << std::endl;
}
// BUGFIX: release the device buffer — the original leaked it on every call.
cudaFree(src);
return (result == cudaSuccess ? 0 : -1);
}
// Exercises TileLoader on a column-major 32x8 half-precision tile, staging
// it into shared memory with both swizzled target layouts. Returns 0 on
// success, -1 if the device reports an error. The device buffer is owned by
// HostTensor — presumably released by its destructor; verify if leaks matter.
int TestColumnMajor() {
  using Element = cutlass::half_t;
  const int row = 32;
  const int col = 8;
  using Layout = cutlass::layout::ColumnMajor;
  cutlass::HostTensor<Element, Layout> matrix({row, col});
  // Fill 0, 1, 2, ... so the swizzled result is easy to inspect.
  cutlass::reference::host::BlockFillSequential(matrix.host_data(),
                                                matrix.capacity());
  // Dump the matrix.
  // std::cout << "Matrix:\n" << matrix.host_view() << "\n";
  // Copy the matrix to the device.
  matrix.sync_device();
  int smem_size = int(sizeof(Element) * row * col);
  const int kThreads = 32;  // a single warp drives each load
  dim3 grid(1, 1);
  dim3 block(kThreads, 1, 1);
  // column-major to swizzled column-major
  TileLoader<row, col, Element, kThreads, TileLayout::ColumnMajor,
             TileLayout::SwizzledColumnMajor>
      load1(row, col);
  TestTileLoader<Element, decltype(load1)>
      <<<grid, block, smem_size, 0>>>(load1, matrix.device_ref().data());
  // column-major to swizzled row-major (the original comment incorrectly
  // repeated "column-major" here)
  TileLoader<row, col, Element, kThreads, TileLayout::ColumnMajor,
             TileLayout::SwizzledRowMajor>
      load2(row, col);
  TestTileLoader<Element, decltype(load2)>
      <<<grid, block, smem_size, 0>>>(load2, matrix.device_ref().data());
  cudaError_t result = cudaDeviceSynchronize();
  if (result != cudaSuccess) {
    std::cout << "Failed" << std::endl;
  }
  return (result == cudaSuccess ? 0 : -1);
}
// Runs both layout tests and propagates any failure through the process
// exit code. The original discarded the int (-1 on CUDA failure) returned
// by TestRowMajor()/TestColumnMajor(), so the program always exited 0.
int main() {
  int row_major_status = TestRowMajor();
  int col_major_status = TestColumnMajor();  // run even if the first failed
  return (row_major_status == 0 && col_major_status == 0) ? 0 : 1;
}
|
04264537b2bbb144638f9a5d37a9cc6bc475a3ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
// Code can sum more than 1024 elements.
// CUDA kernel to add elements of two arrays
// Element-wise vector add: y[i] = x[i] + y[i] for every i in [0, n).
// Uses a grid-stride loop, so the kernel is correct for any launch
// configuration, including grids smaller than n.
__global__ void add(int n, float* x, float* y)
{
    const int step = blockDim.x * gridDim.x;  // total threads in the grid
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < n) {
        y[idx] = x[idx] + y[idx];
        idx += step;
    }
}
// Adds two N-element vectors on the GPU (y = x + y) and verifies that every
// result equals 3.0. Exits nonzero on any allocation or launch failure.
int main(void)
{
    int N = 100000000;
    float* h_x, * h_y;
    // size_t for the byte count: a 32-bit int overflows once N exceeds
    // ~536 million floats.
    size_t size = sizeof(float) * (size_t)N;
    printf("Number of elements in the array %d.\n",N);
    // Allocate host memory; fail fast instead of dereferencing NULL below.
    h_x = (float*)malloc(size);
    h_y = (float*)malloc(size);
    if (h_x == NULL || h_y == NULL) {
        printf("Host allocation failed.\n");
        free(h_x);
        free(h_y);
        return 1;
    }
    // Create device pointers and allocate device memory, checking each call
    // (the original ignored failures and then wrote through the pointers).
    float* d_x;
    float* d_y;
    if (hipMalloc((void**)&d_x, size) != hipSuccess ||
        hipMalloc((void**)&d_y, size) != hipSuccess) {
        printf("Device allocation failed.\n");
        free(h_x);
        free(h_y);
        return 1;
    }
    // Initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        h_x[i] = 1.0;
        h_y[i] = 2.0;
    }
    // Copy inputs host -> device
    hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice);
    hipMemcpy(d_y, h_y, size, hipMemcpyHostToDevice);
    // 256 threads per block, ceil(N / 256) blocks
    int blockSize = 256;
    int numberBlocks = (N + blockSize - 1) / blockSize;
    // Launch kernel on N elements on the GPU
    add << <numberBlocks, blockSize >> > (N, d_x, d_y);
    hipError_t error = hipGetLastError();
    if (error != hipSuccess)
    {
        // print the CUDA error message and exit (the original kept going)
        printf("CUDA error: %s\n", hipGetErrorString(error));
        return 1;
    }
    // Wait for the GPU to finish before reading results on the host.
    hipDeviceSynchronize();
    // Copy the result device -> host
    hipMemcpy(h_y, d_y, size, hipMemcpyDeviceToHost);
    // Check for errors (all values should be 3.0)
    float maxError = 0.0;
    for (int i = 0; i < N; i++)
        maxError = (float)fmax(maxError, fabs(h_y[i] - 3.0));
    printf("Max error: %lf\n", maxError);
    // Free device memory
    hipFree(d_x);
    hipFree(d_y);
    // Free host memory
    free(h_x);
    free(h_y);
    return 0;
}
| 04264537b2bbb144638f9a5d37a9cc6bc475a3ba.cu | #include <stdio.h>
#include <math.h>
// Code can sum more than 1024 elements.
// CUDA kernel to add elements of two arrays
// Element-wise vector add: y[i] = x[i] + y[i] for i in [0, n).
// Grid-stride loop: `stride` is the total thread count of the grid, so any
// launch configuration covers the whole array, even when n exceeds the grid.
__global__ void add(int n, float* x, float* y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}
// Adds two N-element vectors on the GPU (y = x + y) and checks that every
// result equals 3.0 (x filled with 1.0, y with 2.0).
// NOTE(review): malloc/cudaMalloc results are unchecked, and `int size`
// would overflow for element counts above ~536M floats — confirm intended.
int main(void)
{
    // Variables
    int N = 100000000;
    float* h_x, * h_y;
    int size = sizeof(float) * N;
    printf("Number of elements in the array %d.\n",N);
    // Allocate Host Memory
    h_x = (float*)malloc(size);
    h_y = (float*)malloc(size);
    // Create Device Pointers
    float* d_x;
    float* d_y;
    // Allocate Device Memory
    cudaMalloc((void**)&d_x, size);
    cudaMalloc((void**)&d_y, size);
    // Initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        h_x[i] = 1.0;
        h_y[i] = 2.0;
    }
    // Memory copy Host to Device
    cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
    // Create Blocks: 256 threads per block, ceil(N / 256) blocks
    int blockSize = 256;
    int numberBlocks = (N + blockSize - 1) / blockSize;
    // Launch kernel on N elements on the GPU
    add << <numberBlocks, blockSize >> > (N, d_x, d_y);
    // cudaGetLastError catches launch-configuration errors; execution
    // errors surface at the cudaDeviceSynchronize below.
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
    }
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    // Memory copy Host to Device of the result
    cudaMemcpy(h_y, d_y, size, cudaMemcpyDeviceToHost);
    // Print array
    //for (int i = 0; i < N; i++)
    //printf("%d: %f\n", i, h_y[i]);
    // Check for errors (all values should be 3.0)
    float maxError = 0.0;
    for (int i = 0; i < N; i++)
        maxError = (float)fmax(maxError, fabs(h_y[i] - 3.0));
    printf("Max error: %lf\n", maxError);
    // Free cuda memory
    cudaFree(d_x);
    cudaFree(d_y);
    // Free memory
    free(h_x);
    free(h_y);
    return 0;
}
|
6732ad40483d54f223acdfd90d7663cde899e407.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
// Guard for HIP runtime calls: on failure, logs the failing statement and
// the runtime's error string via wbLog, then executes `return -1;` in the
// *enclosing* function — so it is only usable inside functions returning
// int (here: main).
#define wbCheck(stmt)                                                     \
  do {                                                                    \
    hipError_t err = stmt;                                                \
    if (err != hipSuccess) {                                              \
      wbLog(ERROR, "Failed to run stmt ", #stmt);                         \
      wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err));        \
      return -1;                                                          \
    }                                                                     \
  } while (0)
#define TILE_SIZE 16
// Compute C = A * B
// Tiled matrix multiply C = A * B in shared memory.
// A is (m x n), B is (n x k), C is (m x k), all row-major.
// Expects a 2D launch with blockDim == (TILE_SIZE, TILE_SIZE).
//
// Fix vs. original: the inner-product loop variable shadowed the parameter
// `k`, and a tile-local `by` shadowed the blockIdx-derived `by` — renamed
// (`t`, `bRow`) for clarity; arithmetic is unchanged.
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
                                     int m,
                                     int n,
                                     int k) {
  __shared__ float ds_A[TILE_SIZE][TILE_SIZE];
  __shared__ float ds_B[TILE_SIZE][TILE_SIZE];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int COL = blockIdx.x * blockDim.x + tx;
  int ROW = blockIdx.y * blockDim.y + ty;
  float sum = 0.0f;
  // Walk the shared (n) dimension one TILE_SIZE-wide slab at a time.
  int iters = (n + TILE_SIZE - 1) / TILE_SIZE;
  for (int i = 0; i < iters; ++i) {
    // Stage one tile of A and one of B, zero-padding out-of-range entries
    // so the inner product below needs no bounds checks.
    int aCol = i * TILE_SIZE + tx;
    int bRow = i * TILE_SIZE + ty;
    ds_A[ty][tx] = (ROW < m && aCol < n) ? A[ROW * n + aCol] : 0.0f;
    ds_B[ty][tx] = (COL < k && bRow < n) ? B[bRow * k + COL] : 0.0f;
    // Every thread must finish staging before any thread reads the tiles.
    __syncthreads();
    for (int t = 0; t < TILE_SIZE; ++t)
      sum += ds_A[ty][t] * ds_B[t][tx];
    // Keep fast threads from overwriting tiles still being read.
    __syncthreads();
  }
  if (COL < k && ROW < m)
    C[ROW * k + COL] = sum;
}
// Harness entry point: reads two matrices from the wb input files,
// multiplies them on the GPU with the tiled kernel, and hands the result to
// wbSolution for checking. Returns -1 (via wbCheck) on any HIP failure.
int main(int argc, char **argv) {
  wbArg_t args;
  float *hostA; // The A matrix
  float *hostB; // The B matrix
  float *hostC; // The output C matrix
  float *deviceA;
  float *deviceB;
  float *deviceC;
  int numARows;    // number of rows in the matrix A
  int numAColumns; // number of columns in the matrix A
  int numBRows;    // number of rows in the matrix B
  int numBColumns; // number of columns in the matrix B
  int numCRows;    // number of rows in the matrix C (you have to set this)
  int numCColumns; // number of columns in the matrix C (you have to set this)
  args = wbArg_read(argc, argv);
  wbTime_start(Generic, "Importing data and creating memory on host");
  hostA =
      ( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
  hostB =
      ( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
  //@@ Set numCRows and numCColumns
  // C = A * B, so C inherits A's row count and B's column count.
  numCRows = numARows;
  numCColumns = numBColumns;
  //@@ Allocate the hostC matrix
  hostC =
      ( float * )malloc(numCRows*numCColumns*sizeof(float));
  wbTime_stop(Generic, "Importing data and creating memory on host");
  wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
  wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
  wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns);
  wbTime_start(GPU, "Allocating GPU memory.");
  //@@ Allocate GPU memory here
  wbCheck(hipMalloc((void**)&deviceA, numARows*numAColumns*sizeof(float)));
  wbCheck(hipMalloc((void**)&deviceB, numBRows*numBColumns*sizeof(float)));
  wbCheck(hipMalloc((void**)&deviceC, numCRows*numCColumns*sizeof(float)));
  wbTime_stop(GPU, "Allocating GPU memory.");
  wbTime_start(GPU, "Copying input memory to the GPU.");
  //@@ Copy memory to the GPU here
  wbCheck(hipMemcpy(deviceA, hostA, numARows*numAColumns*sizeof(float), hipMemcpyHostToDevice));
  wbCheck(hipMemcpy(deviceB, hostB, numBRows*numBColumns*sizeof(float), hipMemcpyHostToDevice));
  wbTime_stop(GPU, "Copying input memory to the GPU.");
  //@@ Initialize the grid and block dimensions here
  // One TILE_SIZE x TILE_SIZE thread block per output tile, rounded up.
  dim3 grid((numCColumns+TILE_SIZE-1)/TILE_SIZE, (numCRows+TILE_SIZE-1)/TILE_SIZE);
  dim3 block(TILE_SIZE,TILE_SIZE);
  wbTime_start(Compute, "Performing CUDA computation");
  //@@ Launch the GPU Kernel here
  hipLaunchKernelGGL(( matrixMultiplyShared), dim3(grid), dim3(block), 0, 0,
      deviceA,
      deviceB,
      deviceC,
      numARows,
      numAColumns,
      numBColumns);
  hipDeviceSynchronize();
  wbTime_stop(Compute, "Performing CUDA computation");
  wbTime_start(Copy, "Copying output memory to the CPU");
  //@@ Copy the GPU memory back to the CPU here
  wbCheck(hipMemcpy(hostC, deviceC, numCRows*numCColumns*sizeof(float), hipMemcpyDeviceToHost));
  wbTime_stop(Copy, "Copying output memory to the CPU");
  wbTime_start(GPU, "Freeing GPU Memory");
  //@@ Free the GPU memory here
  wbCheck(hipFree(deviceA));
  wbCheck(hipFree(deviceB));
  wbCheck(hipFree(deviceC));
  wbTime_stop(GPU, "Freeing GPU Memory");
  wbSolution(args, hostC, numCRows, numCColumns);
  free(hostA);
  free(hostB);
  free(hostC);
  return 0;
}
| 6732ad40483d54f223acdfd90d7663cde899e407.cu | #include <wb.h>
// Guard for CUDA runtime calls: on failure, logs the failing statement and
// the runtime's error string via wbLog, then executes `return -1;` in the
// *enclosing* function — so it is only usable inside functions returning
// int (here: main).
#define wbCheck(stmt)                                                     \
  do {                                                                    \
    cudaError_t err = stmt;                                               \
    if (err != cudaSuccess) {                                             \
      wbLog(ERROR, "Failed to run stmt ", #stmt);                         \
      wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err));       \
      return -1;                                                          \
    }                                                                     \
  } while (0)
#define TILE_SIZE 16
// Compute C = A * B
// Tiled matrix multiply C = A * B in shared memory.
// A is (m x n), B is (n x k), C is (m x k), all row-major.
// Expects a 2D launch with blockDim == (TILE_SIZE, TILE_SIZE).
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
                                     int m,
                                     int n,
                                     int k) {
  //@@ Insert code to implement matrix multiplication here
  //@@ You have to use shared memory for this MP
  __shared__ float ds_A[TILE_SIZE][TILE_SIZE];
  __shared__ float ds_B[TILE_SIZE][TILE_SIZE];
  int bx = blockIdx.x;
  int by = blockIdx.y;
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int COL = bx * blockDim.x + tx;
  int ROW = by * blockDim.y + ty;
  float sum = 0.0;
  // ( m * n) X (n * k) = (m * k)
  // Walk the shared (n) dimension one TILE_SIZE-wide slab at a time.
  int iters = (n + TILE_SIZE-1)/TILE_SIZE;
  for(int i = 0; i < iters; ++i)
  {
    // load to shared memory, zero-padding out-of-range entries so the
    // inner product below needs no bounds checks.
    int ax = i * TILE_SIZE + tx;
    // NOTE(review): this `by` shadows the blockIdx-derived `by` above;
    // harmless here because the outer `by` is not used again, but fragile.
    int by = i * TILE_SIZE + ty;
    if (ROW < m && ax < n)
      ds_A[ty][tx] = A[ ROW * n + ax ];
    else
      ds_A[ty][tx] = 0.0;
    if (COL < k && by < n )
      ds_B[ty][tx] = B[ by * k + COL];
    else
      ds_B[ty][tx] = 0.0;
    // All threads must finish staging before any thread reads the tiles.
    __syncthreads();
    // NOTE(review): this loop variable `k` shadows the parameter `k`; its
    // scope ends before the final bounds check, which uses the parameter.
    for(int k=0;k<TILE_SIZE;++k)
      sum += ds_A[ty][k] * ds_B[k][tx];
    // Keep fast threads from overwriting tiles still being read.
    __syncthreads();
  }
  if(COL < k && ROW < m)
    C[ROW*k + COL] = sum;
}
// Harness entry point: reads two matrices from the wb input files,
// multiplies them on the GPU with the tiled kernel, and hands the result to
// wbSolution for checking. Returns -1 (via wbCheck) on any CUDA failure.
int main(int argc, char **argv) {
  wbArg_t args;
  float *hostA; // The A matrix
  float *hostB; // The B matrix
  float *hostC; // The output C matrix
  float *deviceA;
  float *deviceB;
  float *deviceC;
  int numARows;    // number of rows in the matrix A
  int numAColumns; // number of columns in the matrix A
  int numBRows;    // number of rows in the matrix B
  int numBColumns; // number of columns in the matrix B
  int numCRows;    // number of rows in the matrix C (you have to set this)
  int numCColumns; // number of columns in the matrix C (you have to set this)
  args = wbArg_read(argc, argv);
  wbTime_start(Generic, "Importing data and creating memory on host");
  hostA =
      ( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
  hostB =
      ( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
  //@@ Set numCRows and numCColumns
  // C = A * B, so C inherits A's row count and B's column count.
  numCRows = numARows;
  numCColumns = numBColumns;
  //@@ Allocate the hostC matrix
  hostC =
      ( float * )malloc(numCRows*numCColumns*sizeof(float));
  wbTime_stop(Generic, "Importing data and creating memory on host");
  wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
  wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
  wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns);
  wbTime_start(GPU, "Allocating GPU memory.");
  //@@ Allocate GPU memory here
  wbCheck(cudaMalloc((void**)&deviceA, numARows*numAColumns*sizeof(float)));
  wbCheck(cudaMalloc((void**)&deviceB, numBRows*numBColumns*sizeof(float)));
  wbCheck(cudaMalloc((void**)&deviceC, numCRows*numCColumns*sizeof(float)));
  wbTime_stop(GPU, "Allocating GPU memory.");
  wbTime_start(GPU, "Copying input memory to the GPU.");
  //@@ Copy memory to the GPU here
  wbCheck(cudaMemcpy(deviceA, hostA, numARows*numAColumns*sizeof(float), cudaMemcpyHostToDevice));
  wbCheck(cudaMemcpy(deviceB, hostB, numBRows*numBColumns*sizeof(float), cudaMemcpyHostToDevice));
  wbTime_stop(GPU, "Copying input memory to the GPU.");
  //@@ Initialize the grid and block dimensions here
  // One TILE_SIZE x TILE_SIZE thread block per output tile, rounded up.
  dim3 grid((numCColumns+TILE_SIZE-1)/TILE_SIZE, (numCRows+TILE_SIZE-1)/TILE_SIZE);
  dim3 block(TILE_SIZE,TILE_SIZE);
  wbTime_start(Compute, "Performing CUDA computation");
  //@@ Launch the GPU Kernel here
  matrixMultiplyShared<<<grid, block>>>(
      deviceA,
      deviceB,
      deviceC,
      numARows,
      numAColumns,
      numBColumns);
  cudaDeviceSynchronize();
  wbTime_stop(Compute, "Performing CUDA computation");
  wbTime_start(Copy, "Copying output memory to the CPU");
  //@@ Copy the GPU memory back to the CPU here
  wbCheck(cudaMemcpy(hostC, deviceC, numCRows*numCColumns*sizeof(float), cudaMemcpyDeviceToHost));
  wbTime_stop(Copy, "Copying output memory to the CPU");
  wbTime_start(GPU, "Freeing GPU Memory");
  //@@ Free the GPU memory here
  wbCheck(cudaFree(deviceA));
  wbCheck(cudaFree(deviceB));
  wbCheck(cudaFree(deviceC));
  wbTime_stop(GPU, "Freeing GPU Memory");
  wbSolution(args, hostC, numCRows, numCColumns);
  free(hostA);
  free(hostB);
  free(hostC);
  return 0;
}
|
21496c689b419296747b71c2ae8c438cb638966b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <cuda_fp16.hpp>
#include <math.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
using namespace nvcuda;
// Terminates the process with a file:line diagnostic and the runtime's
// error string whenever a HIP runtime call does not return hipSuccess.
#define CHK_CUDA(expression)                                                                                           \
  {                                                                                                                    \
    hipError_t status = (expression);                                                                                  \
    if (status != hipSuccess) {                                                                                        \
      std::cerr << "Error in file: " << __FILE__ << ", on line: " << __LINE__ << ": " << hipGetErrorString(status)     \
                << std::endl;                                                                                          \
      std::exit(EXIT_FAILURE);                                                                                         \
    }                                                                                                                  \
  }
// Compile-time floor(log2(x)) for x >= 1 — exact for the power-of-two
// arguments used here (warp size, tile dimensions).
template <uint x>
struct Log2 {
  // Instantiating Log2<0> would otherwise recurse until the template depth
  // limit; fail with a clear message instead.
  static_assert(x >= 1, "Log2<x> requires x >= 1");
  static constexpr uint value = 1 + Log2<x / 2>::value;
};
// Base case: log2(1) == 0.
template <>
struct Log2<1> {
  static constexpr uint value = 0;
};
// Four packed halves stored as two half2; 8-byte alignment enables a single
// 64-bit vectorized load/store per half4.
struct __align__(8) half4 {
  half2 vals[2];
};
// Forward "dot interaction" kernel, non-aligned path (scalar global loads,
// for when num_cols does not meet the vector-load width). One warp handles
// one sample:
//   1. copy the sample's (num_rows x num_cols) matrix into shared memory,
//      zero-padded to num_rows_after_padding x num_cols_after_padding;
//   2. write the first input row (num_cols halves) to the start of the
//      sample's output;
//   3. compute X * X^T with WMMA TILE_DIM^3 tensor-core tiles (b fragments
//      are loaded col-major from the same tile pointers as a, which yields
//      the transpose);
//   4. append the strictly-lower-triangular entries of the result, then
//      `pad` trailing zeros.
// Requires dynamic shared memory of smem_elems_per_warp halves per warp.
template <uint WARPS_PER_BLOCK,
          uint THREADBLOCK_SIZE,
          uint M_BLOCKS,
          uint K_BLOCKS,
          uint SMEM_STRIDE,
          uint SMEM_STRIDE_ACC,
          uint WARP_SIZE,
          uint WARP_SIZE_LOG_2,
          uint TILE_DIM,
          uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
    void dotBasedInteractFwdKernelNonAligned(const __half *__restrict input,
                                             __half *__restrict output,
                                             uint batch_size,
                                             uint num_rows,
                                             uint num_cols,
                                             uint num_rows_after_padding,
                                             uint num_cols_after_padding,
                                             uint smem_elems_per_warp,
                                             uint smem_rows_per_warp,
                                             uint output_size,
                                             uint num_row_steps,
                                             uint num_col_steps,
                                             uint pad) {
  uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2);
  int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
  if (sample_id >= batch_size) {
    return;
  }
  int lane_id = threadIdx.x & (WARP_SIZE - 1);
  extern __shared__ half shmem_dynamic[];
  // Each warp works in its own slice of the dynamic shared memory.
  half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
  // Scalar copy of the sample into shared memory, one row at a time.
  const half *sample_input = input + num_rows * num_cols * sample_id;
  for (uint i = 0; i < num_rows; ++i, sample_input += num_cols) {
    for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
      (shmem + i * SMEM_STRIDE)[idx] = sample_input[idx];
    }
  }
  // Zero the padded column range of every real row.
  uint idx = lane_id + num_cols;
  if (idx < num_cols_after_padding) {
    for (int i = 0; i < num_rows; ++i) {
      (shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
    }
  }
  // Zero the padding rows with vectorized half4 stores.
  half4 zeros;
  zeros.vals[0].x = __float2half(0);
  zeros.vals[0].y = __float2half(0);
  zeros.vals[1].x = __float2half(0);
  zeros.vals[1].y = __float2half(0);
  if (lane_id < (num_cols_after_padding >> 2)) {
    for (int i = num_rows; i < num_rows_after_padding; i++) {
      ((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
    }
  }
  // Writers and readers of shmem are the same warp; __syncwarp suffices.
  __syncwarp();
  // Pass the first input row straight through to the output head.
  half *gmem_output = output + output_size * sample_id;
  for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
    gmem_output[idx] = shmem[idx];
  }
  wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS];
  for (int i = 0; i < M_BLOCKS; i++) {
    for (int j = 0; j < M_BLOCKS; j++) {
      wmma::fill_fragment(acc[i][j], 0);
    }
  }
  // Accumulate X * X^T across the column tiles.
  for (int k_step = 0; k_step < num_col_steps; k_step++) {
    wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS];
    wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS];
    for (int j = 0; j < M_BLOCKS; j++) {
      // The last row block is anchored to the end of the padded region so
      // tiles never read past smem_rows_per_warp rows.
      int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
      const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
      wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
      wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
    }
    for (int i = 0; i < M_BLOCKS; i++) {
      for (int j = 0; j < M_BLOCKS; j++) {
        wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
      }
    }
  }
  // Spill the float accumulator tiles back into (reused) shared memory.
  float *shmem_store = reinterpret_cast<float *>(shmem);
  for (int i = 0; i < M_BLOCKS; i++) {
    for (int j = 0; j < M_BLOCKS; j++) {
      float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
      wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major);
    }
  }
  // Emit the strictly-lower-triangular entries (row i contributes i values
  // at offset i*(i-1)/2), skipping over the anchored last row block.
  half *gmem_interact_output = gmem_output + num_cols;
  int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
  int srcLine = 0;
  for (int i = 0; i < num_rows; ++i, ++srcLine) {
    if (i == ((M_BLOCKS - 1) * 16)) {
      srcLine += lastRowBlockOffset;
    }
    if (lane_id < i) {
      uint offset = (i * (i - 1)) >> 1;
      gmem_interact_output[offset + lane_id] = __float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
    }
  }
  // Padding
  if (lane_id < pad) {
    gmem_output[lane_id + output_size - 1] = __float2half(0);
  }
}
// Forward "dot interaction" kernel, aligned path. Identical structure to
// dotBasedInteractFwdKernelNonAligned, but the input rows and the output
// head are moved with float2 (4 halves / 8 bytes) vectorized accesses —
// requires num_cols to be a multiple of 4 and suitably aligned pointers.
// One warp handles one sample: stage the zero-padded sample in shared
// memory, copy the first row to the output head, compute X * X^T with WMMA
// tiles, then append the strictly-lower-triangular result and `pad` zeros.
template <uint WARPS_PER_BLOCK,
          uint THREADBLOCK_SIZE,
          uint M_BLOCKS,
          uint K_BLOCKS,
          uint SMEM_STRIDE,
          uint SMEM_STRIDE_ACC,
          uint WARP_SIZE,
          uint WARP_SIZE_LOG_2,
          uint TILE_DIM,
          uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
    void dotBasedInteractFwdKernel(const __half *__restrict input,
                                   __half *__restrict output,
                                   uint batch_size,
                                   uint num_rows,
                                   uint num_cols,
                                   uint num_rows_after_padding,
                                   uint num_cols_after_padding,
                                   uint smem_elems_per_warp,
                                   uint smem_rows_per_warp,
                                   uint output_size,
                                   uint num_row_steps,
                                   uint num_col_steps,
                                   uint pad) {
  uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2);
  int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
  if (sample_id >= batch_size) {
    return;
  }
  int lane_id = threadIdx.x & (WARP_SIZE - 1);
  extern __shared__ half shmem_dynamic[];
  // Each warp works in its own slice of the dynamic shared memory.
  half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
  // Vectorized copy: each active lane moves 4 halves per row via float2.
  const half *sample_input = input + num_rows * num_cols * sample_id;
  if (lane_id < (num_cols >> 2)) {
    for (int i = 0; i < num_rows; ++i, sample_input += num_cols) {
      ((float2 *)(shmem + i * SMEM_STRIDE))[lane_id] = ((float2 *)sample_input)[lane_id];
    }
  }
  // Zero the padded column range of every real row.
  uint idx = lane_id + num_cols;
  if (idx < num_cols_after_padding) {
    for (int i = 0; i < num_rows; ++i) {
      (shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
    }
  }
  // Zero the padding rows with vectorized half4 stores.
  half4 zeros;
  zeros.vals[0].x = __float2half(0);
  zeros.vals[0].y = __float2half(0);
  zeros.vals[1].x = __float2half(0);
  zeros.vals[1].y = __float2half(0);
  if (lane_id < (num_cols_after_padding >> 2)) {
    for (int i = num_rows; i < num_rows_after_padding; i++) {
      ((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
    }
  }
  // Writers and readers of shmem are the same warp; __syncwarp suffices.
  __syncwarp();
  // Pass the first input row straight through to the output head.
  half *gmem_output = output + output_size * sample_id;
  if (lane_id < (num_cols >> 2)) {
    ((float2 *)gmem_output)[lane_id] = ((float2 *)shmem)[lane_id];
  }
  wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS];
  for (int i = 0; i < M_BLOCKS; i++) {
    for (int j = 0; j < M_BLOCKS; j++) {
      wmma::fill_fragment(acc[i][j], 0);
    }
  }
  // Accumulate X * X^T across the column tiles (b loaded col-major from the
  // same tiles as a, which yields the transpose).
  for (int k_step = 0; k_step < num_col_steps; k_step++) {
    wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS];
    wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS];
    for (int j = 0; j < M_BLOCKS; j++) {
      // The last row block is anchored to the end of the padded region so
      // tiles never read past smem_rows_per_warp rows.
      int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
      const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
      wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
      wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
    }
    for (int i = 0; i < M_BLOCKS; i++) {
      for (int j = 0; j < M_BLOCKS; j++) {
        wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
      }
    }
  }
  // Spill the float accumulator tiles back into (reused) shared memory.
  float *shmem_store = reinterpret_cast<float *>(shmem);
  for (int i = 0; i < M_BLOCKS; i++) {
    for (int j = 0; j < M_BLOCKS; j++) {
      float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
      wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major);
    }
  }
  // Emit the strictly-lower-triangular entries (row i contributes i values
  // at offset i*(i-1)/2), skipping over the anchored last row block.
  half *gmem_interact_output = gmem_output + num_cols;
  int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
  int srcLine = 0;
  for (int i = 0; i < num_rows; ++i, ++srcLine) {
    if (i == ((M_BLOCKS - 1) * 16)) {
      srcLine += lastRowBlockOffset;
    }
    if (lane_id < i) {
      uint offset = (i * (i - 1)) >> 1;
      gmem_interact_output[offset + lane_id] = __float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
    }
  }
  // Padding
  if (lane_id < pad) {
    gmem_output[lane_id + output_size - 1] = __float2half(0);
  }
}
// Backward "dot interaction" kernel, non-aligned path (scalar global
// accesses). One warp handles one sample:
//   1. load the flattened lower-triangular upstream interaction gradient
//      and expand it into a symmetric 2D matrix in shared memory (diagonal
//      and padding rows zeroed);
//   2. stage the zero-padded sample input in shared memory;
//   3. compute ugrad2D * input with WMMA tiles to get the gradient w.r.t.
//      each input row, written to `grad`;
//   4. copy the first num_cols of the upstream gradient to
//      `bottom_mlp_grad`.
// Requires shared_mem_per_warp_size_byte bytes of dynamic shared memory
// per warp.
template <uint WARPS_PER_BLOCK,
          uint THREADBLOCK_SIZE,
          uint ROW_TILES_PER_STEP,
          uint COL_TILES_PER_STEP,
          uint WARP_SIZE,
          uint WARP_SIZE_LOG_2,
          uint TILE_DIM,
          uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
    void dotBasedInteractBwdKernelNonAligned(const __half *__restrict input,
                                             const __half *__restrict upstream_grad,
                                             half __restrict *grad,
                                             half __restrict *bottom_mlp_grad,
                                             uint batch_size,
                                             uint num_rows,
                                             uint num_cols,
                                             uint num_rows_after_padding,
                                             uint num_cols_after_padding,
                                             uint sample_size,
                                             uint interaction_ugrad_size,
                                             uint interaction_ugrad_size_with_padding,
                                             uint interaction_ugrad_2D_size_elems,
                                             uint interaction_ugrad_2D_stride,
                                             uint input_size_elems,
                                             uint input_stride,
                                             uint num_row_steps,
                                             uint num_col_steps,
                                             uint row_tiles_per_step,
                                             uint shared_mem_per_warp_size_byte) {
  extern __shared__ half shared_mem[];
  uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2);
  uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
  if (sample_id >= batch_size) {
    return;
  }
  uint lane_id = threadIdx.x & (WARP_SIZE - 1);
  // ">> 1" to convert to half pointer
  uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
  // Per-warp shared-memory layout: [staged input | 2D ugrad / output acc].
  half *smem_in = &shared_mem[smem_warp_offset];
  half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
  float *smem_out = reinterpret_cast<float *>(smem_temp);
  // Global memory pointers for the current sample
  // Input
  uint gmem_input_sample_offset = sample_id * sample_size;
  const half *gmem_input = &input[gmem_input_sample_offset];
  // Interaction Gradient
  const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
  half *gmem_grad = &grad[gmem_grad_sample_offset];
  // Bottom MLP gradient
  half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
  // Upstream gradient vector
  uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
  const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
  // Upstream gradient vector for interactions
  const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
  for (uint idx = lane_id; idx < interaction_ugrad_size; idx += WARP_SIZE) {
    smem_in[idx] = gmem_ugrad_interactions[idx];
  }
  __syncwarp();
  // Form the 2D ugrad matrix: mirror the flat lower-triangular values into
  // both (lane, row) and (row, lane); the diagonal and padding stay zero.
  if (lane_id < num_rows_after_padding) {
    uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
    uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
    for (uint row = 0; row < num_rows; row++) {
      half ugrad_val = __float2half(0.0f);
      if (row < lane_id && lane_id < num_rows) {
        ugrad_val = smem_in[ugrad_flat_index + row];
        smem_temp[ugrad_offset_1 + row] = ugrad_val;
      }
      if (row <= lane_id && lane_id < num_rows_after_padding) {
        smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
      }
    }
    for (uint row = num_rows; row < num_rows_after_padding; row++) {
      smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
    }
  }
  __syncwarp();
  // Input -> Shared Memory (scalar copy, zero-padding extra columns)
  for (uint row = 0; row < num_rows; row++) {
    half *smem_row_ptr = &smem_in[row * input_stride];
    const half *gmem_row_ptr = &gmem_input[row * num_cols];
    for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
      smem_row_ptr[idx] = gmem_row_ptr[idx];
    }
    uint idx = lane_id + num_cols;
    if (idx < num_cols_after_padding) {
      smem_row_ptr[idx] = __float2half(0);
    }
  }
// Zero the padding rows.
#pragma unroll 2
  for (uint row = num_rows; row < num_rows_after_padding; row++) {
    half *smem_row_ptr = &smem_in[row * input_stride];
    for (uint idx = lane_id; idx < num_cols_after_padding; idx += WARP_SIZE) {
      smem_row_ptr[idx] = __float2half(0);
    }
  }
  __syncwarp();
  // Load the 2D ugrad tiles once; they are reused for every column step.
  wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[ROW_TILES_PER_STEP]
                                                                                       [ROW_TILES_PER_STEP];
  for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
    for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
      const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
      wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
    }
  }
  wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[ROW_TILES_PER_STEP];
  wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> b[ROW_TILES_PER_STEP];
  for (int col_step = 0; col_step < num_col_steps; col_step++) {
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
      wmma::fill_fragment(acc[i], 0);
      wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
    }
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
        wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
      }
    }
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
      wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, wmma::mem_row_major);
    }
    __syncwarp();
    // Flush this column slab of the per-row gradient to global memory.
    uint gmem_grad_col = (col_step << TILE_DIM_LOG_2) + lane_id;
    if (gmem_grad_col < num_cols) {
      for (uint i = 0; i < num_rows; i++) {
        gmem_grad[i * num_cols + gmem_grad_col] = __float2half(smem_out[(i << TILE_DIM_LOG_2) + lane_id]);
      }
    }
  }
  // The bottom-MLP gradient is the leading num_cols of the upstream grad.
  for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
    gmem_mlp_grad[idx] = gmem_ugrad[idx];
  }
}
// Backward "dot interaction" kernel, aligned path. Same algorithm as
// dotBasedInteractBwdKernelNonAligned, but the ugrad prefix, input rows,
// padding rows, and bottom-MLP gradient are moved with vectorized
// float4/float2/half4 accesses — requires suitably aligned, multiple-of-4
// column counts. One warp per sample: expand the flat lower-triangular
// upstream gradient into a symmetric 2D matrix, stage the zero-padded
// input, compute ugrad2D * input with WMMA tiles into `grad`, and copy the
// first num_cols of the upstream gradient to `bottom_mlp_grad`.
template <uint WARPS_PER_BLOCK,
          uint THREADBLOCK_SIZE,
          uint ROW_TILES_PER_STEP,
          uint COL_TILES_PER_STEP,
          uint WARP_SIZE,
          uint WARP_SIZE_LOG_2,
          uint TILE_DIM,
          uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
    void dotBasedInteractBwdKernel(const __half *__restrict input,
                                   const __half *__restrict upstream_grad,
                                   half __restrict *grad,
                                   half __restrict *bottom_mlp_grad,
                                   uint batch_size,
                                   uint num_rows,
                                   uint num_cols,
                                   uint num_rows_after_padding,
                                   uint num_cols_after_padding,
                                   uint sample_size,
                                   uint interaction_ugrad_size,
                                   uint interaction_ugrad_size_with_padding,
                                   uint interaction_ugrad_2D_size_elems,
                                   uint interaction_ugrad_2D_stride,
                                   uint input_size_elems,
                                   uint input_stride,
                                   uint num_row_steps,
                                   uint num_col_steps,
                                   uint row_tiles_per_step,
                                   uint shared_mem_per_warp_size_byte) {
  extern __shared__ half shared_mem[];
  uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2);
  uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
  if (sample_id >= batch_size) {
    return;
  }
  uint lane_id = threadIdx.x & (WARP_SIZE - 1);
  // ">> 1" to convert to half pointer
  uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
  // Per-warp shared-memory layout: [staged input | 2D ugrad / output acc].
  half *smem_in = &shared_mem[smem_warp_offset];
  half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
  float *smem_out = reinterpret_cast<float *>(smem_temp);
  // Global memory pointers for the current sample
  // Input
  uint gmem_input_sample_offset = sample_id * sample_size;
  const half *gmem_input = &input[gmem_input_sample_offset];
  // Interaction Gradient
  const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
  half *gmem_grad = &grad[gmem_grad_sample_offset];
  // Bottom MLP gradient
  half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
  // Upstream gradient vector
  uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
  const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
  // Upstream gradient vector for interactions
  const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily):
// bulk of it as float4 (8 halves), then a scalar tail loop.
#pragma unroll
  for (uint idx = lane_id; idx < (interaction_ugrad_size >> 3); idx += WARP_SIZE) {
    ((float4 *)smem_in)[idx] = ((float4 *)gmem_ugrad_interactions)[idx];
  }
  uint offset = (interaction_ugrad_size >> 3) << 3;
  for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += WARP_SIZE) {
    smem_in[idx] = gmem_ugrad_interactions[idx];
  }
  __syncwarp();
  // Form the 2D ugrad matrix: mirror the flat lower-triangular values into
  // both (lane, row) and (row, lane); the diagonal and padding stay zero.
  if (lane_id < num_rows_after_padding) {
    uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
    uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
    for (uint row = 0; row < num_rows; row++) {
      half ugrad_val = __float2half(0.0f);
      if (row < lane_id && lane_id < num_rows) {
        ugrad_val = smem_in[ugrad_flat_index + row];
        smem_temp[ugrad_offset_1 + row] = ugrad_val;
      }
      if (row <= lane_id && lane_id < num_rows_after_padding) {
        smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
      }
    }
    for (uint row = num_rows; row < num_rows_after_padding; row++) {
      smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
    }
  }
  __syncwarp();
  // Input -> Shared Memory (vectorized float2 copy per row)
  if (lane_id < (num_cols >> 2)) {
    for (uint row = 0; row < num_rows; row++) {
      half *smem_row_ptr = &smem_in[row * input_stride];
      const half *gmem_row_ptr = &gmem_input[row * num_cols];
      ((float2 *)smem_row_ptr)[lane_id] = ((float2 *)gmem_row_ptr)[lane_id];
    }
  }
  // Zero the padded column range of every real row.
  uint idx = lane_id + num_cols;
  if (idx < num_cols_after_padding) {
    for (uint row = 0; row < num_rows; row++) {
      half *smem_row_ptr = &smem_in[row * input_stride];
      smem_row_ptr[idx] = __float2half(0);
    }
  }
  // Zero the padding rows with vectorized half4 stores.
  half4 zeros;
  zeros.vals[0].x = __float2half(0);
  zeros.vals[0].y = __float2half(0);
  zeros.vals[1].x = __float2half(0);
  zeros.vals[1].y = __float2half(0);
  if (lane_id < (num_cols_after_padding >> 2)) {
#pragma unroll 2
    for (uint row = num_rows; row < num_rows_after_padding; row++) {
      half *smem_row_ptr = &smem_in[row * input_stride];
      ((half4 *)smem_row_ptr)[lane_id] = zeros;
    }
  }
  __syncwarp();
  // Load the 2D ugrad tiles once; they are reused for every column step.
  wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[ROW_TILES_PER_STEP]
                                                                                       [ROW_TILES_PER_STEP];
  for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
    for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
      const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
      wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
    }
  }
  wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[ROW_TILES_PER_STEP];
  wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> b[ROW_TILES_PER_STEP];
  for (int col_step = 0; col_step < num_col_steps; col_step++) {
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
      wmma::fill_fragment(acc[i], 0);
      wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
    }
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
        wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
      }
    }
    for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
      float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
      wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, wmma::mem_row_major);
    }
    __syncwarp();
    // Flush this column slab of the per-row gradient to global memory.
    uint gmem_grad_col = (col_step << TILE_DIM_LOG_2) + lane_id;
    if (gmem_grad_col < num_cols) {
      for (uint i = 0; i < num_rows; i++) {
        gmem_grad[i * num_cols + gmem_grad_col] = __float2half(smem_out[(i << TILE_DIM_LOG_2) + lane_id]);
      }
    }
  }
  // The bottom-MLP gradient is the leading num_cols of the upstream grad.
  if (lane_id < (num_cols >> 2)) {
    ((float2 *)gmem_mlp_grad)[lane_id] = ((float2 *)gmem_ugrad)[lane_id];
  }
}
inline void dotBasedInteractFwd(const void *input,
const void *bottom_mlp_output,
void *output,
uint batch_size,
uint num_rows,
uint num_cols,
uint pad) {
const uint kWarpSize = 32;
const uint kWarpSizeLog2 = Log2<kWarpSize>::value;
const uint kTileDim = 16;
const uint kTileDimLog2 = Log2<kTileDim>::value;
const uint warps_per_threadblock = 4;
const uint threadblock_size = warps_per_threadblock * 32;
const uint kRowTilesPerStep = 2;
const uint kColTilesPerStep = 1;
// num tiles
uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2;
uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2;
// number of rows and columns after padding
uint num_rows_after_padding = kTileDim << 1;
uint num_cols_after_padding = num_col_tiles << kTileDimLog2;
uint num_row_steps = num_row_tiles / kRowTilesPerStep;
uint num_col_steps = num_col_tiles / kColTilesPerStep;
const uint K_BLOCKS = 8;
const uint M_BLOCKS = 2;
const uint SKEW_HALF = ((K_BLOCKS % 2) == 0) ? 8 : 0;
const uint SMEM_STRIDE = (K_BLOCKS * 16 + SKEW_HALF);
// multiple of 2 to guarantee 256-bit alignment for start of the row, at least 16 to safeload a tile
const uint smem_rows_per_warp = M_BLOCKS << 4;
const uint smem_elems_per_warp_mat = smem_rows_per_warp * SMEM_STRIDE;
const uint SKEW_HALF_ACC = ((M_BLOCKS % 2) == 0) ? 8 : 0;
const uint SMEM_STRIDE_ACC = (M_BLOCKS * 16 + SKEW_HALF_ACC);
const uint smem_elems_per_warp_acc = M_BLOCKS * 16 * SMEM_STRIDE_ACC * 2; // output in FP32
const uint smem_elems_per_warp =
(smem_elems_per_warp_mat > smem_elems_per_warp_acc) ? smem_elems_per_warp_mat : smem_elems_per_warp_acc;
uint output_size = num_cols + (num_rows * (num_rows - 1) >> 1) + pad;
bool float4_predicate = !((num_cols & 7) || (output_size & 7));
if (float4_predicate) {
hipLaunchKernelGGL(( dotBasedInteractFwdKernel<warps_per_threadblock,
threadblock_size,
M_BLOCKS,
K_BLOCKS,
SMEM_STRIDE,
SMEM_STRIDE_ACC,
kWarpSize,
kWarpSizeLog2,
kTileDim,
kTileDimLog2>)
, dim3((batch_size + warps_per_threadblock - 1) / warps_per_threadblock),
dim3(threadblock_size),
warps_per_threadblock * smem_elems_per_warp * sizeof(__half), 0, (const __half *)input,
(half *)output,
batch_size,
num_rows,
num_cols,
num_rows_after_padding,
num_cols_after_padding,
smem_elems_per_warp,
smem_rows_per_warp,
output_size,
num_row_steps,
num_col_steps,
pad);
} else {
hipLaunchKernelGGL(( dotBasedInteractFwdKernelNonAligned<warps_per_threadblock,
threadblock_size,
M_BLOCKS,
K_BLOCKS,
SMEM_STRIDE,
SMEM_STRIDE_ACC,
kWarpSize,
kWarpSizeLog2,
kTileDim,
kTileDimLog2>)
, dim3((batch_size + warps_per_threadblock - 1) / warps_per_threadblock),
dim3(threadblock_size),
warps_per_threadblock * smem_elems_per_warp * sizeof(__half), 0, (const __half *)input,
(half *)output,
batch_size,
num_rows,
num_cols,
num_rows_after_padding,
num_cols_after_padding,
smem_elems_per_warp,
smem_rows_per_warp,
output_size,
num_row_steps,
num_col_steps,
pad);
}
}
inline void dotBasedInteractBwd(void *input,
void *upstream_grad,
void *grad,
void *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint pad) {
const uint kWarpSize = 32;
const uint kWarpSizeLog2 = Log2<kWarpSize>::value;
const uint kTileDim = 16;
const uint kTileDimLog2 = Log2<kTileDim>::value;
const uint mem_skew_size = 8;
const uint kWarpsPerBlock = 4;
const uint kWarpsPerBlockLog2 = Log2<kWarpsPerBlock>::value;
const uint kNumThreads = kWarpsPerBlock * kWarpSize;
const uint kRowTilesPerStep = 2;
const uint kColTilesPerStep = 1;
uint row_tiles_per_step = num_rows > kTileDim ? kRowTilesPerStep : 1;
// num tiles
uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2;
uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2;
// number of rows and columns after padding
uint num_rows_after_padding = kTileDim << 1;
uint num_cols_after_padding = num_col_tiles << kTileDimLog2;
// 2D ugrad size and stride
uint interaction_ugrad_2D_stride = num_rows_after_padding + mem_skew_size;
uint interaction_ugrad_2D_size_elems = num_rows_after_padding * interaction_ugrad_2D_stride;
uint interaction_ugrad_2D_size_bytes = interaction_ugrad_2D_size_elems * sizeof(half);
// 1D ugrad size
uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1;
uint interaction_ugrad_size_with_padding = interaction_ugrad_size + pad;
// in_out place size and stride
uint input_stride = num_cols_after_padding + mem_skew_size;
uint input_size_elems = num_rows_after_padding * input_stride;
uint input_size_bytes = input_size_elems * sizeof(half);
// sample size
uint sample_size = num_rows * num_cols;
// output size
uint output_size_elems = kTileDim * kTileDim * kRowTilesPerStep * kColTilesPerStep;
uint output_size_bytes = output_size_elems * sizeof(float);
// staging area size
uint staging_area_size_bytes =
output_size_bytes > interaction_ugrad_2D_size_bytes ? output_size_bytes : interaction_ugrad_2D_size_bytes;
// Shared memory size
uint shared_mem_per_warp_size_byte = input_size_bytes + staging_area_size_bytes;
uint shared_mem_size_bytes = kWarpsPerBlock * shared_mem_per_warp_size_byte;
uint num_blocks = (batch_size + kWarpsPerBlock - 1) >> kWarpsPerBlockLog2;
uint num_row_steps = num_row_tiles / row_tiles_per_step;
uint num_col_steps = num_col_tiles / kColTilesPerStep;
bool float4_predicate = !((interaction_ugrad_size_with_padding & 7) || (num_cols & 7));
if (float4_predicate) {
hipLaunchKernelGGL(( dotBasedInteractBwdKernel<kWarpsPerBlock,
kNumThreads,
kRowTilesPerStep,
kColTilesPerStep,
kWarpSize,
kWarpSizeLog2,
kTileDim,
kTileDimLog2>)
, dim3(num_blocks), dim3(kNumThreads), shared_mem_size_bytes, 0, (const half *)input,
(const half *)upstream_grad,
(half *)grad,
(half *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
num_rows_after_padding,
num_cols_after_padding,
sample_size,
interaction_ugrad_size,
interaction_ugrad_size_with_padding,
interaction_ugrad_2D_size_elems,
interaction_ugrad_2D_stride,
input_size_elems,
input_stride,
num_row_steps,
num_col_steps,
row_tiles_per_step,
shared_mem_per_warp_size_byte);
} else {
hipLaunchKernelGGL(( dotBasedInteractBwdKernelNonAligned<kWarpsPerBlock,
kNumThreads,
kRowTilesPerStep,
kColTilesPerStep,
kWarpSize,
kWarpSizeLog2,
kTileDim,
kTileDimLog2>)
, dim3(num_blocks), dim3(kNumThreads), shared_mem_size_bytes, 0, (const half *)input,
(const half *)upstream_grad,
(half *)grad,
(half *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
num_rows_after_padding,
num_cols_after_padding,
sample_size,
interaction_ugrad_size,
interaction_ugrad_size_with_padding,
interaction_ugrad_2D_size_elems,
interaction_ugrad_2D_stride,
input_size_elems,
input_stride,
num_row_steps,
num_col_steps,
row_tiles_per_step,
shared_mem_per_warp_size_byte);
}
}
| 21496c689b419296747b71c2ae8c438cb638966b.cu | #include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <cuda_fp16.hpp>
#include <math.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
using namespace nvcuda;
#define CHK_CUDA(expression) \
{ \
cudaError_t status = (expression); \
if (status != cudaSuccess) { \
std::cerr << "Error in file: " << __FILE__ << ", on line: " << __LINE__ << ": " << cudaGetErrorString(status) \
<< std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
template <uint x>
struct Log2 {
static constexpr uint value = 1 + Log2<x / 2>::value;
};
template <>
struct Log2<1> {
static constexpr uint value = 0;
};
struct __align__(8) half4 {
half2 vals[2];
};
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint M_BLOCKS,
uint K_BLOCKS,
uint SMEM_STRIDE,
uint SMEM_STRIDE_ACC,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint TILE_DIM,
uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractFwdKernelNonAligned(const __half *__restrict input,
__half *__restrict output,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint smem_elems_per_warp,
uint smem_rows_per_warp,
uint output_size,
uint num_row_steps,
uint num_col_steps,
uint pad) {
uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2);
int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (WARP_SIZE - 1);
extern __shared__ half shmem_dynamic[];
half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
const half *sample_input = input + num_rows * num_cols * sample_id;
for (uint i = 0; i < num_rows; ++i, sample_input += num_cols) {
for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
(shmem + i * SMEM_STRIDE)[idx] = sample_input[idx];
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (int i = 0; i < num_rows; ++i) {
(shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
}
}
__syncwarp();
half *gmem_output = output + output_size * sample_id;
for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
gmem_output[idx] = shmem[idx];
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS];
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::fill_fragment(acc[i][j], 0);
}
}
for (int k_step = 0; k_step < num_col_steps; k_step++) {
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS];
for (int j = 0; j < M_BLOCKS; j++) {
int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
}
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
float *shmem_store = reinterpret_cast<float *>(shmem);
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major);
}
}
half *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
int srcLine = 0;
for (int i = 0; i < num_rows; ++i, ++srcLine) {
if (i == ((M_BLOCKS - 1) * 16)) {
srcLine += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] = __float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
}
}
// Padding
if (lane_id < pad) {
gmem_output[lane_id + output_size - 1] = __float2half(0);
}
}
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint M_BLOCKS,
uint K_BLOCKS,
uint SMEM_STRIDE,
uint SMEM_STRIDE_ACC,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint TILE_DIM,
uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractFwdKernel(const __half *__restrict input,
__half *__restrict output,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint smem_elems_per_warp,
uint smem_rows_per_warp,
uint output_size,
uint num_row_steps,
uint num_col_steps,
uint pad) {
uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2);
int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (WARP_SIZE - 1);
extern __shared__ half shmem_dynamic[];
half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
const half *sample_input = input + num_rows * num_cols * sample_id;
if (lane_id < (num_cols >> 2)) {
for (int i = 0; i < num_rows; ++i, sample_input += num_cols) {
((float2 *)(shmem + i * SMEM_STRIDE))[lane_id] = ((float2 *)sample_input)[lane_id];
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (int i = 0; i < num_rows; ++i) {
(shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
}
}
__syncwarp();
half *gmem_output = output + output_size * sample_id;
if (lane_id < (num_cols >> 2)) {
((float2 *)gmem_output)[lane_id] = ((float2 *)shmem)[lane_id];
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS];
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::fill_fragment(acc[i][j], 0);
}
}
for (int k_step = 0; k_step < num_col_steps; k_step++) {
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS];
for (int j = 0; j < M_BLOCKS; j++) {
int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
}
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
float *shmem_store = reinterpret_cast<float *>(shmem);
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major);
}
}
half *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
int srcLine = 0;
for (int i = 0; i < num_rows; ++i, ++srcLine) {
if (i == ((M_BLOCKS - 1) * 16)) {
srcLine += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] = __float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
}
}
// Padding
if (lane_id < pad) {
gmem_output[lane_id + output_size - 1] = __float2half(0);
}
}
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint ROW_TILES_PER_STEP,
uint COL_TILES_PER_STEP,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint TILE_DIM,
uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractBwdKernelNonAligned(const __half *__restrict input,
const __half *__restrict upstream_grad,
half __restrict *grad,
half __restrict *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint sample_size,
uint interaction_ugrad_size,
uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems,
uint interaction_ugrad_2D_stride,
uint input_size_elems,
uint input_stride,
uint num_row_steps,
uint num_col_steps,
uint row_tiles_per_step,
uint shared_mem_per_warp_size_byte) {
extern __shared__ half shared_mem[];
uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2);
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (WARP_SIZE - 1);
// ">> 1" to convert to half pointer
uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
half *smem_in = &shared_mem[smem_warp_offset];
half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
float *smem_out = reinterpret_cast<float *>(smem_temp);
// Global memory pointers for the current sample
// Input
uint gmem_input_sample_offset = sample_id * sample_size;
const half *gmem_input = &input[gmem_input_sample_offset];
// Interaction Gradient
const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
half *gmem_grad = &grad[gmem_grad_sample_offset];
// Bottom MLP gradient
half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
for (uint idx = lane_id; idx < interaction_ugrad_size; idx += WARP_SIZE) {
smem_in[idx] = gmem_ugrad_interactions[idx];
}
__syncwarp();
// Form the 2D ugrad matrix.
if (lane_id < num_rows_after_padding) {
uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
half ugrad_val = __float2half(0.0f);
if (row < lane_id && lane_id < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_temp[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= lane_id && lane_id < num_rows_after_padding) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
}
}
__syncwarp();
// Input -> Shared Memory
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
const half *gmem_row_ptr = &gmem_input[row * num_cols];
for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
smem_row_ptr[idx] = gmem_row_ptr[idx];
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
smem_row_ptr[idx] = __float2half(0);
}
}
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
for (uint idx = lane_id; idx < num_cols_after_padding; idx += WARP_SIZE) {
smem_row_ptr[idx] = __float2half(0);
}
}
__syncwarp();
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[ROW_TILES_PER_STEP]
[ROW_TILES_PER_STEP];
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
}
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[ROW_TILES_PER_STEP];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> b[ROW_TILES_PER_STEP];
for (int col_step = 0; col_step < num_col_steps; col_step++) {
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
wmma::fill_fragment(acc[i], 0);
wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
}
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, wmma::mem_row_major);
}
__syncwarp();
uint gmem_grad_col = (col_step << TILE_DIM_LOG_2) + lane_id;
if (gmem_grad_col < num_cols) {
for (uint i = 0; i < num_rows; i++) {
gmem_grad[i * num_cols + gmem_grad_col] = __float2half(smem_out[(i << TILE_DIM_LOG_2) + lane_id]);
}
}
}
for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) {
gmem_mlp_grad[idx] = gmem_ugrad[idx];
}
}
template <uint WARPS_PER_BLOCK,
uint THREADBLOCK_SIZE,
uint ROW_TILES_PER_STEP,
uint COL_TILES_PER_STEP,
uint WARP_SIZE,
uint WARP_SIZE_LOG_2,
uint TILE_DIM,
uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractBwdKernel(const __half *__restrict input,
const __half *__restrict upstream_grad,
half __restrict *grad,
half __restrict *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint num_rows_after_padding,
uint num_cols_after_padding,
uint sample_size,
uint interaction_ugrad_size,
uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems,
uint interaction_ugrad_2D_stride,
uint input_size_elems,
uint input_stride,
uint num_row_steps,
uint num_col_steps,
uint row_tiles_per_step,
uint shared_mem_per_warp_size_byte) {
extern __shared__ half shared_mem[];
uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2);
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (WARP_SIZE - 1);
// ">> 1" to convert to half pointer
uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
half *smem_in = &shared_mem[smem_warp_offset];
half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
float *smem_out = reinterpret_cast<float *>(smem_temp);
// Global memory pointers for the current sample
// Input
uint gmem_input_sample_offset = sample_id * sample_size;
const half *gmem_input = &input[gmem_input_sample_offset];
// Interaction Gradient
const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
half *gmem_grad = &grad[gmem_grad_sample_offset];
// Bottom MLP gradient
half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
for (uint idx = lane_id; idx < (interaction_ugrad_size >> 3); idx += WARP_SIZE) {
((float4 *)smem_in)[idx] = ((float4 *)gmem_ugrad_interactions)[idx];
}
uint offset = (interaction_ugrad_size >> 3) << 3;
for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += WARP_SIZE) {
smem_in[idx] = gmem_ugrad_interactions[idx];
}
__syncwarp();
// Form the 2D ugrad matrix.
if (lane_id < num_rows_after_padding) {
uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
half ugrad_val = __float2half(0.0f);
if (row < lane_id && lane_id < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_temp[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= lane_id && lane_id < num_rows_after_padding) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
}
}
__syncwarp();
// Input -> Shared Memory
if (lane_id < (num_cols >> 2)) {
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
const half *gmem_row_ptr = &gmem_input[row * num_cols];
((float2 *)smem_row_ptr)[lane_id] = ((float2 *)gmem_row_ptr)[lane_id];
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
smem_row_ptr[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
((half4 *)smem_row_ptr)[lane_id] = zeros;
}
}
__syncwarp();
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[ROW_TILES_PER_STEP]
[ROW_TILES_PER_STEP];
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
}
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[ROW_TILES_PER_STEP];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> b[ROW_TILES_PER_STEP];
for (int col_step = 0; col_step < num_col_steps; col_step++) {
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
wmma::fill_fragment(acc[i], 0);
wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
}
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, wmma::mem_row_major);
}
__syncwarp();
uint gmem_grad_col = (col_step << TILE_DIM_LOG_2) + lane_id;
if (gmem_grad_col < num_cols) {
for (uint i = 0; i < num_rows; i++) {
gmem_grad[i * num_cols + gmem_grad_col] = __float2half(smem_out[(i << TILE_DIM_LOG_2) + lane_id]);
}
}
}
if (lane_id < (num_cols >> 2)) {
((float2 *)gmem_mlp_grad)[lane_id] = ((float2 *)gmem_ugrad)[lane_id];
}
}
inline void dotBasedInteractFwd(const void *input,
const void *bottom_mlp_output,
void *output,
uint batch_size,
uint num_rows,
uint num_cols,
uint pad) {
const uint kWarpSize = 32;
const uint kWarpSizeLog2 = Log2<kWarpSize>::value;
const uint kTileDim = 16;
const uint kTileDimLog2 = Log2<kTileDim>::value;
const uint warps_per_threadblock = 4;
const uint threadblock_size = warps_per_threadblock * 32;
const uint kRowTilesPerStep = 2;
const uint kColTilesPerStep = 1;
// num tiles
uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2;
uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2;
// number of rows and columns after padding
uint num_rows_after_padding = kTileDim << 1;
uint num_cols_after_padding = num_col_tiles << kTileDimLog2;
uint num_row_steps = num_row_tiles / kRowTilesPerStep;
uint num_col_steps = num_col_tiles / kColTilesPerStep;
const uint K_BLOCKS = 8;
const uint M_BLOCKS = 2;
const uint SKEW_HALF = ((K_BLOCKS % 2) == 0) ? 8 : 0;
const uint SMEM_STRIDE = (K_BLOCKS * 16 + SKEW_HALF);
// multiple of 2 to guarantee 256-bit alignment for start of the row, at least 16 to safeload a tile
const uint smem_rows_per_warp = M_BLOCKS << 4;
const uint smem_elems_per_warp_mat = smem_rows_per_warp * SMEM_STRIDE;
const uint SKEW_HALF_ACC = ((M_BLOCKS % 2) == 0) ? 8 : 0;
const uint SMEM_STRIDE_ACC = (M_BLOCKS * 16 + SKEW_HALF_ACC);
const uint smem_elems_per_warp_acc = M_BLOCKS * 16 * SMEM_STRIDE_ACC * 2; // output in FP32
const uint smem_elems_per_warp =
(smem_elems_per_warp_mat > smem_elems_per_warp_acc) ? smem_elems_per_warp_mat : smem_elems_per_warp_acc;
uint output_size = num_cols + (num_rows * (num_rows - 1) >> 1) + pad;
bool float4_predicate = !((num_cols & 7) || (output_size & 7));
if (float4_predicate) {
dotBasedInteractFwdKernel<warps_per_threadblock,
threadblock_size,
M_BLOCKS,
K_BLOCKS,
SMEM_STRIDE,
SMEM_STRIDE_ACC,
kWarpSize,
kWarpSizeLog2,
kTileDim,
kTileDimLog2>
<<<(batch_size + warps_per_threadblock - 1) / warps_per_threadblock,
threadblock_size,
warps_per_threadblock * smem_elems_per_warp * sizeof(__half)>>>((const __half *)input,
(half *)output,
batch_size,
num_rows,
num_cols,
num_rows_after_padding,
num_cols_after_padding,
smem_elems_per_warp,
smem_rows_per_warp,
output_size,
num_row_steps,
num_col_steps,
pad);
} else {
dotBasedInteractFwdKernelNonAligned<warps_per_threadblock,
threadblock_size,
M_BLOCKS,
K_BLOCKS,
SMEM_STRIDE,
SMEM_STRIDE_ACC,
kWarpSize,
kWarpSizeLog2,
kTileDim,
kTileDimLog2>
<<<(batch_size + warps_per_threadblock - 1) / warps_per_threadblock,
threadblock_size,
warps_per_threadblock * smem_elems_per_warp * sizeof(__half)>>>((const __half *)input,
(half *)output,
batch_size,
num_rows,
num_cols,
num_rows_after_padding,
num_cols_after_padding,
smem_elems_per_warp,
smem_rows_per_warp,
output_size,
num_row_steps,
num_col_steps,
pad);
}
}
inline void dotBasedInteractBwd(void *input,
void *upstream_grad,
void *grad,
void *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint pad) {
const uint kWarpSize = 32;
const uint kWarpSizeLog2 = Log2<kWarpSize>::value;
const uint kTileDim = 16;
const uint kTileDimLog2 = Log2<kTileDim>::value;
const uint mem_skew_size = 8;
const uint kWarpsPerBlock = 4;
const uint kWarpsPerBlockLog2 = Log2<kWarpsPerBlock>::value;
const uint kNumThreads = kWarpsPerBlock * kWarpSize;
const uint kRowTilesPerStep = 2;
const uint kColTilesPerStep = 1;
uint row_tiles_per_step = num_rows > kTileDim ? kRowTilesPerStep : 1;
// num tiles
uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2;
uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2;
// number of rows and columns after padding
uint num_rows_after_padding = kTileDim << 1;
uint num_cols_after_padding = num_col_tiles << kTileDimLog2;
// 2D ugrad size and stride
uint interaction_ugrad_2D_stride = num_rows_after_padding + mem_skew_size;
uint interaction_ugrad_2D_size_elems = num_rows_after_padding * interaction_ugrad_2D_stride;
uint interaction_ugrad_2D_size_bytes = interaction_ugrad_2D_size_elems * sizeof(half);
// 1D ugrad size
uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1;
uint interaction_ugrad_size_with_padding = interaction_ugrad_size + pad;
// in_out place size and stride
uint input_stride = num_cols_after_padding + mem_skew_size;
uint input_size_elems = num_rows_after_padding * input_stride;
uint input_size_bytes = input_size_elems * sizeof(half);
// sample size
uint sample_size = num_rows * num_cols;
// output size
uint output_size_elems = kTileDim * kTileDim * kRowTilesPerStep * kColTilesPerStep;
uint output_size_bytes = output_size_elems * sizeof(float);
// staging area size
uint staging_area_size_bytes =
output_size_bytes > interaction_ugrad_2D_size_bytes ? output_size_bytes : interaction_ugrad_2D_size_bytes;
// Shared memory size
uint shared_mem_per_warp_size_byte = input_size_bytes + staging_area_size_bytes;
uint shared_mem_size_bytes = kWarpsPerBlock * shared_mem_per_warp_size_byte;
uint num_blocks = (batch_size + kWarpsPerBlock - 1) >> kWarpsPerBlockLog2;
uint num_row_steps = num_row_tiles / row_tiles_per_step;
uint num_col_steps = num_col_tiles / kColTilesPerStep;
bool float4_predicate = !((interaction_ugrad_size_with_padding & 7) || (num_cols & 7));
if (float4_predicate) {
dotBasedInteractBwdKernel<kWarpsPerBlock,
kNumThreads,
kRowTilesPerStep,
kColTilesPerStep,
kWarpSize,
kWarpSizeLog2,
kTileDim,
kTileDimLog2>
<<<num_blocks, kNumThreads, shared_mem_size_bytes>>>((const half *)input,
(const half *)upstream_grad,
(half *)grad,
(half *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
num_rows_after_padding,
num_cols_after_padding,
sample_size,
interaction_ugrad_size,
interaction_ugrad_size_with_padding,
interaction_ugrad_2D_size_elems,
interaction_ugrad_2D_stride,
input_size_elems,
input_stride,
num_row_steps,
num_col_steps,
row_tiles_per_step,
shared_mem_per_warp_size_byte);
} else {
dotBasedInteractBwdKernelNonAligned<kWarpsPerBlock,
kNumThreads,
kRowTilesPerStep,
kColTilesPerStep,
kWarpSize,
kWarpSizeLog2,
kTileDim,
kTileDimLog2>
<<<num_blocks, kNumThreads, shared_mem_size_bytes>>>((const half *)input,
(const half *)upstream_grad,
(half *)grad,
(half *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
num_rows_after_padding,
num_cols_after_padding,
sample_size,
interaction_ugrad_size,
interaction_ugrad_size_with_padding,
interaction_ugrad_2D_size_elems,
interaction_ugrad_2D_stride,
input_size_elems,
input_stride,
num_row_steps,
num_col_steps,
row_tiles_per_step,
shared_mem_per_warp_size_byte);
}
}
|
dca380f137f609e8e7c077958c3badb4c6c9293f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const Dtype* const label_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
//maxidx in n-th num, label_gt-th channel
int maxidx = -1;
int c_gt = label_data[n];
const Dtype* const bottom_slice_gt =
bottom_data + (n * channels + c_gt) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice_gt[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice_gt[maxidx];
}
}
}
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
top_data[index] = bottom_slice[maxidx];
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
void PoolingGTLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, label_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
default:
LOG(FATAL) << "Unknown pooling method in PoolingGTLayer.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingGTLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingGTLayer);
} // namespace caffe
| dca380f137f609e8e7c077958c3badb4c6c9293f.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const Dtype* const label_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
//maxidx in n-th num, label_gt-th channel
int maxidx = -1;
int c_gt = label_data[n];
const Dtype* const bottom_slice_gt =
bottom_data + (n * channels + c_gt) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice_gt[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice_gt[maxidx];
}
}
}
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
top_data[index] = bottom_slice[maxidx];
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
void PoolingGTLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, label_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
default:
LOG(FATAL) << "Unknown pooling method in PoolingGTLayer.";
}
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingGTLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingGTLayer);
} // namespace caffe
|
91926494926e048b8a91c89b46f0331d17f99ed9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19) {
for (int i=0; i < var_1; ++i) {
comp = -1.9786E-21f * +1.4840E-41f;
if (comp <= (-0.0f + (-1.7760E35f / var_3 + (var_4 * var_5)))) {
float tmp_1 = -1.4259E-37f;
comp += tmp_1 + var_6 / (-1.2181E-28f - var_7 / var_8 * (var_9 * -0.0f));
float tmp_2 = powf(var_10 - -1.1517E13f / var_11, +1.0803E-41f);
comp = tmp_2 * var_12 - (+0.0f - (+1.0986E-37f * -1.5303E-35f * (var_13 + -1.8656E-5f)));
}
for (int i=0; i < var_2; ++i) {
float tmp_3 = (-1.1301E-42f * log10f(+1.8012E-26f));
comp += tmp_3 * var_14 - +1.8300E34f - asinf(var_15 * var_16 - -1.0981E-42f);
float tmp_4 = -0.0f / (var_17 + var_18);
comp += tmp_4 * +0.0f - coshf(ceilf((+0.0f / var_19)));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20);
hipDeviceSynchronize();
return 0;
}
| 91926494926e048b8a91c89b46f0331d17f99ed9.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19) {
for (int i=0; i < var_1; ++i) {
comp = -1.9786E-21f * +1.4840E-41f;
if (comp <= (-0.0f + (-1.7760E35f / var_3 + (var_4 * var_5)))) {
float tmp_1 = -1.4259E-37f;
comp += tmp_1 + var_6 / (-1.2181E-28f - var_7 / var_8 * (var_9 * -0.0f));
float tmp_2 = powf(var_10 - -1.1517E13f / var_11, +1.0803E-41f);
comp = tmp_2 * var_12 - (+0.0f - (+1.0986E-37f * -1.5303E-35f * (var_13 + -1.8656E-5f)));
}
for (int i=0; i < var_2; ++i) {
float tmp_3 = (-1.1301E-42f * log10f(+1.8012E-26f));
comp += tmp_3 * var_14 - +1.8300E34f - asinf(var_15 * var_16 - -1.0981E-42f);
float tmp_4 = -0.0f / (var_17 + var_18);
comp += tmp_4 * +0.0f - coshf(ceilf((+0.0f / var_19)));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20);
cudaDeviceSynchronize();
return 0;
}
|
a7b8bbbd3ce8d458213e167689c4e4f9b162f24c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/apply_proximal_gradient_descent_impl.cuh"
#include <algorithm>
#include "include/hip/hip_fp16.h"
template <typename T>
__device__ __forceinline__ T RsqrtFunc(T x) {
return rsqrt(x);
}
template <>
__device__ __forceinline__ half RsqrtFunc(half x) {
return hrsqrt(x);
}
template <typename T>
__device__ __forceinline__ T AbsFunc(T x) {
return abs(x);
}
template <>
__device__ __forceinline__ half AbsFunc(half x) {
return abs(__half2float(x));
}
template <typename T>
__device__ __forceinline__ T MaxFunc(T x, T y) {
return max(x, y);
}
template <>
__device__ __forceinline__ half MaxFunc(half x, half y) {
return max(__half2float(x), __half2float(y));
}
template <typename T>
__device__ __forceinline__ T SgnFunc(T x) {
return static_cast<T>(x != 0 ? (x > 0 ? 1 : -1) : 0);
}
template <>
__device__ __forceinline__ half SgnFunc(half x) {
return __float2half(__half2float(x) != 0 ? (__half2float(x) > 0 ? 1 : -1) : 0);
}
template <typename T>
__global__ void CalApplyProximalGradientDescentKernel(const size_t input_elements, T *var, const T *alpha, const T *l1,
const T *l2, const T *delta, T *output) {
if (l1[0] > static_cast<T>(0.0)) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < static_cast<int>(input_elements);
pos += gridDim.x * blockDim.x) {
auto prox_v = var[pos];
prox_v -= delta[pos] * alpha[0];
var[pos] = SgnFunc(prox_v) * MaxFunc(AbsFunc(prox_v) - alpha[0] * l1[0], static_cast<T>(0.0)) /
(static_cast<T>(1) + l2[0] * alpha[0]);
}
} else {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < static_cast<int>(input_elements);
pos += gridDim.x * blockDim.x) {
auto prox_v = var[pos];
prox_v -= delta[pos] * alpha[0];
var[pos] = prox_v / (static_cast<T>(1) + l2[0] * alpha[0]);
}
}
}
template <typename T>
hipError_t CalApplyProximalGradientDescent(const size_t input_elements, T *var, const T *alpha, const T *l1,
const T *l2, const T *delta, T *output, const uint32_t &device_id,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( CalApplyProximalGradientDescentKernel), dim3(CUDA_BLOCKS(device_id, input_elements)), dim3(CUDA_THREADS(device_id)), 0,
cuda_stream, input_elements, var, alpha, l1, l2, delta, output);
CHECK_CUDA_LAUNCH_SUCCESS();
}
template CUDA_LIB_EXPORT hipError_t CalApplyProximalGradientDescent<float>(const size_t size, float *var,
const float *alpha, const float *l1,
const float *l2, const float *delta,
float *output, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t CalApplyProximalGradientDescent<half>(const size_t size, half *var,
const half *alpha, const half *l1,
const half *l2, const half *delta,
half *output, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t CalApplyProximalGradientDescent<double>(const size_t size, double *var,
const double *alpha, const double *l1,
const double *l2, const double *delta,
double *output, const uint32_t &device_id,
hipStream_t cuda_stream);
| a7b8bbbd3ce8d458213e167689c4e4f9b162f24c.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/apply_proximal_gradient_descent_impl.cuh"
#include <algorithm>
#include "include/cuda_fp16.h"
template <typename T>
__device__ __forceinline__ T RsqrtFunc(T x) {
return rsqrt(x);
}
template <>
__device__ __forceinline__ half RsqrtFunc(half x) {
return hrsqrt(x);
}
template <typename T>
__device__ __forceinline__ T AbsFunc(T x) {
return abs(x);
}
template <>
__device__ __forceinline__ half AbsFunc(half x) {
return abs(__half2float(x));
}
template <typename T>
__device__ __forceinline__ T MaxFunc(T x, T y) {
return max(x, y);
}
template <>
__device__ __forceinline__ half MaxFunc(half x, half y) {
return max(__half2float(x), __half2float(y));
}
template <typename T>
__device__ __forceinline__ T SgnFunc(T x) {
return static_cast<T>(x != 0 ? (x > 0 ? 1 : -1) : 0);
}
template <>
__device__ __forceinline__ half SgnFunc(half x) {
return __float2half(__half2float(x) != 0 ? (__half2float(x) > 0 ? 1 : -1) : 0);
}
template <typename T>
__global__ void CalApplyProximalGradientDescentKernel(const size_t input_elements, T *var, const T *alpha, const T *l1,
const T *l2, const T *delta, T *output) {
if (l1[0] > static_cast<T>(0.0)) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < static_cast<int>(input_elements);
pos += gridDim.x * blockDim.x) {
auto prox_v = var[pos];
prox_v -= delta[pos] * alpha[0];
var[pos] = SgnFunc(prox_v) * MaxFunc(AbsFunc(prox_v) - alpha[0] * l1[0], static_cast<T>(0.0)) /
(static_cast<T>(1) + l2[0] * alpha[0]);
}
} else {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < static_cast<int>(input_elements);
pos += gridDim.x * blockDim.x) {
auto prox_v = var[pos];
prox_v -= delta[pos] * alpha[0];
var[pos] = prox_v / (static_cast<T>(1) + l2[0] * alpha[0]);
}
}
}
template <typename T>
cudaError_t CalApplyProximalGradientDescent(const size_t input_elements, T *var, const T *alpha, const T *l1,
const T *l2, const T *delta, T *output, const uint32_t &device_id,
cudaStream_t cuda_stream) {
CalApplyProximalGradientDescentKernel<<<CUDA_BLOCKS(device_id, input_elements), CUDA_THREADS(device_id), 0,
cuda_stream>>>(input_elements, var, alpha, l1, l2, delta, output);
CHECK_CUDA_LAUNCH_SUCCESS();
}
template CUDA_LIB_EXPORT cudaError_t CalApplyProximalGradientDescent<float>(const size_t size, float *var,
const float *alpha, const float *l1,
const float *l2, const float *delta,
float *output, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t CalApplyProximalGradientDescent<half>(const size_t size, half *var,
const half *alpha, const half *l1,
const half *l2, const half *delta,
half *output, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t CalApplyProximalGradientDescent<double>(const size_t size, double *var,
const double *alpha, const double *l1,
const double *l2, const double *delta,
double *output, const uint32_t &device_id,
cudaStream_t cuda_stream);
|
4ccb9d09ba02f9511c8908c36b332269d91478d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#define tw 2
__global__ void matmul(int *a,int *b, int *c, int n){
int ix=tw*blockIdx.x+threadIdx.x;
int iy=tw*blockIdx.y+threadIdx.y;
int idx=n*iy+ix;
c[idx]=0;
for(int k=0; k<n; k++){
c[idx]+=a[ix*n+k]*b[k*n+iy];
}
}
int main(){
int n;
scanf("%d",&n);
int *a;
int *b;
int *c;
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
scanf("%d",&a[i][j]);
}
}
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
scanf("%d",&b[i][j]);
}
}
int *da,*db,*dc;
hipMalloc((void**)&da,n*n*sizeof(int));
hipMalloc((void**)&db,n*n*sizeof(int));
hipMalloc((void**)&dc,n*n*sizeof(int));
hipMemcpy(da,a,n*n*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(db,b,n*n*sizeof(int),hipMemcpyHostToDevice);
dim3 griddim(ceil(n*1.0/tw),ceil(n*1.0/tw),1);
dim3 blockdim(tw,tw,1);
matmul<<<(griddim,blockdim)>>>(da,db,dc,n);
hipMemcpy(c,dc,n*n*sizeof(int),hipMemcpyDeviceToHost);
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
printf("%d ",c[i][j]);
}
printf("\n");
}
}
| 4ccb9d09ba02f9511c8908c36b332269d91478d9.cu |
#include<stdio.h>
#define tw 2
__global__ void matmul(int *a,int *b, int *c, int n){
int ix=tw*blockIdx.x+threadIdx.x;
int iy=tw*blockIdx.y+threadIdx.y;
int idx=n*iy+ix;
c[idx]=0;
for(int k=0; k<n; k++){
c[idx]+=a[ix*n+k]*b[k*n+iy];
}
}
int main(){
int n;
scanf("%d",&n);
int *a;
int *b;
int *c;
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
scanf("%d",&a[i][j]);
}
}
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
scanf("%d",&b[i][j]);
}
}
int *da,*db,*dc;
cudaMalloc((void**)&da,n*n*sizeof(int));
cudaMalloc((void**)&db,n*n*sizeof(int));
cudaMalloc((void**)&dc,n*n*sizeof(int));
cudaMemcpy(da,a,n*n*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(db,b,n*n*sizeof(int),cudaMemcpyHostToDevice);
dim3 griddim(ceil(n*1.0/tw),ceil(n*1.0/tw),1);
dim3 blockdim(tw,tw,1);
matmul<<<(griddim,blockdim)>>>(da,db,dc,n);
cudaMemcpy(c,dc,n*n*sizeof(int),cudaMemcpyDeviceToHost);
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
printf("%d ",c[i][j]);
}
printf("\n");
}
}
|
d1e5372e373fdd543bdeedab6232ac8ca33617fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) ICG. All rights reserved.
*
* Institute for Computer Graphics and Vision
* Graz University of Technology / Austria
*
*
* This software is distributed WITHOUT ANY WARRANTY; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notices for more information.
*
*
* Project : ImageUtilities
* Module : Core
* Class : none
* Language : C/CUDA
* Description : CUDA kernels for core functions
*
* Author : Manuel Werlberger
* EMail : werlberger@icg.tugraz.at
*
*/
#ifndef IUCORE_CONVERT_CU
#define IUCORE_CONVERT_CU
//#include <cutil_math.h>
#include "/usr/local/cuda/samples/common/inc/helper_math.h"
#include "coredefs.h"
#include "memorydefs.h"
#include "iutextures.cuh"
namespace iuprivate {
/* ***************************************************************************
* CUDA KERNELS
* ***************************************************************************/
//-----------------------------------------------------------------------------
/** convert kernel 32f_C3 -> 32f_C4 (float3 -> float4)
*/
__global__ void cuConvertC3ToC4Kernel(const float3* src, size_t src_stride, int src_width, int src_height,
float4* dst, size_t dst_stride, int dst_width, int dst_height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int src_c = y*src_stride + x;
int dst_c = y*dst_stride + x;
if (x<src_width && y<src_height && x<dst_width && y<dst_height)
{
float3 val=src[src_c];
dst[dst_c] = make_float4(val.x, val.y, val.z, 1.0f);
}
}
//-----------------------------------------------------------------------------
/** convert kernel 32f_C4 -> 32f_C3 (float4 -> float3)
*/
__global__ void cuConvertC4ToC3Kernel(const float4* src, size_t src_stride, int src_width, int src_height,
float3* dst, size_t dst_stride, int dst_width, int dst_height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int src_c = y*src_stride + x;
int dst_c = y*dst_stride + x;
if (x<src_width && y<src_height && x<dst_width && y<dst_height)
{
float4 val=src[src_c];
dst[dst_c] = make_float3(val.x, val.y, val.z);
}
}
//-----------------------------------------------------------------------------
/** convert kernel 8u_C1 -> 32f_C1 (unsigned char -> float)
*/
__global__ void cuConvert8uC1To32fC1Kernel(const unsigned char *src, size_t src_stride, int src_width, int src_height,
float* dst, size_t dst_stride, int dst_width, int dst_height, float mul_constant,
float add_constant)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int src_c = y*src_stride + x;
int dst_c = y*dst_stride + x;
if (x<src_width && y<src_height && x<dst_width && y<dst_height)
{
dst[dst_c] = src[src_c] * mul_constant + add_constant;
}
}
//-----------------------------------------------------------------------------
/** convert kernel 32f_C1 -> 8u_C1 (float -> unsigned char)
*/
__global__ void cuConvert32fC1To8uC1Kernel(const float* src, size_t src_stride, int src_width, int src_height,
unsigned char* dst, size_t dst_stride, int dst_width, int dst_height, float mul_constant,
unsigned char add_constant)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int src_c = y*src_stride + x;
int dst_c = y*dst_stride + x;
if (x<src_width && y<src_height && x<dst_width && y<dst_height)
{
dst[dst_c] = src[src_c] * mul_constant + add_constant;
}
}
//-----------------------------------------------------------------------------
/** convert kernel 32f_C4 -> 8u_C4 (float4 -> unsigned char4)
*/
__global__ void cuConvert32fC4To8uC4Kernel(const float4* src, size_t src_stride, int src_width, int src_height,
uchar4* dst, size_t dst_stride, int dst_width, int dst_height, float mul_constant,
unsigned char add_constant)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int src_c = y*src_stride + x;
int dst_c = y*dst_stride + x;
if (x<src_width && y<src_height && x<dst_width && y<dst_height)
{
float4 val = src[src_c];
uchar4 res;
res.x = val.x * mul_constant + add_constant;
res.y = val.y * mul_constant + add_constant;
res.z = val.z * mul_constant + add_constant;
res.w = val.w * mul_constant + add_constant;
dst[dst_c] = res;
}
}
//-----------------------------------------------------------------------------
/** convert kernel rgb -> hsv
*/
__global__ void cuConvertRGBToHSVKernel(const float4* src, float4* dst, size_t stride,
int width, int height, bool normalize)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*stride + x;
if (x<width && y<height)
{
// Read
float4 in = src[c];
float R = in.x;
float G = in.y;
float B = in.z;
float Ma = IUMAX(R, IUMAX(G, B));
float mi = IUMIN(R, IUMIN(G, B));
float C = Ma-mi;
// Hue
float H = 0.0f;
if (C != 0.0f)
{
if (Ma == R)
H = fmod((G - B)/C, 6.0f);
if (Ma == G)
H = (B - R)/C + 2.0f;
if (Ma == B)
H = (R - G)/C + 4.0f;
}
H *= 60.0f;
// Value
float V = Ma;
// Saturation
float S = 0.0f;
if (C != 0.0f)
S = C/V;
if (H < 0.0f)
H += 360.0f;
// Normalize
if (normalize)
H /= 360.0f;
// Write Back
dst[c] = make_float4(H, S, V, in.w);
}
}
//-----------------------------------------------------------------------------
/** convert kernel hsv -> rgb
*/
__global__ void cuConvertHSVToRGBKernel(const float4* src, float4* dst, size_t stride,
int width, int height, bool denormalize)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*stride + x;
if (x<width && y<height)
{
// Read
float4 in = src[c];
float H = in.x;
float S = in.y;
float V = in.z;
float4 rgb = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// float C = V*S;
// // Denormalize
// if (denormalize)
// H = H*360.0f;
// // RGB
// H /= 60.0f;
// float X = C*(1.0f - abs(fmod(H, 2.0f) - 1.0f));
// if (H >= 0.0f)
// {
// if (H < 1.0f)
// rgb = make_float4(C, X, 0.0f, 0.0f);
// else if (H < 2.0f)
// rgb = make_float4(X, C, 0.0f, 0.0f);
// else if (H < 3.0f)
// rgb = make_float4(0.0f, C, X, 0.0f);
// else if (H < 4.0f)
// rgb = make_float4(0.0f, X, C, 0.0f);
// else if (H < 5.0f)
// rgb = make_float4(X, 0.0f, C, 0.0f);
// else if (H <= 6.0f)
// rgb = make_float4(C, 0.0f, X, 0.0f);
// }
// float m = V-C;
// rgb += m;
if (S == 0)
{
rgb = make_float4(V, V, V, in.w);
dst[c] = rgb;
return;
}
H /= 60.0f;
int i = floor(H);
float f = H-i;
float p = V*(1.0f - S);
float q = V*(1.0f - S*f);
float t = V*(1.0f - S*(1.0f-f));
if (i == 0)
rgb = make_float4(V, t, p, in.w);
else if (i == 1)
rgb = make_float4(q, V, p, in.w);
else if (i == 2)
rgb = make_float4(p, V, t, in.w);
else if (i == 3)
rgb = make_float4(p, q, V, in.w);
else if (i == 4)
rgb = make_float4(t, p, V, in.w);
else if (i == 5)
rgb = make_float4(V, p, q, in.w);
// Write Back
rgb.w = in.w;
dst[c] = rgb;
}
}
/* ***************************************************************************
* CUDA WRAPPERS
* ***************************************************************************/
//-----------------------------------------------------------------------------
/** convert kernel 32f_C3 -> 32f_C4 (float3 -> float4)
*/
IuStatus cuConvert(const iu::ImageGpu_32f_C3* src, const IuRect& src_roi,
iu::ImageGpu_32f_C4* dst, const IuRect& dst_roi)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst_roi.width - dst_roi.x, dimBlock.x),
iu::divUp(dst_roi.height - dst_roi.y, dimBlock.y));
hipLaunchKernelGGL(( cuConvertC3ToC4Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, src->data(src_roi.x, src_roi.y), src->stride(),
src_roi.width, src_roi.height,
dst->data(dst_roi.x, dst_roi.y), dst->stride(),
dst_roi.width, dst_roi.height);
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
/** convert kernel 32f_C4 -> 32f_C3 (float4 -> float3)
*/
IuStatus cuConvert(const iu::ImageGpu_32f_C4* src, const IuRect& src_roi,
iu::ImageGpu_32f_C3* dst, const IuRect& dst_roi)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst_roi.width - dst_roi.x, dimBlock.x),
iu::divUp(dst_roi.height - dst_roi.y, dimBlock.y));
hipLaunchKernelGGL(( cuConvertC4ToC3Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, src->data(src_roi.x, src_roi.y), src->stride(),
src_roi.width, src_roi.height,
dst->data(dst_roi.x, dst_roi.y), dst->stride(),
dst_roi.width, dst_roi.height);
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
IuStatus cuConvert_8u_32f(const iu::ImageGpu_8u_C1* src, const IuRect& src_roi,
iu::ImageGpu_32f_C1* dst, const IuRect& dst_roi, float mul_constant,
float add_constant)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst_roi.width - dst_roi.x, dimBlock.x),
iu::divUp(dst_roi.height - dst_roi.y, dimBlock.y));
hipLaunchKernelGGL(( cuConvert8uC1To32fC1Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, src->data(src_roi.x, src_roi.y),
src->stride(), src_roi.width, src_roi.height,
dst->data(dst_roi.x, dst_roi.y),
dst->stride(), dst_roi.width, dst_roi.height,
mul_constant, add_constant);
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
IuStatus cuConvert_32f_8u(const iu::ImageGpu_32f_C1* src, const IuRect& src_roi,
iu::ImageGpu_8u_C1* dst, const IuRect& dst_roi, float mul_constant,
unsigned char add_constant)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst_roi.width - dst_roi.x, dimBlock.x),
iu::divUp(dst_roi.height - dst_roi.y, dimBlock.y));
hipLaunchKernelGGL(( cuConvert32fC1To8uC1Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, src->data(src_roi.x, src_roi.y), src->stride(),
src_roi.width, src_roi.height,
dst->data(dst_roi.x, dst_roi.y),
dst->stride(), dst_roi.width, dst_roi.height,
mul_constant, add_constant);
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
IuStatus cuConvert_32f_8u(const iu::ImageGpu_32f_C4* src, const IuRect& src_roi,
iu::ImageGpu_8u_C4* dst, const IuRect& dst_roi, float mul_constant,
unsigned char add_constant)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst_roi.width - dst_roi.x, dimBlock.x),
iu::divUp(dst_roi.height - dst_roi.y, dimBlock.y));
hipLaunchKernelGGL(( cuConvert32fC4To8uC4Kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, src->data(src_roi.x, src_roi.y),
src->stride(), src_roi.width, src_roi.height,
dst->data(dst_roi.x, dst_roi.y),
dst->stride(), dst_roi.width,
dst_roi.height, mul_constant, add_constant);
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
IuStatus cuConvert_rgb_to_hsv(const iu::ImageGpu_32f_C4* src, iu::ImageGpu_32f_C4* dst,
bool normalize)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(src->width(), dimBlock.x),
iu::divUp(src->height(), dimBlock.y));
hipLaunchKernelGGL(( cuConvertRGBToHSVKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, src->data(), dst->data(), src->stride(),
src->width(), src->height(), normalize);
hipDeviceSynchronize();
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
IuStatus cuConvert_hsv_to_rgb(const iu::ImageGpu_32f_C4* src, iu::ImageGpu_32f_C4* dst,
bool denormalize)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(src->width(), dimBlock.x),
iu::divUp(src->height(), dimBlock.y));
hipLaunchKernelGGL(( cuConvertHSVToRGBKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, src->data(), dst->data(), src->stride(),
src->width(), src->height(), denormalize);
hipDeviceSynchronize();
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
} // namespace iuprivate
#endif // IUCORE_CONVERT_CU
| d1e5372e373fdd543bdeedab6232ac8ca33617fd.cu | /*
* Copyright (c) ICG. All rights reserved.
*
* Institute for Computer Graphics and Vision
* Graz University of Technology / Austria
*
*
* This software is distributed WITHOUT ANY WARRANTY; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notices for more information.
*
*
* Project : ImageUtilities
* Module : Core
* Class : none
* Language : C/CUDA
* Description : CUDA kernels for core functions
*
* Author : Manuel Werlberger
* EMail : werlberger@icg.tugraz.at
*
*/
#ifndef IUCORE_CONVERT_CU
#define IUCORE_CONVERT_CU
//#include <cutil_math.h>
#include "/usr/local/cuda/samples/common/inc/helper_math.h"
#include "coredefs.h"
#include "memorydefs.h"
#include "iutextures.cuh"
namespace iuprivate {
/* ***************************************************************************
* CUDA KERNELS
* ***************************************************************************/
//-----------------------------------------------------------------------------
/** convert kernel 32f_C3 -> 32f_C4 (float3 -> float4)
*/
__global__ void cuConvertC3ToC4Kernel(const float3* src, size_t src_stride, int src_width, int src_height,
float4* dst, size_t dst_stride, int dst_width, int dst_height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int src_c = y*src_stride + x;
int dst_c = y*dst_stride + x;
if (x<src_width && y<src_height && x<dst_width && y<dst_height)
{
float3 val=src[src_c];
dst[dst_c] = make_float4(val.x, val.y, val.z, 1.0f);
}
}
//-----------------------------------------------------------------------------
/** convert kernel 32f_C4 -> 32f_C3 (float4 -> float3)
*/
__global__ void cuConvertC4ToC3Kernel(const float4* src, size_t src_stride, int src_width, int src_height,
float3* dst, size_t dst_stride, int dst_width, int dst_height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int src_c = y*src_stride + x;
int dst_c = y*dst_stride + x;
if (x<src_width && y<src_height && x<dst_width && y<dst_height)
{
float4 val=src[src_c];
dst[dst_c] = make_float3(val.x, val.y, val.z);
}
}
//-----------------------------------------------------------------------------
/** convert kernel 8u_C1 -> 32f_C1 (unsigned char -> float)
*/
__global__ void cuConvert8uC1To32fC1Kernel(const unsigned char *src, size_t src_stride, int src_width, int src_height,
float* dst, size_t dst_stride, int dst_width, int dst_height, float mul_constant,
float add_constant)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int src_c = y*src_stride + x;
int dst_c = y*dst_stride + x;
if (x<src_width && y<src_height && x<dst_width && y<dst_height)
{
dst[dst_c] = src[src_c] * mul_constant + add_constant;
}
}
//-----------------------------------------------------------------------------
/** convert kernel 32f_C1 -> 8u_C1 (float -> unsigned char)
*/
__global__ void cuConvert32fC1To8uC1Kernel(const float* src, size_t src_stride, int src_width, int src_height,
unsigned char* dst, size_t dst_stride, int dst_width, int dst_height, float mul_constant,
unsigned char add_constant)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int src_c = y*src_stride + x;
int dst_c = y*dst_stride + x;
if (x<src_width && y<src_height && x<dst_width && y<dst_height)
{
dst[dst_c] = src[src_c] * mul_constant + add_constant;
}
}
//-----------------------------------------------------------------------------
/** convert kernel 32f_C4 -> 8u_C4 (float4 -> unsigned char4)
*/
__global__ void cuConvert32fC4To8uC4Kernel(const float4* src, size_t src_stride, int src_width, int src_height,
uchar4* dst, size_t dst_stride, int dst_width, int dst_height, float mul_constant,
unsigned char add_constant)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int src_c = y*src_stride + x;
int dst_c = y*dst_stride + x;
if (x<src_width && y<src_height && x<dst_width && y<dst_height)
{
float4 val = src[src_c];
uchar4 res;
res.x = val.x * mul_constant + add_constant;
res.y = val.y * mul_constant + add_constant;
res.z = val.z * mul_constant + add_constant;
res.w = val.w * mul_constant + add_constant;
dst[dst_c] = res;
}
}
//-----------------------------------------------------------------------------
/** convert kernel rgb -> hsv
*/
__global__ void cuConvertRGBToHSVKernel(const float4* src, float4* dst, size_t stride,
int width, int height, bool normalize)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*stride + x;
if (x<width && y<height)
{
// Read
float4 in = src[c];
float R = in.x;
float G = in.y;
float B = in.z;
float Ma = IUMAX(R, IUMAX(G, B));
float mi = IUMIN(R, IUMIN(G, B));
float C = Ma-mi;
// Hue
float H = 0.0f;
if (C != 0.0f)
{
if (Ma == R)
H = fmod((G - B)/C, 6.0f);
if (Ma == G)
H = (B - R)/C + 2.0f;
if (Ma == B)
H = (R - G)/C + 4.0f;
}
H *= 60.0f;
// Value
float V = Ma;
// Saturation
float S = 0.0f;
if (C != 0.0f)
S = C/V;
if (H < 0.0f)
H += 360.0f;
// Normalize
if (normalize)
H /= 360.0f;
// Write Back
dst[c] = make_float4(H, S, V, in.w);
}
}
//-----------------------------------------------------------------------------
/** convert kernel hsv -> rgb
*/
__global__ void cuConvertHSVToRGBKernel(const float4* src, float4* dst, size_t stride,
int width, int height, bool denormalize)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*stride + x;
if (x<width && y<height)
{
// Read
float4 in = src[c];
float H = in.x;
float S = in.y;
float V = in.z;
float4 rgb = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
// float C = V*S;
// // Denormalize
// if (denormalize)
// H = H*360.0f;
// // RGB
// H /= 60.0f;
// float X = C*(1.0f - abs(fmod(H, 2.0f) - 1.0f));
// if (H >= 0.0f)
// {
// if (H < 1.0f)
// rgb = make_float4(C, X, 0.0f, 0.0f);
// else if (H < 2.0f)
// rgb = make_float4(X, C, 0.0f, 0.0f);
// else if (H < 3.0f)
// rgb = make_float4(0.0f, C, X, 0.0f);
// else if (H < 4.0f)
// rgb = make_float4(0.0f, X, C, 0.0f);
// else if (H < 5.0f)
// rgb = make_float4(X, 0.0f, C, 0.0f);
// else if (H <= 6.0f)
// rgb = make_float4(C, 0.0f, X, 0.0f);
// }
// float m = V-C;
// rgb += m;
if (S == 0)
{
rgb = make_float4(V, V, V, in.w);
dst[c] = rgb;
return;
}
H /= 60.0f;
int i = floor(H);
float f = H-i;
float p = V*(1.0f - S);
float q = V*(1.0f - S*f);
float t = V*(1.0f - S*(1.0f-f));
if (i == 0)
rgb = make_float4(V, t, p, in.w);
else if (i == 1)
rgb = make_float4(q, V, p, in.w);
else if (i == 2)
rgb = make_float4(p, V, t, in.w);
else if (i == 3)
rgb = make_float4(p, q, V, in.w);
else if (i == 4)
rgb = make_float4(t, p, V, in.w);
else if (i == 5)
rgb = make_float4(V, p, q, in.w);
// Write Back
rgb.w = in.w;
dst[c] = rgb;
}
}
/* ***************************************************************************
* CUDA WRAPPERS
* ***************************************************************************/
//-----------------------------------------------------------------------------
/** convert kernel 32f_C3 -> 32f_C4 (float3 -> float4)
*/
IuStatus cuConvert(const iu::ImageGpu_32f_C3* src, const IuRect& src_roi,
iu::ImageGpu_32f_C4* dst, const IuRect& dst_roi)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst_roi.width - dst_roi.x, dimBlock.x),
iu::divUp(dst_roi.height - dst_roi.y, dimBlock.y));
cuConvertC3ToC4Kernel<<<dimGrid, dimBlock>>>(src->data(src_roi.x, src_roi.y), src->stride(),
src_roi.width, src_roi.height,
dst->data(dst_roi.x, dst_roi.y), dst->stride(),
dst_roi.width, dst_roi.height);
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
/** convert kernel 32f_C4 -> 32f_C3 (float4 -> float3)
*/
IuStatus cuConvert(const iu::ImageGpu_32f_C4* src, const IuRect& src_roi,
iu::ImageGpu_32f_C3* dst, const IuRect& dst_roi)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst_roi.width - dst_roi.x, dimBlock.x),
iu::divUp(dst_roi.height - dst_roi.y, dimBlock.y));
cuConvertC4ToC3Kernel<<<dimGrid, dimBlock>>>(src->data(src_roi.x, src_roi.y), src->stride(),
src_roi.width, src_roi.height,
dst->data(dst_roi.x, dst_roi.y), dst->stride(),
dst_roi.width, dst_roi.height);
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
IuStatus cuConvert_8u_32f(const iu::ImageGpu_8u_C1* src, const IuRect& src_roi,
iu::ImageGpu_32f_C1* dst, const IuRect& dst_roi, float mul_constant,
float add_constant)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst_roi.width - dst_roi.x, dimBlock.x),
iu::divUp(dst_roi.height - dst_roi.y, dimBlock.y));
cuConvert8uC1To32fC1Kernel<<<dimGrid, dimBlock>>>(src->data(src_roi.x, src_roi.y),
src->stride(), src_roi.width, src_roi.height,
dst->data(dst_roi.x, dst_roi.y),
dst->stride(), dst_roi.width, dst_roi.height,
mul_constant, add_constant);
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
IuStatus cuConvert_32f_8u(const iu::ImageGpu_32f_C1* src, const IuRect& src_roi,
iu::ImageGpu_8u_C1* dst, const IuRect& dst_roi, float mul_constant,
unsigned char add_constant)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst_roi.width - dst_roi.x, dimBlock.x),
iu::divUp(dst_roi.height - dst_roi.y, dimBlock.y));
cuConvert32fC1To8uC1Kernel<<<dimGrid, dimBlock>>>(src->data(src_roi.x, src_roi.y), src->stride(),
src_roi.width, src_roi.height,
dst->data(dst_roi.x, dst_roi.y),
dst->stride(), dst_roi.width, dst_roi.height,
mul_constant, add_constant);
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
IuStatus cuConvert_32f_8u(const iu::ImageGpu_32f_C4* src, const IuRect& src_roi,
iu::ImageGpu_8u_C4* dst, const IuRect& dst_roi, float mul_constant,
unsigned char add_constant)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(dst_roi.width - dst_roi.x, dimBlock.x),
iu::divUp(dst_roi.height - dst_roi.y, dimBlock.y));
cuConvert32fC4To8uC4Kernel<<<dimGrid, dimBlock>>>(src->data(src_roi.x, src_roi.y),
src->stride(), src_roi.width, src_roi.height,
dst->data(dst_roi.x, dst_roi.y),
dst->stride(), dst_roi.width,
dst_roi.height, mul_constant, add_constant);
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
IuStatus cuConvert_rgb_to_hsv(const iu::ImageGpu_32f_C4* src, iu::ImageGpu_32f_C4* dst,
bool normalize)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(src->width(), dimBlock.x),
iu::divUp(src->height(), dimBlock.y));
cuConvertRGBToHSVKernel<<<dimGrid, dimBlock>>>(src->data(), dst->data(), src->stride(),
src->width(), src->height(), normalize);
cudaThreadSynchronize();
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
//-----------------------------------------------------------------------------
IuStatus cuConvert_hsv_to_rgb(const iu::ImageGpu_32f_C4* src, iu::ImageGpu_32f_C4* dst,
bool denormalize)
{
// fragmentation
const unsigned int block_size = 16;
dim3 dimBlock(block_size, block_size);
dim3 dimGrid(iu::divUp(src->width(), dimBlock.x),
iu::divUp(src->height(), dimBlock.y));
cuConvertHSVToRGBKernel<<<dimGrid, dimBlock>>>(src->data(), dst->data(), src->stride(),
src->width(), src->height(), denormalize);
cudaThreadSynchronize();
IU_CHECK_AND_RETURN_CUDA_ERRORS();
}
} // namespace iuprivate
#endif // IUCORE_CONVERT_CU
|
e43da4231bae43181017858afa72026f9d136c7c.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <quda_internal.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <quda_matrix.h>
#include <float_vector.h>
#include <complex_quda.h>
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Float, typename Gauge, typename Mom>
struct UpdateGaugeArg {
Gauge out;
Gauge in;
Mom momentum;
Float dt;
int nDim;
UpdateGaugeArg(const Gauge &out, const Gauge &in,
const Mom &momentum, Float dt, int nDim)
: out(out), in(in), momentum(momentum), dt(dt), nDim(nDim) { }
};
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
__device__ __host__ void updateGaugeFieldCompute
(UpdateGaugeArg<Float,Gauge,Mom> &arg, int x, int parity) {
typedef complex<Float> Complex;
Matrix<Complex,3> link, result, mom;
for(int dir=0; dir<arg.nDim; ++dir){
arg.in.load((Float*)(link.data), x, dir, parity);
arg.momentum.load((Float*)(mom.data), x, dir, parity);
Complex trace = getTrace(mom);
mom(0,0) -= trace/static_cast<Float>(3.0);
mom(1,1) -= trace/static_cast<Float>(3.0);
mom(2,2) -= trace/static_cast<Float>(3.0);
if (!exact) {
result = link;
// Nth order expansion of exponential
if (!conj_mom) {
for(int r=N; r>0; r--)
result = (arg.dt/r)*mom*result + link;
} else {
for(int r=N; r>0; r--)
result = (arg.dt/r)*conj(mom)*result + link;
}
} else {
mom = arg.dt * mom;
expsu3<Float>(mom);
if (!conj_mom) {
link = mom * link;
} else {
link = conj(mom) * link;
}
result = link;
}
arg.out.save((Float*)(result.data), x, dir, parity);
} // dir
}
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
void updateGaugeField(UpdateGaugeArg<Float,Gauge,Mom> arg) {
for (unsigned int parity=0; parity<2; parity++) {
for (int x=0; x<arg.out.volumeCB; x++) {
updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact>
(arg, x, parity);
}
}
}
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
__global__ void updateGaugeFieldKernel(UpdateGaugeArg<Float,Gauge,Mom> arg) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= 2*arg.out.volumeCB) return;
int parity = (idx >= arg.out.volumeCB) ? 1 : 0;
idx -= parity*arg.out.volumeCB;
updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact>(arg, idx, parity);
}
template <typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
class UpdateGaugeField : public Tunable {
private:
UpdateGaugeArg<Float,Gauge,Mom> arg;
const GaugeField &meta; // meta data
const QudaFieldLocation location; // location of the lattice fields
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return 2*arg.in.volumeCB; }
bool tuneGridDim() const { return false; }
public:
UpdateGaugeField(const UpdateGaugeArg<Float,Gauge,Mom> &arg,
const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("threads=%d,prec=%lu,stride=%d",
2*arg.in.volumeCB, sizeof(Float), arg.in.stride);
}
virtual ~UpdateGaugeField() { }
void apply(const hipStream_t &stream){
if (location == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( updateGaugeFieldKernel<Float,Gauge,Mom,N,conj_mom,exact>)
, dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg);
} else { // run the CPU code
updateGaugeField<Float,Gauge,Mom,N,conj_mom,exact>(arg);
}
} // apply
long long flops() const {
const int Nc = 3;
return arg.nDim*2*arg.in.volumeCB*N*(Nc*Nc*2 + // scalar-matrix multiply
(8*Nc*Nc*Nc - 2*Nc*Nc) + // matrix-matrix multiply
Nc*Nc*2); // matrix-matrix addition
}
long long bytes() const { return arg.nDim*2*arg.in.volumeCB*
(arg.in.Bytes() + arg.out.Bytes() + arg.momentum.Bytes()); }
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
};
template <typename Float, typename Gauge, typename Mom>
void updateGaugeField(Gauge &out, const Gauge &in, const Mom &mom,
double dt, const GaugeField &meta, bool conj_mom, bool exact,
QudaFieldLocation location) {
// degree of exponential expansion
const int N = 8;
if (conj_mom) {
if (exact) {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,true,true> updateGauge(arg, meta, location);
updateGauge.apply(0);
} else {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,true,false> updateGauge(arg, meta, location);
updateGauge.apply(0);
}
} else {
if (exact) {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,false,true> updateGauge(arg, meta, location);
updateGauge.apply(0);
} else {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,false,false> updateGauge(arg, meta, location);
updateGauge.apply(0);
}
}
if (location == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template <typename Float, typename Gauge>
void updateGaugeField(Gauge out, const Gauge &in, const GaugeField &mom,
double dt, bool conj_mom, bool exact,
QudaFieldLocation location) {
if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (mom.Reconstruct() == QUDA_RECONSTRUCT_10) {
// FIX ME - 11 is a misnomer to avoid confusion in template instantiation
updateGaugeField<Float>(out, in, gauge::FloatNOrder<Float,18,2,11>(mom), dt, mom, conj_mom, exact, location);
} else {
errorQuda("Reconstruction type not supported");
}
} else if (mom.Order() == QUDA_MILC_GAUGE_ORDER) {
updateGaugeField<Float>(out, in, gauge::MILCOrder<Float,10>(mom), dt, mom, conj_mom, exact, location);
} else {
errorQuda("Gauge Field order %d not supported", mom.Order());
}
}
template <typename Float>
void updateGaugeField(GaugeField &out, const GaugeField &in, const GaugeField &mom,
double dt, bool conj_mom, bool exact,
QudaFieldLocation location) {
const int Nc = 3;
if (out.Ncolor() != Nc)
errorQuda("Ncolor=%d not supported at this time", out.Ncolor());
if (out.Order() != in.Order() || out.Reconstruct() != in.Reconstruct()) {
errorQuda("Input and output gauge field ordering and reconstruction must match");
}
if (out.isNative()) {
if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
updateGaugeField<Float>(G(out),G(in), mom, dt, conj_mom, exact, location);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G;
updateGaugeField<Float>(G(out), G(in), mom, dt, conj_mom, exact, location);
} else {
errorQuda("Reconstruction type not supported");
}
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
updateGaugeField<Float>(gauge::MILCOrder<Float, Nc*Nc*2>(out),
gauge::MILCOrder<Float, Nc*Nc*2>(in),
mom, dt, conj_mom, exact, location);
} else {
errorQuda("Gauge Field order %d not supported", out.Order());
}
}
#endif
void updateGaugeField(GaugeField &out, double dt, const GaugeField& in,
const GaugeField& mom, bool conj_mom, bool exact)
{
#ifdef GPU_GAUGE_TOOLS
if (out.Precision() != in.Precision() || out.Precision() != mom.Precision())
errorQuda("Gauge and momentum fields must have matching precision");
if (out.Location() != in.Location() || out.Location() != mom.Location())
errorQuda("Gauge and momentum fields must have matching location");
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
updateGaugeField<double>(out, in, mom, dt, conj_mom, exact, out.Location());
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
updateGaugeField<float>(out, in, mom, dt, conj_mom, exact, out.Location());
} else {
errorQuda("Precision %d not supported", out.Precision());
}
#else
errorQuda("Gauge tools are not build");
#endif
}
} // namespace quda
| e43da4231bae43181017858afa72026f9d136c7c.cu | #include <cstdio>
#include <cstdlib>
#include <cuda.h>
#include <quda_internal.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <quda_matrix.h>
#include <float_vector.h>
#include <complex_quda.h>
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Float, typename Gauge, typename Mom>
struct UpdateGaugeArg {
Gauge out;
Gauge in;
Mom momentum;
Float dt;
int nDim;
UpdateGaugeArg(const Gauge &out, const Gauge &in,
const Mom &momentum, Float dt, int nDim)
: out(out), in(in), momentum(momentum), dt(dt), nDim(nDim) { }
};
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
__device__ __host__ void updateGaugeFieldCompute
(UpdateGaugeArg<Float,Gauge,Mom> &arg, int x, int parity) {
typedef complex<Float> Complex;
Matrix<Complex,3> link, result, mom;
for(int dir=0; dir<arg.nDim; ++dir){
arg.in.load((Float*)(link.data), x, dir, parity);
arg.momentum.load((Float*)(mom.data), x, dir, parity);
Complex trace = getTrace(mom);
mom(0,0) -= trace/static_cast<Float>(3.0);
mom(1,1) -= trace/static_cast<Float>(3.0);
mom(2,2) -= trace/static_cast<Float>(3.0);
if (!exact) {
result = link;
// Nth order expansion of exponential
if (!conj_mom) {
for(int r=N; r>0; r--)
result = (arg.dt/r)*mom*result + link;
} else {
for(int r=N; r>0; r--)
result = (arg.dt/r)*conj(mom)*result + link;
}
} else {
mom = arg.dt * mom;
expsu3<Float>(mom);
if (!conj_mom) {
link = mom * link;
} else {
link = conj(mom) * link;
}
result = link;
}
arg.out.save((Float*)(result.data), x, dir, parity);
} // dir
}
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
void updateGaugeField(UpdateGaugeArg<Float,Gauge,Mom> arg) {
for (unsigned int parity=0; parity<2; parity++) {
for (int x=0; x<arg.out.volumeCB; x++) {
updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact>
(arg, x, parity);
}
}
}
template<typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
__global__ void updateGaugeFieldKernel(UpdateGaugeArg<Float,Gauge,Mom> arg) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx >= 2*arg.out.volumeCB) return;
int parity = (idx >= arg.out.volumeCB) ? 1 : 0;
idx -= parity*arg.out.volumeCB;
updateGaugeFieldCompute<Float,Gauge,Mom,N,conj_mom,exact>(arg, idx, parity);
}
template <typename Float, typename Gauge, typename Mom, int N,
bool conj_mom, bool exact>
class UpdateGaugeField : public Tunable {
private:
UpdateGaugeArg<Float,Gauge,Mom> arg;
const GaugeField &meta; // meta data
const QudaFieldLocation location; // location of the lattice fields
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
unsigned int minThreads() const { return 2*arg.in.volumeCB; }
bool tuneGridDim() const { return false; }
public:
UpdateGaugeField(const UpdateGaugeArg<Float,Gauge,Mom> &arg,
const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("threads=%d,prec=%lu,stride=%d",
2*arg.in.volumeCB, sizeof(Float), arg.in.stride);
}
virtual ~UpdateGaugeField() { }
void apply(const cudaStream_t &stream){
if (location == QUDA_CUDA_FIELD_LOCATION) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
updateGaugeFieldKernel<Float,Gauge,Mom,N,conj_mom,exact>
<<<tp.grid,tp.block,tp.shared_bytes>>>(arg);
} else { // run the CPU code
updateGaugeField<Float,Gauge,Mom,N,conj_mom,exact>(arg);
}
} // apply
long long flops() const {
const int Nc = 3;
return arg.nDim*2*arg.in.volumeCB*N*(Nc*Nc*2 + // scalar-matrix multiply
(8*Nc*Nc*Nc - 2*Nc*Nc) + // matrix-matrix multiply
Nc*Nc*2); // matrix-matrix addition
}
long long bytes() const { return arg.nDim*2*arg.in.volumeCB*
(arg.in.Bytes() + arg.out.Bytes() + arg.momentum.Bytes()); }
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
};
template <typename Float, typename Gauge, typename Mom>
void updateGaugeField(Gauge &out, const Gauge &in, const Mom &mom,
double dt, const GaugeField &meta, bool conj_mom, bool exact,
QudaFieldLocation location) {
// degree of exponential expansion
const int N = 8;
if (conj_mom) {
if (exact) {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,true,true> updateGauge(arg, meta, location);
updateGauge.apply(0);
} else {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,true,false> updateGauge(arg, meta, location);
updateGauge.apply(0);
}
} else {
if (exact) {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,false,true> updateGauge(arg, meta, location);
updateGauge.apply(0);
} else {
UpdateGaugeArg<Float, Gauge, Mom> arg(out, in, mom, dt, 4);
UpdateGaugeField<Float,Gauge,Mom,N,false,false> updateGauge(arg, meta, location);
updateGauge.apply(0);
}
}
if (location == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template <typename Float, typename Gauge>
void updateGaugeField(Gauge out, const Gauge &in, const GaugeField &mom,
double dt, bool conj_mom, bool exact,
QudaFieldLocation location) {
if (mom.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (mom.Reconstruct() == QUDA_RECONSTRUCT_10) {
// FIX ME - 11 is a misnomer to avoid confusion in template instantiation
updateGaugeField<Float>(out, in, gauge::FloatNOrder<Float,18,2,11>(mom), dt, mom, conj_mom, exact, location);
} else {
errorQuda("Reconstruction type not supported");
}
} else if (mom.Order() == QUDA_MILC_GAUGE_ORDER) {
updateGaugeField<Float>(out, in, gauge::MILCOrder<Float,10>(mom), dt, mom, conj_mom, exact, location);
} else {
errorQuda("Gauge Field order %d not supported", mom.Order());
}
}
template <typename Float>
void updateGaugeField(GaugeField &out, const GaugeField &in, const GaugeField &mom,
double dt, bool conj_mom, bool exact,
QudaFieldLocation location) {
const int Nc = 3;
if (out.Ncolor() != Nc)
errorQuda("Ncolor=%d not supported at this time", out.Ncolor());
if (out.Order() != in.Order() || out.Reconstruct() != in.Reconstruct()) {
errorQuda("Input and output gauge field ordering and reconstruction must match");
}
if (out.isNative()) {
if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
updateGaugeField<Float>(G(out),G(in), mom, dt, conj_mom, exact, location);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G;
updateGaugeField<Float>(G(out), G(in), mom, dt, conj_mom, exact, location);
} else {
errorQuda("Reconstruction type not supported");
}
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
updateGaugeField<Float>(gauge::MILCOrder<Float, Nc*Nc*2>(out),
gauge::MILCOrder<Float, Nc*Nc*2>(in),
mom, dt, conj_mom, exact, location);
} else {
errorQuda("Gauge Field order %d not supported", out.Order());
}
}
#endif
// Public entry point: evolve the gauge field by the momentum field,
// out = f(in, mom, dt), dispatching on field precision.
// Requires gauge, input and momentum fields to agree in precision and
// location; raises via errorQuda otherwise.  Only compiled when the
// library was built with GPU_GAUGE_TOOLS.
void updateGaugeField(GaugeField &out, double dt, const GaugeField& in,
                      const GaugeField& mom, bool conj_mom, bool exact)
{
#ifdef GPU_GAUGE_TOOLS
  // consistency checks: mixed precision / mixed location is not supported
  if (out.Precision() != in.Precision() || out.Precision() != mom.Precision())
    errorQuda("Gauge and momentum fields must have matching precision");
  if (out.Location() != in.Location() || out.Location() != mom.Location())
    errorQuda("Gauge and momentum fields must have matching location");
  if (out.Precision() == QUDA_DOUBLE_PRECISION) {
    updateGaugeField<double>(out, in, mom, dt, conj_mom, exact, out.Location());
  } else if (out.Precision() == QUDA_SINGLE_PRECISION) {
    updateGaugeField<float>(out, in, mom, dt, conj_mom, exact, out.Location());
  } else {
    errorQuda("Precision %d not supported", out.Precision());
  }
#else
  // fixed grammar of the error message ("are not build" -> "have not been built")
  errorQuda("Gauge tools have not been built");
#endif
}
} // namespace quda
|
71be7a0e82e0b616ec849cceec39af8e299cb3c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include "gpu_hmma.h"
#include <stdio.h>
typedef unsigned long long uint64;
// ---------------------------------------------------------------------------
// bst_masked_softmax: fused masked softmax over block-sparse attention logits.
//
// Launch shape (set by the host launcher BlocksparseMaskedSoftmax below):
//   grid.x = ctx_blks * BSIZE  -> decodes to (query block idx_Q, row idx_q)
//   grid.y = batch dim, grid.z = head dim; blockDim.x up to 1024 threads.
// Each thread block softmaxes one query row across every key block the LUT
// lists for idx_Q.  When use_mask != 0, a bit mask (one bit per key column,
// one MASKT word per key block per row) forces masked logits to -inf before
// the max/sum reductions.  Uses the stable form exp2((x - max) * scale);
// the host folds log2(e) into `scale` so this equals a scaled exp().
// NOTE(review): LutMask64/LutMask32/LutOffset index past the static Sum[]
// array into the dynamic shared-memory allocation sized by the launcher
// (lut_max * 8 or * 12 bytes) -- relies on dynamic smem being laid out
// directly after the static arrays; confirm against the launcher sizing.
// ---------------------------------------------------------------------------
template <uint UNROLL, uint BLOCKS, uint BSIZE, typename T, typename V2, typename MASKT>
__global__ void __launch_bounds__(1024,BLOCKS) bst_masked_softmax(
    const uint2* __restrict__ Lut,   // per-head LUT; header at [idx_Q] = (offset, size)
    const MASKT* __restrict__ Mask,  // per-head, per-row mask words (one per key block)
    const bhalf* __restrict__ X,     // input logits, bfloat16, blocked layout
    T* Y,                            // output probabilities
    uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint max_lut, uint use_mask)
{
    __shared__ float Max[32]; // per-warp partial maxima
    __shared__ float Sum[32]; // per-warp partial exp-sums
    uint64* LutMask64 = (uint64*)&Sum[32];
    uint* LutMask32 = (uint*)&Sum[32];
    uint* LutOffset = BSIZE == 64 ? (uint*)&LutMask64[max_lut] : &LutMask32[max_lut];
    uint tid = threadIdx.x;
    uint idx_Q = blockIdx.x / BSIZE; // Q dim
    uint idx_q = blockIdx.x % BSIZE; // Q dim
    uint idx_B = blockIdx.y; // batch dim
    uint idx_H = blockIdx.z; // head dim
    // each head can optionally have its own lut
    Lut += idx_H * szLut;
    Mask += idx_H * szMask + idx_q * blocks;
    uint2 lut_head = Lut[idx_Q];
    if (tid < 32)
    {
        // Allows non-power of 2 threads to work
        Max[tid] = -FLT_MAX;
        Sum[tid] = 0.0f;
    }
    // prefetch the lut data into shared
    uint lut_offset = lut_head.x;
    uint lut_size = lut_head.y;
    Lut += lut_offset;
    #pragma unroll 1
    for (uint i = tid; i < max_lut; i += blockDim.x)
    {
        if (BSIZE == 64)
        {
            // 64-wide blocks need a 64-bit mask word per block
            uint64 mask = 0;
            if (i < lut_size)
            {
                uint2 entry = Lut[i];
                uint blk_id = entry.x;
                LutOffset[i] = blk_id * BSIZE*BSIZE;
                mask = use_mask ? __ldg(Mask + blk_id) : 0xffffffffffffffff;
            }
            LutMask64[i] = mask;
        }
        else
        {
            // 8/16/32-wide blocks fit in (the low bits of) a 32-bit word
            uint mask = 0;
            if (i < lut_size)
            {
                uint2 entry = Lut[i];
                uint blk_id = entry.x;
                LutOffset[i] = blk_id * BSIZE*BSIZE;
                mask = use_mask ? (uint)__ldg(Mask + blk_id) : 0xffffffff;
            }
            LutMask32[i] = mask;
        }
    }
    __syncthreads();
    // trim warps that we know are out of lut range
    if ((tid & (1024-32))*2*UNROLL < lut_size*BSIZE)
    {
        uint lut_idx = (tid & (1024 - BSIZE/2))*2*UNROLL/BSIZE;
        uint tidx = (tid % (BSIZE/2))*2; // each thread handles 2 adjacent columns
        uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx + LutOffset[lut_idx];
        X += offset;
        asm("mov.b64 %0, %0;" : "+l"(X) : );
        bhalf2 xval[UNROLL];
        #pragma unroll
        for (uint i = 0; i < UNROLL; i++)
        {
            ew_set(xval[i], 0xff80ff80); //-inf, -inf
            if (lut_idx + i < lut_size)
                xval[i] = __ldg((const bhalf2*)(X + i*BSIZE*BSIZE));
        }
        // split the 64 bit mask by half warp
        uint tid16 = BSIZE == 64 ? (tid & 16)/16 : 0;
        uint bit0 = 1 << (tidx - tid16*32);
        uint bit1 = bit0 << 1;
        uint inf = 0xff80; // bfloat16 bit pattern of -inf
        #pragma unroll
        for (int i = 0; i < UNROLL; i++)
        {
            uint mask = LutMask32[(lut_idx + i)*(BSIZE == 64 ? 2 : 1) + tid16];
            // prmt overwrites one packed half with -inf when its mask bit is clear
            asm("{ \n\t"
                ".reg .pred p0, p1; \n\t"
                "setp.eq.u32 p0, %2, 0; \n\t" // if ((mask & bit0) == 0)
                "setp.eq.u32 p1, %3, 0; \n\t" // if ((mask & bit1) == 0)
                "@p0 prmt.b32 %0, %0, %1, 0x3254;\n\t" // set -inf to lo bits
                "@p1 prmt.b32 %0, %0, %1, 0x5410;\n\t" // set -inf to hi bits
                "}" : "+r"(xval[i].x) : "r"(inf), "r"(mask & bit0), "r"(mask & bit1));
        }
        // reduce within thread
        float Xmax[UNROLL];
        for (int i = 0; i < UNROLL; i++)
            Xmax[i] = ew_max(to_float(xval[i]));
        float xmax = Xmax[0];
        for (int i = 1; i < UNROLL; i++)
            xmax = fmaxf(Xmax[i], xmax);
        // reduce within warp
        for (int i = 16; i > 0; i >>= 1)
            xmax = fmaxf(xmax, shfl_xor(xmax, i));
        if (blockDim.x > 32)
        {
            // cross-warp max reduction via shared memory
            // first thread of each warp store to shared
            if ((tid & 31) == 0)
                Max[tid/32] = xmax;
            __syncthreads();
            if (tid < 32)
            {
                // first warp loads all prior reductions
                xmax = Max[tid];
                // reduce within this last warp
                #pragma unroll 1
                for (uint i = shfl_init; i > 0; i >>= 1)
                    xmax = fmaxf(xmax, shfl_xor(xmax, i));
                // final reduction to shared
                Max[tid] = xmax;
            }
            __syncthreads();
            xmax = Max[0];
        }
        // subtract xmax and compute exponent
        float exp_sum = 0;
        for (int i = 0; i < UNROLL; i++)
        {
            // use fast approx math: e**x == 2**(x * log2(e))
            // log2(e) is included in scale factor
            float2 Xval = ew_ex2(ew_mul(ew_sub(to_float(xval[i]), xmax), scale));
            exp_sum += ew_sum(Xval);
            xval[i] = to_bhalf(Xval);
        }
        // reduce within warp
        for (int i = 16; i > 0; i >>= 1)
            exp_sum += shfl_xor(exp_sum, i);
        if (blockDim.x > 32)
        {
            // cross-warp sum reduction via shared memory
            // first thread of each warp store to shared
            if ((tid & 31) == 0)
                Sum[tid/32] = exp_sum;
            __syncthreads();
            if (tid < 32)
            {
                // first warp loads all prior reductions
                exp_sum = Sum[tid];
                // reduce within this last warp
                #pragma unroll 1
                for (uint i = shfl_init; i > 0; i >>= 1)
                    exp_sum += shfl_xor(exp_sum, i);
                // final reduction to shared
                Sum[tid] = exp_sum;
            }
            __syncthreads();
            exp_sum = Sum[0];
        }
        // normalize and write out (predicated store skips past-end entries)
        float rcp_exp_sum = ew_rcp(exp_sum);
        Y += offset;
        asm("mov.b64 %0, %0;" : "+l"(Y) : );
        #pragma unroll
        for (int i = 0; i < UNROLL; i++)
        {
            float2 y2 = ew_mul(to_float(xval[i]), rcp_exp_sum);
            store((V2*)Y, y2, i*BSIZE*BSIZE/2, lut_idx + i < lut_size);
        }
    }
}
// ---------------------------------------------------------------------------
// bst_masked_softmax_grad: backward pass of the block-sparse softmax.
// Computes, per query row, dx = (dy - sum(dy*y)) * y * scale where the sum
// runs over every key position the LUT lists for this row.  Same grid
// mapping as the forward kernel (grid.x -> (idx_Q, idx_q), y = batch,
// z = head).  Dynamic shared memory (past the static Sum[]) caches the
// per-entry block offsets; sized by the host launcher as lut_max * 4 bytes.
// ---------------------------------------------------------------------------
template <uint UNROLL, uint BLOCKS, uint BSIZE, typename T, typename V2>
__global__ void __launch_bounds__(1024,BLOCKS) bst_masked_softmax_grad(
    const uint2* __restrict__ Lut, // per-head LUT; header at [idx_Q] = (offset, size)
    const T* __restrict__ DY,      // upstream gradient
    const T* __restrict__ Y,       // forward-pass softmax output
    T* DX,                         // output gradient
    uint szLut, uint szHead, uint szBatch, float scale, uint shfl_init)
{
    __shared__ float Sum[32]; // per-warp partial sums of dy*y
    uint* LutOffset = (uint*)&Sum[32];
    uint tid = threadIdx.x;
    uint idx_Q = blockIdx.x / BSIZE;
    uint idx_q = blockIdx.x % BSIZE;
    uint idx_B = blockIdx.y; // batch dim
    uint idx_H = blockIdx.z; // head dim
    // each head can optionally have its own lut
    Lut += idx_H * szLut;
    uint2 lut_head = Lut[idx_Q];
    if (tid < 32)
        Sum[tid] = 0.0f;
    // prefetch the lut data into shared
    uint lut_offset = lut_head.x;
    uint lut_size = lut_head.y;
    Lut += lut_offset;
    #pragma unroll 1
    for (uint i = tid; i < lut_size; i += blockDim.x)
        LutOffset[i] = Lut[i].x * BSIZE*BSIZE;
    __syncthreads();
    // trim warps that we know are out of lut range
    if ((tid & (1024-32))*2*UNROLL < lut_size*BSIZE)
    {
        uint lut_idx = (tid & (1024 - BSIZE/2))*2*UNROLL/BSIZE;
        uint tidx = (tid % (BSIZE/2))*2; // each thread handles 2 adjacent columns
        uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx + LutOffset[lut_idx];
        DY += offset;
        Y += offset;
        asm("mov.b64 %0, %0;" : "+l"(DY) : );
        asm("mov.b64 %0, %0;" : "+l"(Y) : );
        V2 dy[UNROLL], y[UNROLL];
        #pragma unroll
        for (uint i = 0; i < UNROLL; i++)
        {
            // zero-fill past-end entries so they contribute nothing to the sum
            ew_set(dy[i], 0);
            ew_set( y[i], 0);
            if (lut_idx + i < lut_size)
            {
                dy[i] = __ldg((const V2*)(DY + i*BSIZE*BSIZE));
                y[i] = __ldg((const V2*)( Y + i*BSIZE*BSIZE));
            }
        }
        // compute dy * y and start reduction
        float sum_dyy = 0.0f;
        for (int i = 0; i < UNROLL; i++)
            sum_dyy += ew_sum(ew_mul(to_float(dy[i]), to_float(y[i])));
        // reduce within warp
        for (int i = 16; i > 0; i >>= 1)
            sum_dyy += shfl_xor(sum_dyy, i);
        if (blockDim.x > 32)
        {
            // cross-warp sum reduction via shared memory
            // first thread of each warp store to shared
            if ((tid & 31) == 0)
                Sum[tid/32] = sum_dyy;
            __syncthreads();
            if (tid < 32)
            {
                // first warp loads all prior reductions
                sum_dyy = Sum[tid];
                // reduce within this last warp
                #pragma unroll 1
                for (uint i = shfl_init; i > 0; i >>= 1)
                    sum_dyy += shfl_xor(sum_dyy, i);
                // final reduction to shared
                Sum[tid] = sum_dyy;
            }
            __syncthreads();
            sum_dyy = Sum[0];
        }
        DX += offset;
        //asm("mov.b64 %0, %0;" : "+l"(DX) : );
        #pragma unroll
        for (uint i = 0; i < UNROLL; i++)
        {
            // dx = (dy - sum_dyy) * y * scale
            float2 dx2 = ew_mul(ew_mul(ew_sub(to_float(dy[i]), sum_dyy), to_float(y[i])), scale);
            store((V2*)DX, dx2, i*BSIZE*BSIZE/2, lut_idx + i < lut_size);
            // asm (
            // "{ \n\t"
            // ".reg .pred p; \n\t"
            // ".reg .s64 DX, offset; \n\t"
            // "setp.lt.u32 p, %3, %4; \n\t"
            // "mov.b64 offset, {%1, 0}; \n\t"
            // "add.s64 DX, %0, offset; \n\t"
            // "@p st.global.wb.u32 [DX], %2; \n\t"
            // "}" :: "l"(DX), "r"(i*BSIZE*BSIZE*2), "r"(dx.x), "r"(lut_idx + i), "r"(lut_size));
        }
    }
}
#define LOG2e 1.4426950408889634f
typedef unsigned char uchar;
// Host launcher for the forward block-sparse masked softmax (HIP).
// Picks an unroll factor and thread count so that threads*unroll*2 values
// cover maxK = max_lut*block_size, then dispatches on block_size, selecting
// a mask word type that matches the block width (64/32/16/8 bits).
// Dynamic shared memory = lut_max * (offset 4B + mask 4B or 8B) per entry.
// Always returns true; launch errors are not checked here.
template <typename T, typename V>
bool BlocksparseMaskedSoftmax(hipStream_t stream,
    const uint2* lut,
    const char* mask,
    const bhalf* x,
    T* y,
    uint block_size, uint blocks,
    uint batch_dim, uint head_dim, uint ctx_blks,
    uint lut_heads, uint lut_dim, uint max_lut,
    uint mask_heads, float scale)
{
    // per-head strides are zero when the lut/mask is shared across heads
    uint szLut = lut_heads > 1 ? lut_dim : 0;
    uint szMask = mask_heads > 1 ? blocks * block_size : 0;
    uint gridQ = ctx_blks * block_size;
    uint szHead = blocks * block_size * block_size;
    uint szBatch = head_dim * szHead;
    uint maxK = max_lut * block_size;
    //hipMemsetD16Async((hipDeviceptr_t)c, 0, szBatch*batch_dim, stream);
    // combine scaling with fast exp(x) compute
    scale *= LOG2e;
    dim3 grid(gridQ, batch_dim, head_dim);
    // each thread consumes 2 values per unroll step
    uint unroll, threads;
    if (maxK > 1024*16) { unroll = 16; threads = CEIL_DIV(maxK, 32*16*2) * 32; }
    else if (maxK > 1024* 8) { unroll = 8; threads = CEIL_DIV(maxK, 32* 8*2) * 32; }
    else { unroll = 4; threads = CEIL_DIV(maxK, 32* 4*2) * 32; }
    uint bshift = block_size == 64 ? 5 : block_size == 32 ? 4 : block_size == 16 ? 3 : 2;
    uint shfl_init = THREAD_POW2(threads) / 64;
    uint lut_max = (threads * unroll) >> bshift;
    uint shared = lut_max * 8; // bytes: 4B offset + 4B 32-bit mask per entry
    if (block_size == 64)
    {
        shared = lut_max * 12; // 4B offset + 8B 64-bit mask per entry
        if (unroll == 16)
            hipLaunchKernelGGL(( bst_masked_softmax<16,1,64,T,V,uint64>), dim3(grid),dim3(threads),shared,stream, lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else if (unroll == 8)
            hipLaunchKernelGGL(( bst_masked_softmax< 8,2,64,T,V,uint64>), dim3(grid),dim3(threads),shared,stream, lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else // (unroll == 4)
            hipLaunchKernelGGL(( bst_masked_softmax< 4,2,64,T,V,uint64>), dim3(grid),dim3(threads),shared,stream, lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
    }
    else if (block_size == 32)
    {
        if (unroll == 16)
            hipLaunchKernelGGL(( bst_masked_softmax<16,1,32,T,V, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else if (unroll == 8)
            hipLaunchKernelGGL(( bst_masked_softmax< 8,2,32,T,V, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else // (unroll == 4)
            hipLaunchKernelGGL(( bst_masked_softmax< 4,2,32,T,V, uint>), dim3(grid),dim3(threads),shared,stream, lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
    }
    else if (block_size == 16)
    {
        if (unroll == 16)
            hipLaunchKernelGGL(( bst_masked_softmax<16,1,16,T,V,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else if (unroll == 8)
            hipLaunchKernelGGL(( bst_masked_softmax< 8,2,16,T,V,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else // (unroll == 4)
            hipLaunchKernelGGL(( bst_masked_softmax< 4,2,16,T,V,ushort>), dim3(grid),dim3(threads),shared,stream, lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
    }
    else
    {
        if (unroll == 16)
            hipLaunchKernelGGL(( bst_masked_softmax<16,1, 8,T,V, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else if (unroll == 8)
            hipLaunchKernelGGL(( bst_masked_softmax< 8,2, 8,T,V, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else // (unroll == 4)
            hipLaunchKernelGGL(( bst_masked_softmax< 4,2, 8,T,V, uchar>), dim3(grid),dim3(threads),shared,stream, lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
    }
    return true;
}
// explicit instantiations for the two supported half formats
template bool BlocksparseMaskedSoftmax<ehalf,ehalf2>(hipStream_t stream, const uint2* lut, const char* mask, const bhalf* x, ehalf* y, uint block_size, uint blocks, uint batch_dim, uint head_dim, uint ctx_blks, uint lut_heads, uint lut_dim, uint max_lut, uint mask_heads, float scale);
template bool BlocksparseMaskedSoftmax<bhalf,bhalf2>(hipStream_t stream, const uint2* lut, const char* mask, const bhalf* x, bhalf* y, uint block_size, uint blocks, uint batch_dim, uint head_dim, uint ctx_blks, uint lut_heads, uint lut_dim, uint max_lut, uint mask_heads, float scale);
// Host launcher for the block-sparse masked softmax backward pass (HIP).
// Same unroll/thread sizing as the forward launcher; dispatch order differs
// (unroll outermost, then block_size).  No mask is needed in the backward
// pass since masked positions already produced y == 0 in the forward pass.
// Dynamic shared memory = lut_max * 4 bytes (block offsets only).
// Always returns true; launch errors are not checked here.
template <typename T, typename V>
bool BlocksparseMaskedSoftmaxGrad(hipStream_t stream,
    const uint2* lut,
    const T* dy,
    const T* y,
    T* dx,
    uint block_size, uint blocks,
    uint batch_dim, uint head_dim, uint ctx_blks,
    uint lut_heads, uint lut_dim, uint max_lut,
    float scale)
{
    // per-head stride is zero when the lut is shared across heads
    uint szLut = lut_heads > 1 ? lut_dim : 0;
    uint gridQ = ctx_blks * block_size;
    uint szHead = blocks * block_size * block_size;
    uint szBatch = head_dim * szHead;
    uint maxK = max_lut * block_size;
    //hipMemsetD16Async((hipDeviceptr_t)c, 0, szBatch*batch_dim, stream);
    dim3 grid(gridQ, batch_dim, head_dim);
    // each thread consumes 2 values per unroll step
    uint unroll, threads;
    if (maxK > 1024*16) { unroll = 16; threads = CEIL_DIV(maxK, 32*16*2) * 32; }
    else if (maxK > 1024* 8) { unroll = 8; threads = CEIL_DIV(maxK, 32* 8*2) * 32; }
    else { unroll = 4; threads = CEIL_DIV(maxK, 32* 4*2) * 32; }
    uint bshift = block_size == 64 ? 5 : block_size == 32 ? 4 : block_size == 16 ? 3 : 2;
    uint shfl_init = THREAD_POW2(threads) / 64;
    uint lut_max = (threads * unroll) >> bshift;
    uint shared = lut_max * 4; // bytes: one 4B block offset per lut entry
    if (unroll == 16)
    {
        if (block_size == 64)
            hipLaunchKernelGGL(( bst_masked_softmax_grad<16,1,64,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
        else if (block_size == 32)
            hipLaunchKernelGGL(( bst_masked_softmax_grad<16,1,32,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
        else if (block_size == 16)
            hipLaunchKernelGGL(( bst_masked_softmax_grad<16,1,16,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
        else
            hipLaunchKernelGGL(( bst_masked_softmax_grad<16,1, 8,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
    }
    else if (unroll == 8)
    {
        if (block_size == 64)
            hipLaunchKernelGGL(( bst_masked_softmax_grad< 8,2,64,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
        else if (block_size == 32)
            hipLaunchKernelGGL(( bst_masked_softmax_grad< 8,2,32,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
        else if (block_size == 16)
            hipLaunchKernelGGL(( bst_masked_softmax_grad< 8,2,16,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
        else
            hipLaunchKernelGGL(( bst_masked_softmax_grad< 8,2, 8,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
    }
    else // (unroll == 4)
    {
        if (block_size == 64)
            hipLaunchKernelGGL(( bst_masked_softmax_grad< 4,2,64,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
        else if (block_size == 32)
            hipLaunchKernelGGL(( bst_masked_softmax_grad< 4,2,32,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
        else if (block_size == 16)
            hipLaunchKernelGGL(( bst_masked_softmax_grad< 4,2,16,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
        else
            hipLaunchKernelGGL(( bst_masked_softmax_grad< 4,2, 8,T,V>), dim3(grid),dim3(threads),shared,stream, lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
    }
    return true;
}
// explicit instantiations for the two supported half formats
template bool BlocksparseMaskedSoftmaxGrad<ehalf,ehalf2>(hipStream_t stream, const uint2* lut, const ehalf* dy, const ehalf* y, ehalf* dx, uint block_size, uint blocks, uint batch_dim, uint head_dim, uint ctx_blks, uint lut_heads, uint lut_dim, uint max_lut, float scale);
template bool BlocksparseMaskedSoftmaxGrad<bhalf,bhalf2>(hipStream_t stream, const uint2* lut, const bhalf* dy, const bhalf* y, bhalf* dx, uint block_size, uint blocks, uint batch_dim, uint head_dim, uint ctx_blks, uint lut_heads, uint lut_dim, uint max_lut, float scale);
// Rewrites attention masks so that key positions at index >= autoregress_at_k
// switch from bidirectional to autoregressive (lower-triangular) visibility.
// Grid: x = ceil(blocks/32) with 32 mask blocks per CUDA block, y = query
// row within a block (0..BSIZE-1), z = lut head.  One thread per mask block.
template <int BSIZE, typename MASKT>
__global__ void __launch_bounds__(32) bst_partial_autoregressive_mask(
    const int2* __restrict__ Lut,
    const MASKT* __restrict__ MaskI, MASKT* MaskO,
    uint blocks, uint szLut, int autoregress_at_k)
{
    uint blk = blockIdx.x*32 + threadIdx.x; // global mask-block index
    if (blk >= blocks)
        return;

    uint row  = blockIdx.y; // q row within the block
    uint head = blockIdx.z;

    uint lut_index  = head*szLut + blk;
    uint mask_index = head*blocks*BSIZE + row*blocks + blk;

    int2 entry = Lut[lut_index];
    int K = entry.y*BSIZE;       // entry.y: block index for keys
    int q = entry.x*BSIZE + row; // entry.x: block index for queries -> full query index

    // shift amount for the bidirectional to autoregressive transition
    int shiftA = BSIZE - min(max(autoregress_at_k - K, 0), BSIZE);
    // shift amount for the normal autoregressive property (lower triangular)
    int shiftB = min(max(BSIZE-1 + K - q, 0), BSIZE);
    // final shift is the smaller of the two
    int shift = min(shiftA, shiftB);

    // Unsigned right shift of an all-ones pattern turns the mask off where
    // needed: shift 0 leaves it unchanged, shift BSIZE turns this row/block
    // fully off, anything in between turns it partially off.
    MaskO[mask_index] = MaskI[mask_index] & ((MASKT)-1 >> shift);
}
// Host launcher: rewrite the attention masks (maskI -> maskO) so positions
// at key index >= autoregress_at_k become autoregressive.  One thread per
// mask block (32 per CUDA block); grid.y iterates rows within a block,
// grid.z iterates lut heads.  The mask word type matches the block width.
// Always returns true; launch errors are not checked here.
bool BstPartialAutoregressiveMask(hipStream_t stream,
    const int2* lut, const char* maskI, char* maskO,
    uint block_size, uint blocks, uint lut_heads, uint lut_dim, int autoregress_at_k)
{
    dim3 grid(CEIL_DIV(blocks,32), block_size, lut_heads);
    if (block_size == 64)
        hipLaunchKernelGGL(( bst_partial_autoregressive_mask<64,uint64>), dim3(grid),dim3(32),0,stream, lut, (const uint64*)maskI, (uint64*)maskO, blocks, lut_dim, autoregress_at_k);
    else if (block_size == 32)
        hipLaunchKernelGGL(( bst_partial_autoregressive_mask<32, uint>), dim3(grid),dim3(32),0,stream, lut, (const uint*)maskI, ( uint*)maskO, blocks, lut_dim, autoregress_at_k);
    else if (block_size == 16)
        hipLaunchKernelGGL(( bst_partial_autoregressive_mask<16,ushort>), dim3(grid),dim3(32),0,stream, lut, (const ushort*)maskI, (ushort*)maskO, blocks, lut_dim, autoregress_at_k);
    else
        hipLaunchKernelGGL(( bst_partial_autoregressive_mask< 8, uchar>), dim3(grid),dim3(32),0,stream, lut, (const uchar*)maskI, ( uchar*)maskO, blocks, lut_dim, autoregress_at_k);
    return true;
}
#endif // GOOGLE_CUDA | 71be7a0e82e0b616ec849cceec39af8e299cb3c8.cu |
#if GOOGLE_CUDA
#include "ew_op_gpu.h"
#include "gpu_hmma.h"
#include <stdio.h>
typedef unsigned long long uint64;
// ---------------------------------------------------------------------------
// bst_masked_softmax (CUDA variant of the HIP kernel above): fused masked
// softmax over block-sparse attention logits.  grid.x = ctx_blks * BSIZE
// decodes to (query block idx_Q, row idx_q); grid.y = batch, grid.z = head.
// Optional bit mask forces masked logits to -inf; uses the stable form
// exp2((x - max) * scale) with log2(e) folded into `scale` by the host.
// NOTE(review): LutMask*/LutOffset index past the static Sum[] array into
// the dynamic shared-memory allocation sized by the launcher.
// ---------------------------------------------------------------------------
template <uint UNROLL, uint BLOCKS, uint BSIZE, typename T, typename V2, typename MASKT>
__global__ void __launch_bounds__(1024,BLOCKS) bst_masked_softmax(
    const uint2* __restrict__ Lut,   // per-head LUT; header at [idx_Q] = (offset, size)
    const MASKT* __restrict__ Mask,  // per-head, per-row mask words (one per key block)
    const bhalf* __restrict__ X,     // input logits, bfloat16, blocked layout
    T* Y,                            // output probabilities
    uint blocks, uint szLut, uint szMask, uint szHead, uint szBatch, float scale, uint shfl_init, uint max_lut, uint use_mask)
{
    __shared__ float Max[32]; // per-warp partial maxima
    __shared__ float Sum[32]; // per-warp partial exp-sums
    uint64* LutMask64 = (uint64*)&Sum[32];
    uint* LutMask32 = (uint*)&Sum[32];
    uint* LutOffset = BSIZE == 64 ? (uint*)&LutMask64[max_lut] : &LutMask32[max_lut];
    uint tid = threadIdx.x;
    uint idx_Q = blockIdx.x / BSIZE; // Q dim
    uint idx_q = blockIdx.x % BSIZE; // Q dim
    uint idx_B = blockIdx.y; // batch dim
    uint idx_H = blockIdx.z; // head dim
    // each head can optionally have its own lut
    Lut += idx_H * szLut;
    Mask += idx_H * szMask + idx_q * blocks;
    uint2 lut_head = Lut[idx_Q];
    if (tid < 32)
    {
        // Allows non-power of 2 threads to work
        Max[tid] = -FLT_MAX;
        Sum[tid] = 0.0f;
    }
    // prefetch the lut data into shared
    uint lut_offset = lut_head.x;
    uint lut_size = lut_head.y;
    Lut += lut_offset;
    #pragma unroll 1
    for (uint i = tid; i < max_lut; i += blockDim.x)
    {
        if (BSIZE == 64)
        {
            // 64-wide blocks need a 64-bit mask word per block
            uint64 mask = 0;
            if (i < lut_size)
            {
                uint2 entry = Lut[i];
                uint blk_id = entry.x;
                LutOffset[i] = blk_id * BSIZE*BSIZE;
                mask = use_mask ? __ldg(Mask + blk_id) : 0xffffffffffffffff;
            }
            LutMask64[i] = mask;
        }
        else
        {
            // 8/16/32-wide blocks fit in (the low bits of) a 32-bit word
            uint mask = 0;
            if (i < lut_size)
            {
                uint2 entry = Lut[i];
                uint blk_id = entry.x;
                LutOffset[i] = blk_id * BSIZE*BSIZE;
                mask = use_mask ? (uint)__ldg(Mask + blk_id) : 0xffffffff;
            }
            LutMask32[i] = mask;
        }
    }
    __syncthreads();
    // trim warps that we know are out of lut range
    if ((tid & (1024-32))*2*UNROLL < lut_size*BSIZE)
    {
        uint lut_idx = (tid & (1024 - BSIZE/2))*2*UNROLL/BSIZE;
        uint tidx = (tid % (BSIZE/2))*2; // each thread handles 2 adjacent columns
        uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx + LutOffset[lut_idx];
        X += offset;
        asm("mov.b64 %0, %0;" : "+l"(X) : );
        bhalf2 xval[UNROLL];
        #pragma unroll
        for (uint i = 0; i < UNROLL; i++)
        {
            ew_set(xval[i], 0xff80ff80); //-inf, -inf
            if (lut_idx + i < lut_size)
                xval[i] = __ldg((const bhalf2*)(X + i*BSIZE*BSIZE));
        }
        // split the 64 bit mask by half warp
        uint tid16 = BSIZE == 64 ? (tid & 16)/16 : 0;
        uint bit0 = 1 << (tidx - tid16*32);
        uint bit1 = bit0 << 1;
        uint inf = 0xff80; // bfloat16 bit pattern of -inf
        #pragma unroll
        for (int i = 0; i < UNROLL; i++)
        {
            uint mask = LutMask32[(lut_idx + i)*(BSIZE == 64 ? 2 : 1) + tid16];
            // prmt overwrites one packed half with -inf when its mask bit is clear
            asm("{ \n\t"
                ".reg .pred p0, p1; \n\t"
                "setp.eq.u32 p0, %2, 0; \n\t" // if ((mask & bit0) == 0)
                "setp.eq.u32 p1, %3, 0; \n\t" // if ((mask & bit1) == 0)
                "@p0 prmt.b32 %0, %0, %1, 0x3254;\n\t" // set -inf to lo bits
                "@p1 prmt.b32 %0, %0, %1, 0x5410;\n\t" // set -inf to hi bits
                "}" : "+r"(xval[i].x) : "r"(inf), "r"(mask & bit0), "r"(mask & bit1));
        }
        // reduce within thread
        float Xmax[UNROLL];
        for (int i = 0; i < UNROLL; i++)
            Xmax[i] = ew_max(to_float(xval[i]));
        float xmax = Xmax[0];
        for (int i = 1; i < UNROLL; i++)
            xmax = fmaxf(Xmax[i], xmax);
        // reduce within warp
        for (int i = 16; i > 0; i >>= 1)
            xmax = fmaxf(xmax, shfl_xor(xmax, i));
        if (blockDim.x > 32)
        {
            // cross-warp max reduction via shared memory
            // first thread of each warp store to shared
            if ((tid & 31) == 0)
                Max[tid/32] = xmax;
            __syncthreads();
            if (tid < 32)
            {
                // first warp loads all prior reductions
                xmax = Max[tid];
                // reduce within this last warp
                #pragma unroll 1
                for (uint i = shfl_init; i > 0; i >>= 1)
                    xmax = fmaxf(xmax, shfl_xor(xmax, i));
                // final reduction to shared
                Max[tid] = xmax;
            }
            __syncthreads();
            xmax = Max[0];
        }
        // subtract xmax and compute exponent
        float exp_sum = 0;
        for (int i = 0; i < UNROLL; i++)
        {
            // use fast approx math: e**x == 2**(x * log2(e))
            // log2(e) is included in scale factor
            float2 Xval = ew_ex2(ew_mul(ew_sub(to_float(xval[i]), xmax), scale));
            exp_sum += ew_sum(Xval);
            xval[i] = to_bhalf(Xval);
        }
        // reduce within warp
        for (int i = 16; i > 0; i >>= 1)
            exp_sum += shfl_xor(exp_sum, i);
        if (blockDim.x > 32)
        {
            // cross-warp sum reduction via shared memory
            // first thread of each warp store to shared
            if ((tid & 31) == 0)
                Sum[tid/32] = exp_sum;
            __syncthreads();
            if (tid < 32)
            {
                // first warp loads all prior reductions
                exp_sum = Sum[tid];
                // reduce within this last warp
                #pragma unroll 1
                for (uint i = shfl_init; i > 0; i >>= 1)
                    exp_sum += shfl_xor(exp_sum, i);
                // final reduction to shared
                Sum[tid] = exp_sum;
            }
            __syncthreads();
            exp_sum = Sum[0];
        }
        // normalize and write out (predicated store skips past-end entries)
        float rcp_exp_sum = ew_rcp(exp_sum);
        Y += offset;
        asm("mov.b64 %0, %0;" : "+l"(Y) : );
        #pragma unroll
        for (int i = 0; i < UNROLL; i++)
        {
            float2 y2 = ew_mul(to_float(xval[i]), rcp_exp_sum);
            store((V2*)Y, y2, i*BSIZE*BSIZE/2, lut_idx + i < lut_size);
        }
    }
}
// ---------------------------------------------------------------------------
// bst_masked_softmax_grad (CUDA variant): backward pass of the block-sparse
// softmax.  Per query row computes dx = (dy - sum(dy*y)) * y * scale, the
// sum running over every key position the LUT lists for this row.  Same
// grid mapping as the forward kernel; dynamic shared memory (past the
// static Sum[]) caches block offsets, sized lut_max * 4 bytes by the host.
// ---------------------------------------------------------------------------
template <uint UNROLL, uint BLOCKS, uint BSIZE, typename T, typename V2>
__global__ void __launch_bounds__(1024,BLOCKS) bst_masked_softmax_grad(
    const uint2* __restrict__ Lut, // per-head LUT; header at [idx_Q] = (offset, size)
    const T* __restrict__ DY,      // upstream gradient
    const T* __restrict__ Y,       // forward-pass softmax output
    T* DX,                         // output gradient
    uint szLut, uint szHead, uint szBatch, float scale, uint shfl_init)
{
    __shared__ float Sum[32]; // per-warp partial sums of dy*y
    uint* LutOffset = (uint*)&Sum[32];
    uint tid = threadIdx.x;
    uint idx_Q = blockIdx.x / BSIZE;
    uint idx_q = blockIdx.x % BSIZE;
    uint idx_B = blockIdx.y; // batch dim
    uint idx_H = blockIdx.z; // head dim
    // each head can optionally have its own lut
    Lut += idx_H * szLut;
    uint2 lut_head = Lut[idx_Q];
    if (tid < 32)
        Sum[tid] = 0.0f;
    // prefetch the lut data into shared
    uint lut_offset = lut_head.x;
    uint lut_size = lut_head.y;
    Lut += lut_offset;
    #pragma unroll 1
    for (uint i = tid; i < lut_size; i += blockDim.x)
        LutOffset[i] = Lut[i].x * BSIZE*BSIZE;
    __syncthreads();
    // trim warps that we know are out of lut range
    if ((tid & (1024-32))*2*UNROLL < lut_size*BSIZE)
    {
        uint lut_idx = (tid & (1024 - BSIZE/2))*2*UNROLL/BSIZE;
        uint tidx = (tid % (BSIZE/2))*2; // each thread handles 2 adjacent columns
        uint offset = idx_B*szBatch + idx_H*szHead + idx_q*BSIZE + tidx + LutOffset[lut_idx];
        DY += offset;
        Y += offset;
        asm("mov.b64 %0, %0;" : "+l"(DY) : );
        asm("mov.b64 %0, %0;" : "+l"(Y) : );
        V2 dy[UNROLL], y[UNROLL];
        #pragma unroll
        for (uint i = 0; i < UNROLL; i++)
        {
            // zero-fill past-end entries so they contribute nothing to the sum
            ew_set(dy[i], 0);
            ew_set( y[i], 0);
            if (lut_idx + i < lut_size)
            {
                dy[i] = __ldg((const V2*)(DY + i*BSIZE*BSIZE));
                y[i] = __ldg((const V2*)( Y + i*BSIZE*BSIZE));
            }
        }
        // compute dy * y and start reduction
        float sum_dyy = 0.0f;
        for (int i = 0; i < UNROLL; i++)
            sum_dyy += ew_sum(ew_mul(to_float(dy[i]), to_float(y[i])));
        // reduce within warp
        for (int i = 16; i > 0; i >>= 1)
            sum_dyy += shfl_xor(sum_dyy, i);
        if (blockDim.x > 32)
        {
            // cross-warp sum reduction via shared memory
            // first thread of each warp store to shared
            if ((tid & 31) == 0)
                Sum[tid/32] = sum_dyy;
            __syncthreads();
            if (tid < 32)
            {
                // first warp loads all prior reductions
                sum_dyy = Sum[tid];
                // reduce within this last warp
                #pragma unroll 1
                for (uint i = shfl_init; i > 0; i >>= 1)
                    sum_dyy += shfl_xor(sum_dyy, i);
                // final reduction to shared
                Sum[tid] = sum_dyy;
            }
            __syncthreads();
            sum_dyy = Sum[0];
        }
        DX += offset;
        //asm("mov.b64 %0, %0;" : "+l"(DX) : );
        #pragma unroll
        for (uint i = 0; i < UNROLL; i++)
        {
            // dx = (dy - sum_dyy) * y * scale
            float2 dx2 = ew_mul(ew_mul(ew_sub(to_float(dy[i]), sum_dyy), to_float(y[i])), scale);
            store((V2*)DX, dx2, i*BSIZE*BSIZE/2, lut_idx + i < lut_size);
            // asm (
            // "{ \n\t"
            // ".reg .pred p; \n\t"
            // ".reg .s64 DX, offset; \n\t"
            // "setp.lt.u32 p, %3, %4; \n\t"
            // "mov.b64 offset, {%1, 0}; \n\t"
            // "add.s64 DX, %0, offset; \n\t"
            // "@p st.global.wb.u32 [DX], %2; \n\t"
            // "}" :: "l"(DX), "r"(i*BSIZE*BSIZE*2), "r"(dx.x), "r"(lut_idx + i), "r"(lut_size));
        }
    }
}
#define LOG2e 1.4426950408889634f
typedef unsigned char uchar;
// Host launcher for the forward block-sparse masked softmax (CUDA).
// Picks an unroll factor and thread count so that threads*unroll*2 values
// cover maxK = max_lut*block_size, then dispatches on block_size, selecting
// a mask word type that matches the block width (64/32/16/8 bits).
// Dynamic shared memory = lut_max * (offset 4B + mask 4B or 8B) per entry.
// Always returns true; launch errors are not checked here.
template <typename T, typename V>
bool BlocksparseMaskedSoftmax(CUstream stream,
    const uint2* lut,
    const char* mask,
    const bhalf* x,
    T* y,
    uint block_size, uint blocks,
    uint batch_dim, uint head_dim, uint ctx_blks,
    uint lut_heads, uint lut_dim, uint max_lut,
    uint mask_heads, float scale)
{
    // per-head strides are zero when the lut/mask is shared across heads
    uint szLut = lut_heads > 1 ? lut_dim : 0;
    uint szMask = mask_heads > 1 ? blocks * block_size : 0;
    uint gridQ = ctx_blks * block_size;
    uint szHead = blocks * block_size * block_size;
    uint szBatch = head_dim * szHead;
    uint maxK = max_lut * block_size;
    //cuMemsetD16Async((CUdeviceptr)c, 0, szBatch*batch_dim, stream);
    // combine scaling with fast exp(x) compute
    scale *= LOG2e;
    dim3 grid(gridQ, batch_dim, head_dim);
    // each thread consumes 2 values per unroll step
    uint unroll, threads;
    if (maxK > 1024*16) { unroll = 16; threads = CEIL_DIV(maxK, 32*16*2) * 32; }
    else if (maxK > 1024* 8) { unroll = 8; threads = CEIL_DIV(maxK, 32* 8*2) * 32; }
    else { unroll = 4; threads = CEIL_DIV(maxK, 32* 4*2) * 32; }
    uint bshift = block_size == 64 ? 5 : block_size == 32 ? 4 : block_size == 16 ? 3 : 2;
    uint shfl_init = THREAD_POW2(threads) / 64;
    uint lut_max = (threads * unroll) >> bshift;
    uint shared = lut_max * 8; // bytes: 4B offset + 4B 32-bit mask per entry
    if (block_size == 64)
    {
        shared = lut_max * 12; // 4B offset + 8B 64-bit mask per entry
        if (unroll == 16)
            bst_masked_softmax<16,1,64,T,V,uint64><<<grid,threads,shared,stream>>>(lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else if (unroll == 8)
            bst_masked_softmax< 8,2,64,T,V,uint64><<<grid,threads,shared,stream>>>(lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else // (unroll == 4)
            bst_masked_softmax< 4,2,64,T,V,uint64><<<grid,threads,shared,stream>>>(lut, (const uint64*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
    }
    else if (block_size == 32)
    {
        if (unroll == 16)
            bst_masked_softmax<16,1,32,T,V, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else if (unroll == 8)
            bst_masked_softmax< 8,2,32,T,V, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else // (unroll == 4)
            bst_masked_softmax< 4,2,32,T,V, uint><<<grid,threads,shared,stream>>>(lut, (const uint*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
    }
    else if (block_size == 16)
    {
        if (unroll == 16)
            bst_masked_softmax<16,1,16,T,V,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else if (unroll == 8)
            bst_masked_softmax< 8,2,16,T,V,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else // (unroll == 4)
            bst_masked_softmax< 4,2,16,T,V,ushort><<<grid,threads,shared,stream>>>(lut, (const ushort*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
    }
    else
    {
        if (unroll == 16)
            bst_masked_softmax<16,1, 8,T,V, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else if (unroll == 8)
            bst_masked_softmax< 8,2, 8,T,V, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
        else // (unroll == 4)
            bst_masked_softmax< 4,2, 8,T,V, uchar><<<grid,threads,shared,stream>>>(lut, (const uchar*)mask, x, y, blocks, szLut, szMask, szHead, szBatch, scale, shfl_init, lut_max, mask != NULL);
    }
    return true;
}
// explicit instantiations for the two supported half formats
template bool BlocksparseMaskedSoftmax<ehalf,ehalf2>(CUstream stream, const uint2* lut, const char* mask, const bhalf* x, ehalf* y, uint block_size, uint blocks, uint batch_dim, uint head_dim, uint ctx_blks, uint lut_heads, uint lut_dim, uint max_lut, uint mask_heads, float scale);
template bool BlocksparseMaskedSoftmax<bhalf,bhalf2>(CUstream stream, const uint2* lut, const char* mask, const bhalf* x, bhalf* y, uint block_size, uint blocks, uint batch_dim, uint head_dim, uint ctx_blks, uint lut_heads, uint lut_dim, uint max_lut, uint mask_heads, float scale);
template <typename T, typename V>
bool BlocksparseMaskedSoftmaxGrad(CUstream stream,
const uint2* lut,
const T* dy,
const T* y,
T* dx,
uint block_size, uint blocks,
uint batch_dim, uint head_dim, uint ctx_blks,
uint lut_heads, uint lut_dim, uint max_lut,
float scale)
{
uint szLut = lut_heads > 1 ? lut_dim : 0;
uint gridQ = ctx_blks * block_size;
uint szHead = blocks * block_size * block_size;
uint szBatch = head_dim * szHead;
uint maxK = max_lut * block_size;
//cuMemsetD16Async((CUdeviceptr)c, 0, szBatch*batch_dim, stream);
dim3 grid(gridQ, batch_dim, head_dim);
uint unroll, threads;
if (maxK > 1024*16) { unroll = 16; threads = CEIL_DIV(maxK, 32*16*2) * 32; }
else if (maxK > 1024* 8) { unroll = 8; threads = CEIL_DIV(maxK, 32* 8*2) * 32; }
else { unroll = 4; threads = CEIL_DIV(maxK, 32* 4*2) * 32; }
uint bshift = block_size == 64 ? 5 : block_size == 32 ? 4 : block_size == 16 ? 3 : 2;
uint shfl_init = THREAD_POW2(threads) / 64;
uint lut_max = (threads * unroll) >> bshift;
uint shared = lut_max * 4;
if (unroll == 16)
{
if (block_size == 64)
bst_masked_softmax_grad<16,1,64,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
else if (block_size == 32)
bst_masked_softmax_grad<16,1,32,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
else if (block_size == 16)
bst_masked_softmax_grad<16,1,16,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
else
bst_masked_softmax_grad<16,1, 8,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
}
else if (unroll == 8)
{
if (block_size == 64)
bst_masked_softmax_grad< 8,2,64,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
else if (block_size == 32)
bst_masked_softmax_grad< 8,2,32,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
else if (block_size == 16)
bst_masked_softmax_grad< 8,2,16,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
else
bst_masked_softmax_grad< 8,2, 8,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
}
else // (unroll == 4)
{
if (block_size == 64)
bst_masked_softmax_grad< 4,2,64,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
else if (block_size == 32)
bst_masked_softmax_grad< 4,2,32,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
else if (block_size == 16)
bst_masked_softmax_grad< 4,2,16,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
else
bst_masked_softmax_grad< 4,2, 8,T,V><<<grid,threads,shared,stream>>>(lut, dy, y, dx, szLut, szHead, szBatch, scale, shfl_init);
}
return true;
}
template bool BlocksparseMaskedSoftmaxGrad<ehalf,ehalf2>(CUstream stream, const uint2* lut, const ehalf* dy, const ehalf* y, ehalf* dx, uint block_size, uint blocks, uint batch_dim, uint head_dim, uint ctx_blks, uint lut_heads, uint lut_dim, uint max_lut, float scale);
template bool BlocksparseMaskedSoftmaxGrad<bhalf,bhalf2>(CUstream stream, const uint2* lut, const bhalf* dy, const bhalf* y, bhalf* dx, uint block_size, uint blocks, uint batch_dim, uint head_dim, uint ctx_blks, uint lut_heads, uint lut_dim, uint max_lut, float scale);
template <int BSIZE, typename MASKT>
__global__ void __launch_bounds__(32) bst_partial_autoregressive_mask(
const int2* __restrict__ Lut,
const MASKT* __restrict__ MaskI, MASKT* MaskO,
uint blocks, uint szLut, int autoregress_at_k)
{
uint tid = threadIdx.x;
uint bid = blockIdx.x; // grid id (each cuda block being assigned to 32 mask blocks)
uint qid = blockIdx.y; // q dim (row) within block
uint hid = blockIdx.z; // head dim
uint block = bid*32 + tid;
if (block < blocks)
{
uint l = hid*szLut + block;
uint m = hid*blocks*BSIZE + qid*blocks + block;
int2 entry = Lut[l];
MASKT mask = MaskI[m];
int K = entry.y*BSIZE; // entry.y: block index for keys
int Q = entry.x*BSIZE; // entry.x: block index for queries
int q = Q + qid; // full query index
// shift amount for the bidirectional to autoregressive transition
int shift_a = BSIZE - min(max(autoregress_at_k - K, 0), BSIZE);
// shift amount for the normal autoregressive property (lower triagular)
int shift_b = min(max(BSIZE-1 + K - q, 0), BSIZE);
// final shift is min value of these
int shift_c = min(shift_a, shift_b);
// apply the unsigned right shift to a pattern of ones to turn the mask off where needed
// a shift of zero means the mask is unchanged
// a shift of BSIZE means the mask is turned off for this row/block
// somewhere in between means it's partially off.
mask &= (MASKT)-1 >> shift_c;
MaskO[m] = mask;
}
}
bool BstPartialAutoregressiveMask(CUstream stream,
const int2* lut, const char* maskI, char* maskO,
uint block_size, uint blocks, uint lut_heads, uint lut_dim, int autoregress_at_k)
{
dim3 grid(CEIL_DIV(blocks,32), block_size, lut_heads);
if (block_size == 64)
bst_partial_autoregressive_mask<64,uint64><<<grid,32,0,stream>>>(lut, (const uint64*)maskI, (uint64*)maskO, blocks, lut_dim, autoregress_at_k);
else if (block_size == 32)
bst_partial_autoregressive_mask<32, uint><<<grid,32,0,stream>>>(lut, (const uint*)maskI, ( uint*)maskO, blocks, lut_dim, autoregress_at_k);
else if (block_size == 16)
bst_partial_autoregressive_mask<16,ushort><<<grid,32,0,stream>>>(lut, (const ushort*)maskI, (ushort*)maskO, blocks, lut_dim, autoregress_at_k);
else
bst_partial_autoregressive_mask< 8, uchar><<<grid,32,0,stream>>>(lut, (const uchar*)maskI, ( uchar*)maskO, blocks, lut_dim, autoregress_at_k);
return true;
}
#endif // GOOGLE_CUDA |
c164be6ceb9fc884bc254e8441c1c965380c13e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <string.h>
#include <iostream>
#define BLOCOS 1
//#define THREAD
#define CHECK_ERROR(call) do { \
if( hipSuccess != call) { \
std::cerr << std::endl << "CUDA ERRO: " << \
hipGetErrorString(call) << " in file: " << __FILE__ \
<< " in line: " << __LINE__ << std::endl; \
exit(0); \
} } while (0)
using namespace std;
typedef struct automato {
char letra;
automato *prox;
automato *ant;
automato *inf;
int final;
} Automato;
__global__ void pfac(Automato* at, int *matches, char *frase){
int x = blockDim.x * blockIdx.x + threadIdx.x;
}
Automato* newAutomato(Automato* ant) {
Automato *nv = (Automato*) malloc(sizeof(Automato));
nv->prox = NULL;
nv->inf = NULL;
nv->ant = ant;
return nv;
}
Automato* addAlgarismo(Automato *at, char algm, int first) {
if (at != NULL && at->letra == algm && first == 1) {
return at;
}
// Caso algarismo novo seja diferente do algarismo da raiz
else if (at != NULL && at->letra != algm && first == 1) {
Automato *pt = at->inf;
Automato *ant = pt;
while (pt != NULL) {
if (pt->letra == algm) {
return pt;
}
else {
if (pt != NULL) {
ant = pt;
pt = pt->inf;
}
}
}
Automato *nv = newAutomato(at);
nv->letra = algm;
if (ant != NULL) {
ant->inf = nv;
return ant->inf;
}
else {
at->inf = nv;
return at->inf;
}
}
else if(at != NULL && first == 0)
{
Automato *pt = at->prox;
Automato *ant = NULL;
while (pt != NULL) {
if (pt->letra == algm) {
return pt;
}
else
{
ant = pt;
pt = pt->inf;
}
}
Automato *nv = newAutomato(at);
nv->letra = algm;
if (ant != NULL) {
ant->inf = nv;
}
else {
at->prox = nv;
}
return nv;
}
else
{
Automato *nv = newAutomato(NULL);
nv->letra = algm;
return nv;
}
}
void imprimir(Automato *at)
{
Automato *temp = at;
while (temp != NULL) {
printf("%c ", temp->letra);
imprimir(temp->prox);
temp = temp->inf;
printf("\n");
}
}
/*Automato* mallocGPU(Automato *at)
{
Automato *temp = at;
while (temp != NULL) {
imprimir(temp->prox);
temp = temp->inf;
}
}*/
int main (int argc, char **argv)
{
int GPU = 0;
Automato *at = newAutomato(NULL);
at->letra = 'a';
at->prox = NULL;
char frase[255] = "ab abg bede ef"; //"abc acd abb agd acc";
int THREADS = strlen(frase);
Automato *temp = at;
int i = 0;
int first = 1;
while(frase[i] != '\0')
{
if(frase[i] != ' ')
{
temp = addAlgarismo(temp, frase[i], first);
first = 0;
//printf("Letra: %c\n", temp->letra);
}
else
{
temp->final = 1;
temp = at;
first = 1;
}
i++;
}
imprimir(at);
// CPU
char h_fita[255] = "ab abg bede ef";
int *h_matches = (int*) malloc(sizeof(int));
// GPU
Automato *d_at = NULL;
char *d_fita = NULL;
int *d_matches = NULL;
CHECK_ERROR(hipSetDevice(GPU));
*h_matches = 0;
//Reset na GPU selecionada
CHECK_ERROR(hipDeviceReset());
CHECK_ERROR(hipMalloc((void**) &d_at, sizeof(Automato*)));
CHECK_ERROR(hipMalloc((void**) &d_fita, 255*sizeof(char)));
CHECK_ERROR(hipMalloc((void**) &d_matches, sizeof(int)));
//Copiando CPU --> GPU
CHECK_ERROR(hipMemcpy(d_at, at, sizeof(Automato*), hipMemcpyHostToDevice));
CHECK_ERROR(hipMemcpy(d_fita, h_fita, 255*sizeof(char), hipMemcpyHostToDevice));
CHECK_ERROR(hipMemcpy(d_matches, h_matches, sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( pfac) , dim3(BLOCOS), dim3(THREADS), 0, 0, d_at, d_matches, d_fita);
//Copiando GPU --> CPU
CHECK_ERROR(hipMemcpy(at, d_at, sizeof(Automato*), hipMemcpyDeviceToHost));
CHECK_ERROR(hipMemcpy(h_fita, d_fita, 255*sizeof(char), hipMemcpyDeviceToHost));
CHECK_ERROR(hipMemcpy(h_matches, d_matches, sizeof(int), hipMemcpyDeviceToHost));
// Liberando memria na GPU
CHECK_ERROR(hipFree(d_at));
CHECK_ERROR(hipFree(d_fita));
CHECK_ERROR(hipFree(d_matches));
// Liberando memria na CPU
free(at);
free(h_matches);
free(h_fita);
return EXIT_SUCCESS;
}
| c164be6ceb9fc884bc254e8441c1c965380c13e3.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <string.h>
#include <iostream>
#define BLOCOS 1
//#define THREAD
#define CHECK_ERROR(call) do { \
if( cudaSuccess != call) { \
std::cerr << std::endl << "CUDA ERRO: " << \
cudaGetErrorString(call) << " in file: " << __FILE__ \
<< " in line: " << __LINE__ << std::endl; \
exit(0); \
} } while (0)
using namespace std;
typedef struct automato {
char letra;
automato *prox;
automato *ant;
automato *inf;
int final;
} Automato;
__global__ void pfac(Automato* at, int *matches, char *frase){
int x = blockDim.x * blockIdx.x + threadIdx.x;
}
Automato* newAutomato(Automato* ant) {
Automato *nv = (Automato*) malloc(sizeof(Automato));
nv->prox = NULL;
nv->inf = NULL;
nv->ant = ant;
return nv;
}
Automato* addAlgarismo(Automato *at, char algm, int first) {
if (at != NULL && at->letra == algm && first == 1) {
return at;
}
// Caso algarismo novo seja diferente do algarismo da raiz
else if (at != NULL && at->letra != algm && first == 1) {
Automato *pt = at->inf;
Automato *ant = pt;
while (pt != NULL) {
if (pt->letra == algm) {
return pt;
}
else {
if (pt != NULL) {
ant = pt;
pt = pt->inf;
}
}
}
Automato *nv = newAutomato(at);
nv->letra = algm;
if (ant != NULL) {
ant->inf = nv;
return ant->inf;
}
else {
at->inf = nv;
return at->inf;
}
}
else if(at != NULL && first == 0)
{
Automato *pt = at->prox;
Automato *ant = NULL;
while (pt != NULL) {
if (pt->letra == algm) {
return pt;
}
else
{
ant = pt;
pt = pt->inf;
}
}
Automato *nv = newAutomato(at);
nv->letra = algm;
if (ant != NULL) {
ant->inf = nv;
}
else {
at->prox = nv;
}
return nv;
}
else
{
Automato *nv = newAutomato(NULL);
nv->letra = algm;
return nv;
}
}
void imprimir(Automato *at)
{
Automato *temp = at;
while (temp != NULL) {
printf("%c ", temp->letra);
imprimir(temp->prox);
temp = temp->inf;
printf("\n");
}
}
/*Automato* mallocGPU(Automato *at)
{
Automato *temp = at;
while (temp != NULL) {
imprimir(temp->prox);
temp = temp->inf;
}
}*/
int main (int argc, char **argv)
{
int GPU = 0;
Automato *at = newAutomato(NULL);
at->letra = 'a';
at->prox = NULL;
char frase[255] = "ab abg bede ef"; //"abc acd abb agd acc";
int THREADS = strlen(frase);
Automato *temp = at;
int i = 0;
int first = 1;
while(frase[i] != '\0')
{
if(frase[i] != ' ')
{
temp = addAlgarismo(temp, frase[i], first);
first = 0;
//printf("Letra: %c\n", temp->letra);
}
else
{
temp->final = 1;
temp = at;
first = 1;
}
i++;
}
imprimir(at);
// CPU
char h_fita[255] = "ab abg bede ef";
int *h_matches = (int*) malloc(sizeof(int));
// GPU
Automato *d_at = NULL;
char *d_fita = NULL;
int *d_matches = NULL;
CHECK_ERROR(cudaSetDevice(GPU));
*h_matches = 0;
//Reset na GPU selecionada
CHECK_ERROR(cudaDeviceReset());
CHECK_ERROR(cudaMalloc((void**) &d_at, sizeof(Automato*)));
CHECK_ERROR(cudaMalloc((void**) &d_fita, 255*sizeof(char)));
CHECK_ERROR(cudaMalloc((void**) &d_matches, sizeof(int)));
//Copiando CPU --> GPU
CHECK_ERROR(cudaMemcpy(d_at, at, sizeof(Automato*), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(d_fita, h_fita, 255*sizeof(char), cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy(d_matches, h_matches, sizeof(int), cudaMemcpyHostToDevice));
pfac <<<BLOCOS, THREADS>>> (d_at, d_matches, d_fita);
//Copiando GPU --> CPU
CHECK_ERROR(cudaMemcpy(at, d_at, sizeof(Automato*), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(h_fita, d_fita, 255*sizeof(char), cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(h_matches, d_matches, sizeof(int), cudaMemcpyDeviceToHost));
// Liberando memória na GPU
CHECK_ERROR(cudaFree(d_at));
CHECK_ERROR(cudaFree(d_fita));
CHECK_ERROR(cudaFree(d_matches));
// Liberando memória na CPU
free(at);
free(h_matches);
free(h_fita);
return EXIT_SUCCESS;
}
|
0a8e36d8b921499e116cb46ab08d5035353440fc.hip | // !!! This is a file automatically generated by hipify!!!
/**
*Base on https://devblogs.nvidia.com/how-query-device-properties-and-handle-errors-cuda-cc/
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
// print device properties
void showDevice(const hipDeviceProp_t &prop)
{
printf("Device Name : %s\n", prop.name);
printf("Major revision number: %d\n", prop.major);
printf("Minor revision number: %d\n", prop.minor);
printf("Number of Stream MultiProcessor : %d.\n", prop.multiProcessorCount);
printf("Memory Clock Rate (KHz) : %d\n", prop.memoryClockRate);
printf("Memory Bus Width (bits) : %d\n",prop.memoryBusWidth);
printf("Peak Memory Bandwidth (GB/s): %f\n\n",2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf("Total Global Memory : %d.\n", prop.totalGlobalMem);
printf("Shared Memory Per Block : %d.\n", prop.sharedMemPerBlock);
printf("Registers Per Block : %d.\n", prop.regsPerBlock);
printf("Warp Size : %d.\n", prop.warpSize);
printf("Max Threads Per Block : %d.\n", prop.maxThreadsPerBlock);
printf("Max Threads Dim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Max Grid Size[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("Total Const Memory : %d.\n", prop.totalConstMem);
printf("Clock Rate : %d.\n", prop.clockRate);
printf("Texture Alignment : %d.\n", prop.textureAlignment);
printf("Device Overlap : %d.\n", prop.deviceOverlap);
}
bool initCUDA()
{
int count;
printf("CUDA Device Query...\n");
hipGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
printf("You now have %d CUDA devices.\n",count);
// find the device >= 1.X
int i;
for (i = 0; i < count; ++i) {
hipDeviceProp_t prop;
if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
if (prop.major >= 1) {
showDevice(prop);
break;
}
}
}
// if can't find the device
if (i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
// set cuda device
hipSetDevice(i);
return true;
}
int main(int argc, char const *argv[])
{
if (initCUDA()) {
printf("CUDA initialized.\n");
}
return 0;
}
| 0a8e36d8b921499e116cb46ab08d5035353440fc.cu | /**
*Base on https://devblogs.nvidia.com/how-query-device-properties-and-handle-errors-cuda-cc/
*/
#include <stdio.h>
#include <cuda_runtime.h>
// print device properties
void showDevice(const cudaDeviceProp &prop)
{
printf("Device Name : %s\n", prop.name);
printf("Major revision number: %d\n", prop.major);
printf("Minor revision number: %d\n", prop.minor);
printf("Number of Stream MultiProcessor : %d.\n", prop.multiProcessorCount);
printf("Memory Clock Rate (KHz) : %d\n", prop.memoryClockRate);
printf("Memory Bus Width (bits) : %d\n",prop.memoryBusWidth);
printf("Peak Memory Bandwidth (GB/s): %f\n\n",2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf("Total Global Memory : %d.\n", prop.totalGlobalMem);
printf("Shared Memory Per Block : %d.\n", prop.sharedMemPerBlock);
printf("Registers Per Block : %d.\n", prop.regsPerBlock);
printf("Warp Size : %d.\n", prop.warpSize);
printf("Max Threads Per Block : %d.\n", prop.maxThreadsPerBlock);
printf("Max Threads Dim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Max Grid Size[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("Total Const Memory : %d.\n", prop.totalConstMem);
printf("Clock Rate : %d.\n", prop.clockRate);
printf("Texture Alignment : %d.\n", prop.textureAlignment);
printf("Device Overlap : %d.\n", prop.deviceOverlap);
}
bool initCUDA()
{
int count;
printf("CUDA Device Query...\n");
cudaGetDeviceCount(&count);
if (count == 0) {
fprintf(stderr, "There is no device.\n");
return false;
}
printf("You now have %d CUDA devices.\n",count);
// find the device >= 1.X
int i;
for (i = 0; i < count; ++i) {
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
if (prop.major >= 1) {
showDevice(prop);
break;
}
}
}
// if can't find the device
if (i == count) {
fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
return false;
}
// set cuda device
cudaSetDevice(i);
return true;
}
int main(int argc, char const *argv[])
{
if (initCUDA()) {
printf("CUDA initialized.\n");
}
return 0;
}
|
9074bf09f506e0f6a6f9b378a748634b92096dfd.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/hip/fused_adam_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/hip/fused_adam_utils.cuh>
#include <ATen/native/hip/MultiTensorApply.cuh>
#include <vector>
namespace at { namespace native {
void _fused_adam_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool amsgrad,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adam_kernel_cuda", [&]() {
multi_tensor_apply_for_fused_optimizer<4>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 4>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */false,
grad_scale_ptr,
found_inf_ptr);
});
}
} } // namespace at::native
| 9074bf09f506e0f6a6f9b378a748634b92096dfd.cu | #include <ATen/native/cuda/fused_adam_impl.cuh>
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <ATen/native/cuda/fused_adam_utils.cuh>
#include <ATen/native/cuda/MultiTensorApply.cuh>
#include <vector>
namespace at { namespace native {
void _fused_adam_cuda_impl_(
at::TensorList params,
at::TensorList grads,
at::TensorList exp_avgs,
at::TensorList exp_avg_sqs,
at::TensorList state_steps,
const double lr,
const double beta1,
const double beta2,
const double weight_decay,
const double eps,
const bool amsgrad,
const bool maximize,
const c10::optional<at::Tensor>& grad_scale,
const c10::optional<at::Tensor>& found_inf
) {
std::vector<std::vector<at::Tensor>> tensor_lists{
params.vec(), grads.vec(), exp_avgs.vec(), exp_avg_sqs.vec() };
float* grad_scale_ptr = grad_scale.has_value() ? grad_scale->data_ptr<float>() : nullptr;
float* found_inf_ptr = found_inf.has_value() ? found_inf->data_ptr<float>() : nullptr;
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, params[0].scalar_type(),
"fused_adam_kernel_cuda", [&]() {
multi_tensor_apply_for_fused_optimizer<4>(
tensor_lists,
state_steps,
FusedAdamMathFunctor<scalar_t, 4>(),
lr,
beta1,
beta2,
weight_decay,
eps,
maximize,
/* amsgrad */false,
grad_scale_ptr,
found_inf_ptr);
});
}
} } // namespace at::native
|
93fe5d4d1f088e43acc165e7624afadf2444d26e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
__constant__ int datos[1024];
__global__ void kernel(int *d_dst) {
int tId = threadIdx.x + blockIdx.x * blockDim.x;
d_dst[tId] = datos[tId];
}
int main(int argc, char **argv) {
int *d_datos, *h_datos;
hipMalloc((void**)&d_datos, sizeof(int) * 1024);
h_datos = (int *)malloc(sizeof(int) * 1024);
int *test = new int[1024];
memset(test, 0, sizeof(int) * 1024);
for (int i = 0; i < 1024; i++) {
test[i] = i;
}
//GPU Time
hipEvent_t start, stop;
float time;
hipMemcpyToSymbol(datos, test, sizeof(int) * 1024);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
// Kernel call
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1024) , 0, 0, d_datos);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
// Copying From Device to Host
hipMemcpy(h_datos, d_datos, sizeof(int)*1024, hipMemcpyDeviceToHost);
printf("Time : %f ms\n",time);
hipEventDestroy(start);
hipEventDestroy(stop);
free(test);
hipFree(d_datos);
return 0;
}
| 93fe5d4d1f088e43acc165e7624afadf2444d26e.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
__constant__ int datos[1024];
__global__ void kernel(int *d_dst) {
int tId = threadIdx.x + blockIdx.x * blockDim.x;
d_dst[tId] = datos[tId];
}
int main(int argc, char **argv) {
int *d_datos, *h_datos;
cudaMalloc((void**)&d_datos, sizeof(int) * 1024);
h_datos = (int *)malloc(sizeof(int) * 1024);
int *test = new int[1024];
memset(test, 0, sizeof(int) * 1024);
for (int i = 0; i < 1024; i++) {
test[i] = i;
}
//GPU Time
cudaEvent_t start, stop;
float time;
cudaMemcpyToSymbol(datos, test, sizeof(int) * 1024);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
// Kernel call
kernel<<< 1, 1024 >>>(d_datos);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
// Copying From Device to Host
cudaMemcpy(h_datos, d_datos, sizeof(int)*1024, cudaMemcpyDeviceToHost);
printf("Time : %f ms\n",time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(test);
cudaFree(d_datos);
return 0;
}
|
c66e03cd887ba0b01a0b7709442b0d90eae30352.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/detail/FunctionTraits.h>
#include <cmath>
#include <limits>
#define GPU_LAMBDA __device__ __host__
namespace {
constexpr int num_threads = C10_WARP_SIZE * 2;
constexpr int thread_work_size = 1;
constexpr int block_work_size = thread_work_size * num_threads;
template<typename index_t, typename func_t>
C10_LAUNCH_BOUNDS_1(num_threads)
__global__ void elementwise_kernel_with_index(index_t N, func_t f, typename function_traits<func_t>::result_type *data) {
#pragma unroll
for (int i = 0; i < thread_work_size; i++) {
index_t idx = block_work_size * blockIdx.x + num_threads * i + threadIdx.x;
if (idx < N) {
data[idx] = f(idx);
}
}
}
template<typename func_t>
void gpu_kernel_with_index(at::Tensor &output, func_t f) {
int64_t N = output.numel();
if (N == 0) {
return;
}
int64_t grid = (N + block_work_size - 1) / block_work_size;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
using scalar_t = typename function_traits<func_t>::result_type;
if (N <= std::numeric_limits<int>::max()) {
hipLaunchKernelGGL(( elementwise_kernel_with_index<int>), dim3(grid), dim3(num_threads), 0, stream, N, f, output.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
hipLaunchKernelGGL(( elementwise_kernel_with_index<int64_t>), dim3(grid), dim3(num_threads), 0, stream, N, f, output.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
} // namespace
namespace at {
namespace native {
Tensor& linspace_cuda_out(const Scalar& start, const Scalar& end, int64_t steps, Tensor& result) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(start);
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
// Cast `end` and `start` to `float`, since range can be larger than scalar_t for integral types
float step = (static_cast<float>(scalar_end) - static_cast<float>(scalar_start)) / (steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return scalar_start + (step * ind);
}
return scalar_end - step * (steps - ind - 1);
});
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return scalar_start + (step * ind);
}
return scalar_end - step * (steps - ind - 1);
});
});
}
if (!is_contiguous) {
result.copy_(r);
}
return result;
}
Tensor& logspace_cuda_out(const Scalar& start, const Scalar& end, int64_t steps, double base, Tensor& result) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
if (steps == 0) {
// skip
} else if (steps == 1) {
if (isComplexType(r.scalar_type())){
r.fill_(::pow(base, start.to<c10::complex<double>>()));
} else {
r.fill_(::pow(base, start.to<double>()));
}
} else if (isIntegralType(r.scalar_type(), 0)) {
AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "logspace_cuda", [&]() {
float scalar_base = static_cast<float>(base); // Use float to avoid promotion to double
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
float step = static_cast<float>(scalar_end - scalar_start) / (steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return ::pow(scalar_base, scalar_start + step * ind);
}
return ::pow(scalar_base, scalar_end - step * (steps - ind - 1));
});
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "logspace_cuda", [&]() {
scalar_t scalar_base = static_cast<scalar_t>(base);
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
const int64_t halfway = steps / 2;
gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
if (ind < halfway) {
return ::pow(scalar_base, scalar_start + step * ind);
}
return ::pow(scalar_base, scalar_end - step * (steps - ind - 1));
});
});
}
if (!is_contiguous) {
result.copy_(r);
}
return result;
}
Tensor& range_cuda_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
if (result.numel() != size) {
result.resize_({size});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(!is_contiguous) {
result.copy_(r);
}
});
return result;
}
Tensor& arange_cuda_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "arange_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
// we use double precision for (start - end) / step
// to compute size_d for consistency across devices.
// The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
// but double on cpu for the same,
// and the effective output size starts differing on CPU vs GPU because of precision issues, which
// we dont want.
// the corner-case we do want to take into account is int64_t, which has higher precision than double
double size_d;
if (std::is_same<scalar_t, int64_t>::value) {
size_d = ::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
/ step.to<accscalar_t>());
} else {
size_d = ::ceil(static_cast<double>(end.to<double>() - start.to<double>())
/ step.to<double>());
}
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
"invalid size, possible overflow?");
int64_t size = static_cast<int64_t>(size_d);
int64_t numel = result.numel();
if (numel != size) {
if(numel > 0){
TORCH_WARN("The number of elements in the out tensor of shape ", result.sizes(),
" is ", numel, " which does not match the computed number of elements ", size,
". Note that this may occur as a result of rounding error. "
"The out tensor will be resized to a tensor of shape (", size, ",).");
}
result.resize_({size});
}
bool is_contiguous = result.is_contiguous();
Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
accscalar_t val = xstart + inc;
return static_cast<scalar_t>(val);
});
if(!is_contiguous) {
result.copy_(r);
}
});
return result;
}
}} // namespace at::native
| c66e03cd887ba0b01a0b7709442b0d90eae30352.cu | #include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/Exceptions.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/detail/FunctionTraits.h>
#include <cmath>
#include <limits>
#define GPU_LAMBDA __device__ __host__
namespace {
// Launch shape: num_threads threads per block, thread_work_size elements per
// thread, so each block covers block_work_size consecutive output indices.
constexpr int num_threads = C10_WARP_SIZE * 2;
constexpr int thread_work_size = 1;
constexpr int block_work_size = thread_work_size * num_threads;
// Writes data[idx] = f(idx) for every idx in [0, N).
// index_t is int or int64_t, chosen by the launcher so that N fits.
template<typename index_t, typename func_t>
C10_LAUNCH_BOUNDS_1(num_threads)
__global__ void elementwise_kernel_with_index(index_t N, func_t f, typename function_traits<func_t>::result_type *data) {
  #pragma unroll
  for (int i = 0; i < thread_work_size; i++) {
    index_t idx = block_work_size * blockIdx.x + num_threads * i + threadIdx.x;
    if (idx < N) {  // tail guard: the grid is rounded up to whole blocks
      data[idx] = f(idx);
    }
  }
}
// Host launcher: fills output[i] = f(i) for every linear index i, on the
// current CUDA stream. Callers in this file pass a contiguous tensor.
template<typename func_t>
void gpu_kernel_with_index(at::Tensor &output, func_t f) {
  int64_t N = output.numel();
  if (N == 0) {
    return;
  }
  int64_t grid = (N + block_work_size - 1) / block_work_size;  // ceil-div
  auto stream = at::cuda::getCurrentCUDAStream();
  using scalar_t = typename function_traits<func_t>::result_type;
  // Prefer 32-bit indexing when N fits — cheaper index arithmetic on device.
  if (N <= std::numeric_limits<int>::max()) {
    elementwise_kernel_with_index<int><<<grid, num_threads, 0, stream>>>(N, f, output.data_ptr<scalar_t>());
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  } else {
    elementwise_kernel_with_index<int64_t><<<grid, num_threads, 0, stream>>>(N, f, output.data_ptr<scalar_t>());
    C10_CUDA_KERNEL_LAUNCH_CHECK();
  }
}
} // namespace
namespace at {
namespace native {
// Out-variant of torch.linspace on CUDA: writes `steps` evenly spaced values
// from `start` to `end` (inclusive) into `result` and returns it.
Tensor& linspace_cuda_out(const Scalar& start, const Scalar& end, int64_t steps, Tensor& result) {
  TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
  if (result.numel() != steps) {
    result.resize_({steps});
  }
  // gpu_kernel_with_index writes by linear index, so compute into a
  // contiguous scratch tensor and copy back if `result` is non-contiguous.
  bool is_contiguous = result.is_contiguous();
  Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
  if (steps == 0) {
    // skip
  } else if (steps == 1) {
    r.fill_(start);
  } else if (isIntegralType(r.scalar_type(), 0)) {
    AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "linspace_cuda", [&]() {
      scalar_t scalar_start = start.to<scalar_t>();
      scalar_t scalar_end = end.to<scalar_t>();
      // Cast `end` and `start` to `float`, since range can be larger than scalar_t for integral types
      float step = (static_cast<float>(scalar_end) - static_cast<float>(scalar_start)) / (steps - 1);
      const int64_t halfway = steps / 2;
      // Front half counts up from `start`, back half counts down from `end`
      // — presumably so both endpoints are reproduced exactly despite
      // floating-point rounding in `step`.
      gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
        if (ind < halfway) {
          return scalar_start + (step * ind);
        }
        return scalar_end - step * (steps - ind - 1);
      });
    });
  } else {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "linspace_cuda", [&]() {
      scalar_t scalar_start = start.to<scalar_t>();
      scalar_t scalar_end = end.to<scalar_t>();
      scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
      const int64_t halfway = steps / 2;
      gpu_kernel_with_index(r, [scalar_start, scalar_end, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
        if (ind < halfway) {
          return scalar_start + (step * ind);
        }
        return scalar_end - step * (steps - ind - 1);
      });
    });
  }
  if (!is_contiguous) {
    result.copy_(r);
  }
  return result;
}
// Out-variant of torch.logspace on CUDA: writes `steps` values
// base^v for v evenly spaced between `start` and `end` into `result`.
Tensor& logspace_cuda_out(const Scalar& start, const Scalar& end, int64_t steps, double base, Tensor& result) {
  TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
  if (result.numel() != steps) {
    result.resize_({steps});
  }
  // Compute into a contiguous scratch tensor if `result` is non-contiguous;
  // copied back at the end.
  bool is_contiguous = result.is_contiguous();
  Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
  if (steps == 0) {
    // skip
  } else if (steps == 1) {
    // Single element: just base^start (complex exponent needs its own path).
    if (isComplexType(r.scalar_type())){
      r.fill_(std::pow(base, start.to<c10::complex<double>>()));
    } else {
      r.fill_(std::pow(base, start.to<double>()));
    }
  } else if (isIntegralType(r.scalar_type(), 0)) {
    AT_DISPATCH_INTEGRAL_TYPES(r.scalar_type(), "logspace_cuda", [&]() {
      float scalar_base = static_cast<float>(base); // Use float to avoid promotion to double
      scalar_t scalar_start = start.to<scalar_t>();
      scalar_t scalar_end = end.to<scalar_t>();
      float step = static_cast<float>(scalar_end - scalar_start) / (steps - 1);
      const int64_t halfway = steps / 2;
      // Same two-sided fill as linspace: exponents in the front half are
      // computed from `start`, the back half from `end`.
      gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
        if (ind < halfway) {
          return std::pow(scalar_base, scalar_start + step * ind);
        }
        return std::pow(scalar_base, scalar_end - step * (steps - ind - 1));
      });
    });
  } else {
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, r.scalar_type(), "logspace_cuda", [&]() {
      scalar_t scalar_base = static_cast<scalar_t>(base);
      scalar_t scalar_start = start.to<scalar_t>();
      scalar_t scalar_end = end.to<scalar_t>();
      scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
      const int64_t halfway = steps / 2;
      gpu_kernel_with_index(r, [scalar_start, scalar_end, scalar_base, steps, step, halfway]GPU_LAMBDA(int64_t ind) -> scalar_t {
        if (ind < halfway) {
          return std::pow(scalar_base, scalar_start + step * ind);
        }
        return std::pow(scalar_base, scalar_end - step * (steps - ind - 1));
      });
    });
  }
  if (!is_contiguous) {
    result.copy_(r);
  }
  return result;
}
// Out-variant of the (deprecated) torch.range on CUDA: fills `result` with
// start, start+step, ... — note the `+ 1` below makes `end` INCLUSIVE,
// unlike arange.
Tensor& range_cuda_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
  AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() {
    // Accumulate in a higher-precision type for the index arithmetic.
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto xstart = start.to<accscalar_t>();
    auto xend = end.to<accscalar_t>();
    auto xstep = step.to<accscalar_t>();
    TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
    TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
                std::isfinite(static_cast<double>(xend)),
                "unsupported range: ", xstart, " -> ", xend);
    TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
                "upper bound and larger bound inconsistent with step sign");
    // +1 → closed interval [start, end].
    int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
    if (result.numel() != size) {
      result.resize_({size});
    }
    // Compute into a contiguous scratch tensor if needed; copied back below.
    bool is_contiguous = result.is_contiguous();
    Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
    gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
      accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
      accscalar_t val = xstart + inc;
      return static_cast<scalar_t>(val);
    });
    if(!is_contiguous) {
      result.copy_(r);
    }
  });
  return result;
}
// Out-variant of torch.arange on CUDA: fills `result` with
// start, start+step, ... over the half-open interval [start, end).
Tensor& arange_cuda_out(const Scalar& start, const Scalar& end, const Scalar& step, Tensor& result) {
  AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, result.scalar_type(), "arange_cuda", [&]() {
    using accscalar_t = at::acc_type<scalar_t, true>;
    auto xstart = start.to<accscalar_t>();
    auto xend = end.to<accscalar_t>();
    auto xstep = step.to<accscalar_t>();
    // we use double precision for (start - end) / step
    // to compute size_d for consistency across devices.
    // The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
    // but double on cpu for the same,
    // and the effective output size starts differing on CPU vs GPU because of precision issues, which
    // we dont want.
    // the corner-case we do want to take into account is int64_t, which has higher precision than double
    double size_d;
    if (std::is_same<scalar_t, int64_t>::value) {
      size_d = std::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
                         / step.to<accscalar_t>());
    } else {
      size_d = std::ceil(static_cast<double>(end.to<double>() - start.to<double>())
                         / step.to<double>());
    }
    TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
    TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
                std::isfinite(static_cast<double>(xend)),
                "unsupported range: ", xstart, " -> ", xend);
    TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
                "upper bound and larger bound inconsistent with step sign");
    TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
                "invalid size, possible overflow?");
    int64_t size = static_cast<int64_t>(size_d);
    int64_t numel = result.numel();
    // Resize (with a warning) if the caller's out tensor has the wrong size.
    if (numel != size) {
      if(numel > 0){
        TORCH_WARN("The number of elements in the out tensor of shape ", result.sizes(),
                    " is ", numel, " which does not match the computed number of elements ", size,
                    ". Note that this may occur as a result of rounding error. "
                    "The out tensor will be resized to a tensor of shape (", size, ",).");
      }
      result.resize_({size});
    }
    // Compute into a contiguous scratch tensor if needed; copied back below.
    bool is_contiguous = result.is_contiguous();
    Tensor r = !is_contiguous ? at::empty_like(result, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : result;
    gpu_kernel_with_index(r, [xstart, xstep]GPU_LAMBDA(int64_t ind) -> scalar_t {
      accscalar_t inc = xstep * static_cast<accscalar_t>(ind);
      accscalar_t val = xstart + inc;
      return static_cast<scalar_t>(val);
    });
    if(!is_contiguous) {
      result.copy_(r);
    }
  });
  return result;
}
}} // namespace at::native
|
dbf4dd17bc08edfc3dfd446eba62778556be0eca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ppm/kernels/codelets.h"
using namespace ppm::kernels::codelets;
#include "ppm/kernels/helpers.cuh"
#include "utils/config.h"
#include "ppm/ptrfreescene.h"
#include "utils/random.h"
#include "ppm/types.h"
using ppm::PtrFreeScene;
using ppm::EyePath;
#include <starpu.h>
#include <cstdio>
#include <cstddef>
namespace ppm { namespace kernels { namespace cuda {
// One thread per photon path: sample a light source and emit the initial
// photon ray for this photon-tracing pass, writing into photon_paths[index].
void __global__ generate_photon_paths_impl(
    PhotonPath* const photon_paths,
    const unsigned photon_paths_count,
    Seed* const seed_buffer,
    const PtrFreeScene* scene) {
  const unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
  // Tail guard: the grid is rounded up to whole blocks by the launcher.
  if (index >= photon_paths_count)
    return;
  PhotonPath& path = photon_paths[index];
  Ray& ray = path.ray;
  float light_pdf;
  float pdf;
  Spectrum f;  // NOTE(review): never used in this kernel — candidate for removal
  // Five uniform samples from this path's RNG stream:
  // u0 selects the light, u1..u4 sample its emission.
  const float u0 = floatRNG(seed_buffer[index]);
  const float u1 = floatRNG(seed_buffer[index]);
  const float u2 = floatRNG(seed_buffer[index]);
  const float u3 = floatRNG(seed_buffer[index]);
  const float u4 = floatRNG(seed_buffer[index]);
  int light_index;
  ppm::LightType light_type;
  light_type = helpers::sample_all_lights(u0, scene->area_lights_count, scene->infinite_light, scene->sun_light, scene->sky_light, light_pdf, light_index);
  // Dispatch on the sampled light kind; each helper fills pdf, ray and flux.
  if (light_type == ppm::LIGHT_IL_IS)
    helpers::infinite_light_sample_l(u1, u2, u3, u4, scene->infinite_light, scene->infinite_light_map, scene->bsphere, pdf, ray, path.flux);
  else if (light_type == ppm::LIGHT_SUN)
    helpers::sun_light_sample_l(u1, u2, u3, u4, scene->sun_light, scene->bsphere, pdf, ray, path.flux);
  else if (light_type == ppm::LIGHT_IL_SKY)
    helpers::sky_light_sample_l(u1, u2, u3, u4, scene->sky_light, scene->bsphere, pdf, ray, path.flux);
  else {
    helpers::triangle_light_sample_l(u1, u2, u3, u4, scene->area_lights[light_index], scene->mesh_descs, scene->colors, pdf, ray, path.flux);
  }
  // Importance-sampling weight: divide by the joint pdf of choosing this
  // light and sampling this emission direction.
  path.flux /= pdf * light_pdf;
  path.depth = 0;
  path.done = 0;
}
// StarPU codelet entry point: launches generate_photon_paths_impl over
// buffers[0] (vector of PhotonPath) using buffers[1] (per-path RNG seeds),
// then waits on the local stream and reports timing.
void generate_photon_paths(void* buffers[], void* args_orig) {
  int device_id;
  hipGetDevice(&device_id);
  const timeval start_time = my_WallClockTime();
  // cl_args
  // NOTE(review): `args` is declared const yet written through &args by
  // unpack_args below — confirm this is intended.
  const starpu_args args;
  unsigned iteration;
  starpu_codelet_unpack_args(args_orig, &args, &iteration);
  // buffers
  // photon paths
  PhotonPath* const photon_paths = (PhotonPath*)STARPU_VECTOR_GET_PTR(buffers[0]);
  const unsigned size = STARPU_VECTOR_GET_NX(buffers[0]);
  // seeds
  Seed* const seed_buffer = (Seed*)STARPU_VECTOR_GET_PTR(buffers[1]);
  // cuda dims
  const unsigned threads_per_block = args.config->cuda_block_size;
  // Round up so every photon path gets a thread; the kernel bounds-checks.
  const unsigned n_blocks = ::ceil(size / (float)threads_per_block);
  hipLaunchKernelGGL(( generate_photon_paths_impl)
      , dim3(n_blocks), dim3(threads_per_block), 0, starpu_cuda_get_local_stream(),
      photon_paths,
      size,
      seed_buffer,
      args.gpu_scene[device_id]);
  // Block until the kernel finishes, then surface any launch/exec error.
  hipStreamSynchronize(starpu_cuda_get_local_stream());
  CUDA_SAFE(hipGetLastError());
  const timeval end_time = my_WallClockTime();
  task_info("GPU", device_id, 0, iteration, start_time, end_time, "(6) generate_photon_paths");
}
} } }
| dbf4dd17bc08edfc3dfd446eba62778556be0eca.cu | #include "ppm/kernels/codelets.h"
using namespace ppm::kernels::codelets;
#include "ppm/kernels/helpers.cuh"
#include "utils/config.h"
#include "ppm/ptrfreescene.h"
#include "utils/random.h"
#include "ppm/types.h"
using ppm::PtrFreeScene;
using ppm::EyePath;
#include <starpu.h>
#include <cstdio>
#include <cstddef>
namespace ppm { namespace kernels { namespace cuda {
// One thread per photon path: sample a light source and emit the initial
// photon ray for this photon-tracing pass, writing into photon_paths[index].
void __global__ generate_photon_paths_impl(
    PhotonPath* const photon_paths,
    const unsigned photon_paths_count,
    Seed* const seed_buffer,
    const PtrFreeScene* scene) {
  const unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
  // Tail guard: the grid is rounded up to whole blocks by the launcher.
  if (index >= photon_paths_count)
    return;
  PhotonPath& path = photon_paths[index];
  Ray& ray = path.ray;
  float light_pdf;
  float pdf;
  Spectrum f;  // NOTE(review): never used in this kernel — candidate for removal
  // Five uniform samples from this path's RNG stream:
  // u0 selects the light, u1..u4 sample its emission.
  const float u0 = floatRNG(seed_buffer[index]);
  const float u1 = floatRNG(seed_buffer[index]);
  const float u2 = floatRNG(seed_buffer[index]);
  const float u3 = floatRNG(seed_buffer[index]);
  const float u4 = floatRNG(seed_buffer[index]);
  int light_index;
  ppm::LightType light_type;
  light_type = helpers::sample_all_lights(u0, scene->area_lights_count, scene->infinite_light, scene->sun_light, scene->sky_light, light_pdf, light_index);
  // Dispatch on the sampled light kind; each helper fills pdf, ray and flux.
  if (light_type == ppm::LIGHT_IL_IS)
    helpers::infinite_light_sample_l(u1, u2, u3, u4, scene->infinite_light, scene->infinite_light_map, scene->bsphere, pdf, ray, path.flux);
  else if (light_type == ppm::LIGHT_SUN)
    helpers::sun_light_sample_l(u1, u2, u3, u4, scene->sun_light, scene->bsphere, pdf, ray, path.flux);
  else if (light_type == ppm::LIGHT_IL_SKY)
    helpers::sky_light_sample_l(u1, u2, u3, u4, scene->sky_light, scene->bsphere, pdf, ray, path.flux);
  else {
    helpers::triangle_light_sample_l(u1, u2, u3, u4, scene->area_lights[light_index], scene->mesh_descs, scene->colors, pdf, ray, path.flux);
  }
  // Importance-sampling weight: divide by the joint pdf of choosing this
  // light and sampling this emission direction.
  path.flux /= pdf * light_pdf;
  path.depth = 0;
  path.done = 0;
}
// StarPU codelet entry point: launches generate_photon_paths_impl over
// buffers[0] (vector of PhotonPath) using buffers[1] (per-path RNG seeds),
// then waits on the local stream and reports timing.
void generate_photon_paths(void* buffers[], void* args_orig) {
  int device_id;
  cudaGetDevice(&device_id);
  const timeval start_time = my_WallClockTime();
  // cl_args
  // NOTE(review): `args` is declared const yet written through &args by
  // unpack_args below — confirm this is intended.
  const starpu_args args;
  unsigned iteration;
  starpu_codelet_unpack_args(args_orig, &args, &iteration);
  // buffers
  // photon paths
  PhotonPath* const photon_paths = (PhotonPath*)STARPU_VECTOR_GET_PTR(buffers[0]);
  const unsigned size = STARPU_VECTOR_GET_NX(buffers[0]);
  // seeds
  Seed* const seed_buffer = (Seed*)STARPU_VECTOR_GET_PTR(buffers[1]);
  // cuda dims
  const unsigned threads_per_block = args.config->cuda_block_size;
  // Round up so every photon path gets a thread; the kernel bounds-checks.
  const unsigned n_blocks = std::ceil(size / (float)threads_per_block);
  generate_photon_paths_impl
    <<<n_blocks, threads_per_block, 0, starpu_cuda_get_local_stream()>>>
    (photon_paths,
     size,
     seed_buffer,
     args.gpu_scene[device_id]);
  // Block until the kernel finishes, then surface any launch/exec error.
  cudaStreamSynchronize(starpu_cuda_get_local_stream());
  CUDA_SAFE(cudaGetLastError());
  const timeval end_time = my_WallClockTime();
  task_info("GPU", device_id, 0, iteration, start_time, end_time, "(6) generate_photon_paths");
}
} } }
|
72449489234fa6e0a744694d20bd205cbd059bc4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
// One thread per work item (index < size). For item i, reads the four-wide
// integer rows A[i*4..], B[i*4..], C[i*4.. ] and the packed coefficient array
// x = [alpha (4*order) | beta (4*order) | gamma (4*order)], then writes
// residual[i] = (sum of corrected terms) / (c[0]+c[1]+c[2]+c[3]+1).
// WARNING(review): `ss` holds 6 doubles but the loop writes ss[0..order-1];
// order > 6 would overrun the stack buffer — confirm the caller bounds order.
__global__ void update_device_data_kernel( unsigned long size, unsigned long order, unsigned long* A, unsigned long* B, unsigned long* C, double* residual, double* x )
{
    unsigned long const index = blockDim.x * blockIdx.x + threadIdx.x;
    // Tail guard so the launch configuration may overshoot `size`.
    if ( index >= size ) return;
    // Each item owns a contiguous stride-4 row in A, B and C.
    unsigned long* a = A + index * 4;
    unsigned long* b = B + index * 4;
    unsigned long* c = C + index * 4;
    // x is partitioned into three 4*order coefficient blocks.
    double* alpha = x;
    double* beta = x + 4 * order;
    double* gamma = beta + 4 * order;
    double const weigh = c[0] + c[1] + c[2] + c[3] + 1.0;
#if 0
    printf( "size = %ld\n", size );
    printf( "order = %ld\n", order );
    printf( "index = %ld\n", index );
    printf( "weigh = %lf\n", weigh );
    printf( "a[0] = %ld\n", a[0] );
    printf( "a[1] = %ld\n", a[1] );
    printf( "a[2] = %ld\n", a[2] );
    printf( "a[3] = %ld\n", a[3] );
    printf( "b[0] = %ld\n", b[0] );
    printf( "b[1] = %ld\n", b[1] );
    printf( "b[2] = %ld\n", b[2] );
    printf( "b[3] = %ld\n", b[3] );
    printf( "c[0] = %ld\n", c[0] );
    printf( "c[1] = %ld\n", c[1] );
    printf( "c[2] = %ld\n", c[2] );
    printf( "c[3] = %ld\n", c[3] );
#endif
    double ss[6];
    double cc[4];
    // ss[j] = (a . alpha_j) * (b . beta_j): product of two 4-element dot
    // products per order term; 1.0* promotes the integer row to double.
    for ( unsigned long jndex = 0; jndex < order; ++jndex )
    {
        unsigned long const offset = jndex * 4;
        double* alpha_ = alpha + offset;
        double* beta_ = beta + offset;
        ss[jndex] = 1.0*a[0]*alpha_[0] + a[1]*alpha_[1] + a[2]*alpha_[2] + a[3]*alpha_[3];
        ss[jndex] *= 1.0*b[0]*beta_[0] + b[1]*beta_[1] + b[2]*beta_[2] + b[3]*beta_[3];
#if 0
        printf( "ss[%ld/%ld] = %lf\n", jndex, order, ss[jndex] );
#endif
    }
    // Start from -c and accumulate the gamma-weighted ss terms.
    cc[0] = -1.0 * c[0]; cc[1] = -1.0 * c[1]; cc[2] = -1.0 * c[2]; cc[3] = -1.0 * c[3];
#if 0
    printf( "cc[0] = %lf\n", cc[0] );
    printf( "cc[1] = %lf\n", cc[1] );
    printf( "cc[2] = %lf\n", cc[2] );
    printf( "cc[3] = %lf\n", cc[3] );
    printf( "\n" );
#endif
    for ( unsigned long kndex = 0; kndex != order; ++kndex )
    {
#if 0
        double cc0 = gamma[kndex] * ss[kndex];
        double cc1 = gamma[kndex+order] * ss[kndex];
        double cc2 = gamma[kndex+order+order] * ss[kndex];
        double cc3 = gamma[kndex+order+order+order] * ss[kndex];
        printf( "kndex %ld\n", kndex );
        printf( "cc0 = %lf\n", cc0 );
        printf( "cc1 = %lf\n", cc1 );
        printf( "cc2 = %lf\n", cc2 );
        printf( "cc3 = %lf\n", cc3 );
        printf( "\n" );
#endif
        cc[0] += gamma[kndex] * ss[kndex];
        cc[1] += gamma[kndex+order] * ss[kndex];
        cc[2] += gamma[kndex+order+order] * ss[kndex];
        cc[3] += gamma[kndex+order+order+order] * ss[kndex];
    }
#if 0
    printf( "cc[0] = %lf\n", cc[0] );
    printf( "cc[1] = %lf\n", cc[1] );
    printf( "cc[2] = %lf\n", cc[2] );
    printf( "cc[3] = %lf\n", cc[3] );
#endif
    residual[index] = ( cc[0] + cc[1] + cc[2] + cc[3] ) / weigh;
}
// Launch update_device_data_kernel over `size` work items and wait for it.
//
// Fix: the previous launch used a single block of `size` threads, which
// fails silently once `size` exceeds the device's max threads per block
// (1024 on current hardware). The kernel guards with `index >= size`, so
// splitting the work across fixed-size blocks produces identical output.
void update_device_data( unsigned long size, unsigned long order, unsigned long* A, unsigned long* B, unsigned long* C, double* residual, double* x )
{
    if ( size == 0 ) return;  // avoid a zero-dimension launch
    unsigned long const threads = 256;
    unsigned long const blocks = ( size + threads - 1 ) / threads;  // ceil-div
    hipLaunchKernelGGL(( update_device_data_kernel), dim3(blocks), dim3(threads), 0, 0, size, order, A, B, C, residual, x );
    hipDeviceSynchronize();
}
| 72449489234fa6e0a744694d20bd205cbd059bc4.cu | #include <cstdio>
// One thread per work item (index < size). For item i, reads the four-wide
// integer rows A[i*4..], B[i*4..], C[i*4.. ] and the packed coefficient array
// x = [alpha (4*order) | beta (4*order) | gamma (4*order)], then writes
// residual[i] = (sum of corrected terms) / (c[0]+c[1]+c[2]+c[3]+1).
// WARNING(review): `ss` holds 6 doubles but the loop writes ss[0..order-1];
// order > 6 would overrun the stack buffer — confirm the caller bounds order.
__global__ void update_device_data_kernel( unsigned long size, unsigned long order, unsigned long* A, unsigned long* B, unsigned long* C, double* residual, double* x )
{
    unsigned long const index = blockDim.x * blockIdx.x + threadIdx.x;
    // Tail guard so the launch configuration may overshoot `size`.
    if ( index >= size ) return;
    // Each item owns a contiguous stride-4 row in A, B and C.
    unsigned long* a = A + index * 4;
    unsigned long* b = B + index * 4;
    unsigned long* c = C + index * 4;
    // x is partitioned into three 4*order coefficient blocks.
    double* alpha = x;
    double* beta = x + 4 * order;
    double* gamma = beta + 4 * order;
    double const weigh = c[0] + c[1] + c[2] + c[3] + 1.0;
#if 0
    printf( "size = %ld\n", size );
    printf( "order = %ld\n", order );
    printf( "index = %ld\n", index );
    printf( "weigh = %lf\n", weigh );
    printf( "a[0] = %ld\n", a[0] );
    printf( "a[1] = %ld\n", a[1] );
    printf( "a[2] = %ld\n", a[2] );
    printf( "a[3] = %ld\n", a[3] );
    printf( "b[0] = %ld\n", b[0] );
    printf( "b[1] = %ld\n", b[1] );
    printf( "b[2] = %ld\n", b[2] );
    printf( "b[3] = %ld\n", b[3] );
    printf( "c[0] = %ld\n", c[0] );
    printf( "c[1] = %ld\n", c[1] );
    printf( "c[2] = %ld\n", c[2] );
    printf( "c[3] = %ld\n", c[3] );
#endif
    double ss[6];
    double cc[4];
    // ss[j] = (a . alpha_j) * (b . beta_j): product of two 4-element dot
    // products per order term; 1.0* promotes the integer row to double.
    for ( unsigned long jndex = 0; jndex < order; ++jndex )
    {
        unsigned long const offset = jndex * 4;
        double* alpha_ = alpha + offset;
        double* beta_ = beta + offset;
        ss[jndex] = 1.0*a[0]*alpha_[0] + a[1]*alpha_[1] + a[2]*alpha_[2] + a[3]*alpha_[3];
        ss[jndex] *= 1.0*b[0]*beta_[0] + b[1]*beta_[1] + b[2]*beta_[2] + b[3]*beta_[3];
#if 0
        printf( "ss[%ld/%ld] = %lf\n", jndex, order, ss[jndex] );
#endif
    }
    // Start from -c and accumulate the gamma-weighted ss terms.
    cc[0] = -1.0 * c[0]; cc[1] = -1.0 * c[1]; cc[2] = -1.0 * c[2]; cc[3] = -1.0 * c[3];
#if 0
    printf( "cc[0] = %lf\n", cc[0] );
    printf( "cc[1] = %lf\n", cc[1] );
    printf( "cc[2] = %lf\n", cc[2] );
    printf( "cc[3] = %lf\n", cc[3] );
    printf( "\n" );
#endif
    for ( unsigned long kndex = 0; kndex != order; ++kndex )
    {
#if 0
        double cc0 = gamma[kndex] * ss[kndex];
        double cc1 = gamma[kndex+order] * ss[kndex];
        double cc2 = gamma[kndex+order+order] * ss[kndex];
        double cc3 = gamma[kndex+order+order+order] * ss[kndex];
        printf( "kndex %ld\n", kndex );
        printf( "cc0 = %lf\n", cc0 );
        printf( "cc1 = %lf\n", cc1 );
        printf( "cc2 = %lf\n", cc2 );
        printf( "cc3 = %lf\n", cc3 );
        printf( "\n" );
#endif
        cc[0] += gamma[kndex] * ss[kndex];
        cc[1] += gamma[kndex+order] * ss[kndex];
        cc[2] += gamma[kndex+order+order] * ss[kndex];
        cc[3] += gamma[kndex+order+order+order] * ss[kndex];
    }
#if 0
    printf( "cc[0] = %lf\n", cc[0] );
    printf( "cc[1] = %lf\n", cc[1] );
    printf( "cc[2] = %lf\n", cc[2] );
    printf( "cc[3] = %lf\n", cc[3] );
#endif
    residual[index] = ( cc[0] + cc[1] + cc[2] + cc[3] ) / weigh;
}
// Launch update_device_data_kernel over `size` work items and wait for it.
//
// Fix: the previous launch used a single block of `size` threads, which
// fails silently once `size` exceeds the device's max threads per block
// (1024 on current hardware). The kernel guards with `index >= size`, so
// splitting the work across fixed-size blocks produces identical output.
void update_device_data( unsigned long size, unsigned long order, unsigned long* A, unsigned long* B, unsigned long* C, double* residual, double* x )
{
    if ( size == 0 ) return;  // avoid a zero-dimension launch
    unsigned long const threads = 256;
    unsigned long const blocks = ( size + threads - 1 ) / threads;  // ceil-div
    update_device_data_kernel<<<blocks, threads>>>( size, order, A, B, C, residual, x );
    cudaDeviceSynchronize();
}
|
8c0c1b002a53ee90e7c620cae0f8cb8c9b43b0aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "exact_sol.h"
#include "init.h"
#include "parameters.h"
#include "physics.h"
// Sets up the initial condition on a 2-D grid: fills the cell-center
// coordinate arrays x/y and the conserved-variable field f. One thread per
// cell; IC_type selects the test problem (lf/nf/ngc, ax/ay, gr_gamma, etc.
// come from parameters.h).
// NOTE(review): u[] stays uninitialized if IC_type matches none of the three
// cases below — confirm IC_type is restricted to {Lin_gauss, Sod, RP_3}.
__global__ void initialize(double f[], double x[], double y[], const double dx, const double dy)
{
    // c2f/f2c convert between C (0-based) and Fortran-style (1-based) indexing.
    int tidx = c2f(threadIdx.x + blockIdx.x * blockDim.x);
    int tidy = c2f(threadIdx.y + blockIdx.y * blockDim.y);
    double xt = (double(tidx) - 0.5f) * dx; //centers of cells
    double yt = (double(tidy) - 0.5f) * dy;
    double u[4];  // primitive state: density, x/y momentum, energy
    if (tidx <= lf + ngc) {
        x[f2c(tidx)] = xt;
    }
    if (tidy <= nf + ngc) {
        y[f2c(tidy)] = yt;
    }
    if (tidx <= lf + ngc) {
        if (tidy <= nf + ngc) {
            if (IC_type == Lin_gauss) {
                // Linear advection of a Gaussian profile (analytic solution known).
                u[i_rho ] = exact_soln(tidx, tidy, dx, dy, x, y);
                u[i_momx] = ax;
                u[i_momy] = ay;
                u[i_ener] = 1.0/gr_gamma;
            }else if (IC_type == Sod)
            {
                // Sod shock tube: discontinuity at y = 0.5.
                if (yt < 0.5) {
                    u[i_rho ] = 1.;
                    u[i_momx] = 0.;
                    u[i_momy] = 0.;
                    u[i_ener] = 1.;
                }
                else{
                    u[i_rho ] = 0.125;
                    u[i_momx] = 0.;
                    u[i_momy] = 0.;
                    u[i_ener] = 0.1;
                }
            }
            else if (IC_type == RP_3) {
                // 2-D Riemann problem (configuration 3): four quadrants split at 4/5.
                if ((xt<=4./5)&&(yt<=4./5)) { u[i_rho] = 0.138 ; u[i_momx] = 1.206 ; u[i_momy] = 1.206 ; u[i_ener] = 0.029 ;}
                if ((xt>=4./5)&&(yt<=4./5)) { u[i_rho] = 0.5323; u[i_momx] = 0.0 ; u[i_momy] = 1.206 ; u[i_ener] = 0.3; }
                if ((xt<=4./5)&&(yt>=4./5)) { u[i_rho] = 0.5323; u[i_momx] = 1.206 ; u[i_momy] = 0.0 ; u[i_ener] = 0.3; }
                if ((xt>=4./5)&&(yt>=4./5)) { u[i_rho] = 1.5 ; u[i_momx] = 0.0 ; u[i_momy] = 0.0 ; u[i_ener] = 1.5 ;}
            }
            // Convert primitives in-place, then store into the solution array.
            primitive_to_conservative(u);
            for (int i_cons = i_rho; i_cons <= i_ener; i_cons++){
                f[ij_sol(tidy, tidx, i_cons)] = u[i_cons];
            }
        }
    }
}
| 8c0c1b002a53ee90e7c620cae0f8cb8c9b43b0aa.cu | #include "exact_sol.h"
#include "init.h"
#include "parameters.h"
#include "physics.h"
// Sets up the initial condition on a 2-D grid: fills the cell-center
// coordinate arrays x/y and the conserved-variable field f. One thread per
// cell; IC_type selects the test problem (lf/nf/ngc, ax/ay, gr_gamma, etc.
// come from parameters.h).
// NOTE(review): u[] stays uninitialized if IC_type matches none of the three
// cases below — confirm IC_type is restricted to {Lin_gauss, Sod, RP_3}.
__global__ void initialize(double f[], double x[], double y[], const double dx, const double dy)
{
    // c2f/f2c convert between C (0-based) and Fortran-style (1-based) indexing.
    int tidx = c2f(threadIdx.x + blockIdx.x * blockDim.x);
    int tidy = c2f(threadIdx.y + blockIdx.y * blockDim.y);
    double xt = (double(tidx) - 0.5f) * dx; //centers of cells
    double yt = (double(tidy) - 0.5f) * dy;
    double u[4];  // primitive state: density, x/y momentum, energy
    if (tidx <= lf + ngc) {
        x[f2c(tidx)] = xt;
    }
    if (tidy <= nf + ngc) {
        y[f2c(tidy)] = yt;
    }
    if (tidx <= lf + ngc) {
        if (tidy <= nf + ngc) {
            if (IC_type == Lin_gauss) {
                // Linear advection of a Gaussian profile (analytic solution known).
                u[i_rho ] = exact_soln(tidx, tidy, dx, dy, x, y);
                u[i_momx] = ax;
                u[i_momy] = ay;
                u[i_ener] = 1.0/gr_gamma;
            }else if (IC_type == Sod)
            {
                // Sod shock tube: discontinuity at y = 0.5.
                if (yt < 0.5) {
                    u[i_rho ] = 1.;
                    u[i_momx] = 0.;
                    u[i_momy] = 0.;
                    u[i_ener] = 1.;
                }
                else{
                    u[i_rho ] = 0.125;
                    u[i_momx] = 0.;
                    u[i_momy] = 0.;
                    u[i_ener] = 0.1;
                }
            }
            else if (IC_type == RP_3) {
                // 2-D Riemann problem (configuration 3): four quadrants split at 4/5.
                if ((xt<=4./5)&&(yt<=4./5)) { u[i_rho] = 0.138 ; u[i_momx] = 1.206 ; u[i_momy] = 1.206 ; u[i_ener] = 0.029 ;}
                if ((xt>=4./5)&&(yt<=4./5)) { u[i_rho] = 0.5323; u[i_momx] = 0.0 ; u[i_momy] = 1.206 ; u[i_ener] = 0.3; }
                if ((xt<=4./5)&&(yt>=4./5)) { u[i_rho] = 0.5323; u[i_momx] = 1.206 ; u[i_momy] = 0.0 ; u[i_ener] = 0.3; }
                if ((xt>=4./5)&&(yt>=4./5)) { u[i_rho] = 1.5 ; u[i_momx] = 0.0 ; u[i_momy] = 0.0 ; u[i_ener] = 1.5 ;}
            }
            // Convert primitives in-place, then store into the solution array.
            primitive_to_conservative(u);
            for (int i_cons = i_rho; i_cons <= i_ener; i_cons++){
                f[ij_sol(tidy, tidx, i_cons)] = u[i_cons];
            }
        }
    }
}
|
39f9b88b6808d360e19fbd27562ae190c9ff0f78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-08-01
*/
#include "../../XDevice.h"
#include "../../XUtility.h"
#include "Sub.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
subtraction of data arrays (CUDA Kernel)
c = a - b * \beta
>> a - A matrix
>> b - another matrix
>> c - where we put a-b
>> size - the size of a/b/c
>> beta - the coefficient
*/
/* Elementwise subtraction, one thread per element: c[idx] = a[idx] - beta * b[idx]. */
__global__
void KernelSUB(DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE beta)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= size)
        return;                     /* grid may overshoot the array tail */
    c[idx] = a[idx] - beta * b[idx];
}
/*
tensor subtraction c = a - b * \beta (cuda version)
>> a - a tensor
>> b - another tensor
>> c - where we put a-b*\beta.
>> beta - the scaling factor
*/
// Tensor subtraction on GPU: c = a - b * beta for dense default-dtype tensors.
// Validates shapes/dtypes/devices, switches to the tensors' device for the
// launch and restores the previous device afterwards.
// Fix: the error messages were copy-pasted from the addition kernel and one
// was truncated ("on the same!"); they now describe this routine correctly.
void _CudaSub(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta)
{
    CheckNTErrors(a && b && c, "Empty tensor input!");
    CheckNTErrors((a->unitNum == b->unitNum && a->unitNum == c->unitNum),
                  "Unmatched tensors in subtraction!");
    CheckNTErrors((a->dataType == b->dataType && a->dataType == c->dataType),
                  "Unmatched tensors in subtraction!");
    CheckNTErrors((a->devID == b->devID && a->devID == c->devID),
                  "The tensors must be on the same device!");
    /* run on the tensors' device; restored at the end of the function */
    int devIDBackup = XDevice::GetGPUDevice();
    XDevice::SetGPUDevice(a->devID);
    if (!a->isSparse && !b->isSparse) {
        CheckNTErrors(!c->isSparse, "Illegal use of sparse matrix in subtraction!");
        if (a->dataType == DEFAULT_DTYPE &&
            b->dataType == DEFAULT_DTYPE &&
            c->dataType == DEFAULT_DTYPE)
        {
            /* 1-D launch configuration sized for unitNum elements */
            int gridSize[3], blockSize[3];
            GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
            dim3 blocks(gridSize[0]);
            dim3 threads(blockSize[0]);
            KernelSUB << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, a->unitNum, beta);
        }
        else {
            // TODO!!
            ShowNTErrors("TODO!");
        }
    }
    else {
        // TODO!!
        ShowNTErrors("TODO!");
    }
    XDevice::SetGPUDevice(devIDBackup);
}
/* subtraction over arrays
tensor subtraction c = a - b * \beta (cuda version) with an input handle
>> devID - device ID (MUST >= 0)
>> handle - cuda handle
>> a - an array
>> b - another array
>> c - where we put a-b
>> size - size of the array
>> beta - the coefficient
*/
// Array subtraction with a BLAS handle: c = a - b * beta (in place when
// c == NULL or c == a).
// Fix: the in-place path called axpy with +beta, but axpy computes
// a := a + alpha * b — i.e. it ADDED beta*b while the KernelSUB path below
// subtracts it. The scalar is now negated so both paths agree with the
// documented contract.
void _CudaSubWithHandle(int devID, hipblasHandle_t * handle, DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE beta)
{
    if (size == 0)
        return;
    if (c == NULL)
        c = a;
    CheckNTErrors((a && b && c), "Empty arrays in subtraction!");
    int devIDBackup;
    ProtectCudaDev(devID, devIDBackup);
    if (c == a) {
        /* in-place fast path via BLAS axpy (a := a + alpha * b) */
        DTYPE alpha = (DTYPE)-beta;
/* (sic: macro spelling matches the project-wide definition) */
#ifdef DOUBELPRICSION
        hipblasDaxpy(*handle, size, &alpha, b, 1, a, 1);
#else
        hipblasSaxpy(*handle, size, &alpha, b, 1, a, 1);
#endif
    }
    else {
        /* out-of-place path: launch the elementwise kernel */
        int gridSize[3], blockSize[3];
        GDevs.GetCudaThread(devID, size, gridSize, blockSize);
        dim3 blocks(gridSize[0]);
        dim3 threads(blockSize[0]);
        hipLaunchKernelGGL(( KernelSUB), dim3(blocks), dim3(threads), 0, 0, (DTYPE*)a, (DTYPE*)b, (DTYPE*)c, size, beta);
    }
    BacktoCudaDev(devID, devIDBackup);
}
#endif // USE_ROCM
} // namespace nts(NiuTrans.Tensor)
| 39f9b88b6808d360e19fbd27562ae190c9ff0f78.cu | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northestern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-08-01
*/
#include "../../XDevice.h"
#include "../../XUtility.h"
#include "Sub.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
subtraction of data arrays (CUDA Kernel)
c = a - b * \beta
>> a - A matrix
>> b - another matrix
>> c - where we put a-b
>> size - the size of a/b/c
>> beta - the coefficient
*/
/* Elementwise subtraction, one thread per element: c[idx] = a[idx] - beta * b[idx]. */
__global__
void KernelSUB(DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE beta)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= size)
        return;                     /* grid may overshoot the array tail */
    c[idx] = a[idx] - beta * b[idx];
}
/*
tensor subtraction c = a - b * \beta (cuda version)
>> a - a tensor
>> b - another tensor
>> c - where we put a-b*\beta.
>> beta - the scaling factor
*/
// Tensor subtraction on GPU: c = a - b * beta for dense default-dtype tensors.
// Validates shapes/dtypes/devices, switches to the tensors' device for the
// launch and restores the previous device afterwards.
// Fix: the error messages were copy-pasted from the addition kernel and one
// was truncated ("on the same!"); they now describe this routine correctly.
void _CudaSub(const XTensor * a, const XTensor * b, XTensor * c, DTYPE beta)
{
    CheckNTErrors(a && b && c, "Empty tensor input!");
    CheckNTErrors((a->unitNum == b->unitNum && a->unitNum == c->unitNum),
                  "Unmatched tensors in subtraction!");
    CheckNTErrors((a->dataType == b->dataType && a->dataType == c->dataType),
                  "Unmatched tensors in subtraction!");
    CheckNTErrors((a->devID == b->devID && a->devID == c->devID),
                  "The tensors must be on the same device!");
    /* run on the tensors' device; restored at the end of the function */
    int devIDBackup = XDevice::GetGPUDevice();
    XDevice::SetGPUDevice(a->devID);
    if (!a->isSparse && !b->isSparse) {
        CheckNTErrors(!c->isSparse, "Illegal use of sparse matrix in subtraction!");
        if (a->dataType == DEFAULT_DTYPE &&
            b->dataType == DEFAULT_DTYPE &&
            c->dataType == DEFAULT_DTYPE)
        {
            /* 1-D launch configuration sized for unitNum elements */
            int gridSize[3], blockSize[3];
            GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize);
            dim3 blocks(gridSize[0]);
            dim3 threads(blockSize[0]);
            KernelSUB << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data, a->unitNum, beta);
        }
        else {
            // TODO!!
            ShowNTErrors("TODO!");
        }
    }
    else {
        // TODO!!
        ShowNTErrors("TODO!");
    }
    XDevice::SetGPUDevice(devIDBackup);
}
/* subtraction over arrays
tensor subtraction c = a - b * \beta (cuda version) with an input handle
>> devID - device ID (MUST >= 0)
>> handle - cuda handle
>> a - an array
>> b - another array
>> c - where we put a-b
>> size - size of the array
>> beta - the coefficient
*/
// Array subtraction with a cuBLAS handle: c = a - b * beta (in place when
// c == NULL or c == a).
// Fix: the in-place path called axpy with +beta, but axpy computes
// a := a + alpha * b — i.e. it ADDED beta*b while the KernelSUB path below
// subtracts it. The scalar is now negated so both paths agree with the
// documented contract.
void _CudaSubWithHandle(int devID, cublasHandle_t * handle, DTYPE * a, DTYPE * b, DTYPE * c, int size, DTYPE beta)
{
    if (size == 0)
        return;
    if (c == NULL)
        c = a;
    CheckNTErrors((a && b && c), "Empty arrays in subtraction!");
    int devIDBackup;
    ProtectCudaDev(devID, devIDBackup);
    if (c == a) {
        /* in-place fast path via BLAS axpy (a := a + alpha * b) */
        DTYPE alpha = (DTYPE)-beta;
/* (sic: macro spelling matches the project-wide definition) */
#ifdef DOUBELPRICSION
        cublasDaxpy(*handle, size, &alpha, b, 1, a, 1);
#else
        cublasSaxpy(*handle, size, &alpha, b, 1, a, 1);
#endif
    }
    else {
        /* out-of-place path: launch the elementwise kernel */
        int gridSize[3], blockSize[3];
        GDevs.GetCudaThread(devID, size, gridSize, blockSize);
        dim3 blocks(gridSize[0]);
        dim3 threads(blockSize[0]);
        KernelSUB<<<blocks, threads>>>((DTYPE*)a, (DTYPE*)b, (DTYPE*)c, size, beta);
    }
    BacktoCudaDev(devID, devIDBackup);
}
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor)
|
b1d1178072a8a02515ad2437ee73e1cd21dc749c.hip | // !!! This is a file automatically generated by hipify!!!
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <hip/hip_runtime.h>
#include <cstdio>
#include <time.h>
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
// Define layers of CNN
static Layer l_input = Layer(0, 0, 28*28);
static Layer l_c1 = Layer(5*5, 6, 24*24*6);
static Layer l_s1 = Layer(4*4, 1, 6*6*6);
static Layer l_f = Layer(6*6*6, 10, 10);
static void learn();
static unsigned int classify(double data[28][28]);
static void test();
static double forward_pass(double data[28][28]);
static double back_pass();
// Read the MNIST training and test sets into the file-static buffers
// (train_set/train_cnt and test_set/test_cnt).
static inline void loaddata()
{
	mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte", &train_set, &train_cnt);
	mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte", &test_set, &test_cnt);
}
// Entry point: seed the RNG, initialise the GPU runtime, load MNIST,
// train the network, then evaluate it on the test set.
int main(int argc, const char **argv)
{
	srand(time(NULL));

	const hipError_t init_status = hipInit(0);
	if (init_status != hipSuccess) {
		fprintf(stderr, "CUDA initialisation failed with error code - %d\n", init_status);
		return 1;
	}

	loaddata();
	learn();
	test();

	return 0;
}
// Forward propagation of a single row in dataset
// Forward propagation of a single 28x28 image through
// input -> conv (c1) -> pool (s1) -> fully-connected (f).
// Returns the elapsed wall-clock time in seconds.
// Fix: kernel launches are asynchronous, so stopping the clock right after
// the launches only measured launch overhead; a device sync now ensures the
// timing covers actual execution.
static double forward_pass(double data[28][28])
{
	float input[28][28];

	// Narrow the double input to the float precision the layers use.
	for (int i = 0; i < 28; ++i) {
		for (int j = 0; j < 28; ++j) {
			input[i][j] = data[i][j];
		}
	}

	l_input.clear();
	l_c1.clear();
	l_s1.clear();
	l_f.clear();

	clock_t start, end;
	start = clock();

	l_input.setOutput((float *)input);
	hipLaunchKernelGGL(( fp_preact_c1), dim3(64), dim3(64), 0, 0, (float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight);
	hipLaunchKernelGGL(( fp_bias_c1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.preact, l_c1.bias);
	hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_c1.preact, l_c1.output, l_c1.O);
	hipLaunchKernelGGL(( fp_preact_s1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.output, (float (*)[6][6])l_s1.preact, (float (*)[4][4])l_s1.weight);
	hipLaunchKernelGGL(( fp_bias_s1), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.preact, l_s1.bias);
	hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_s1.preact, l_s1.output, l_s1.O);
	hipLaunchKernelGGL(( fp_preact_f), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.output, l_f.preact, (float (*)[6][6][6])l_f.weight);
	hipLaunchKernelGGL(( fp_bias_f), dim3(64), dim3(64), 0, 0, l_f.preact, l_f.bias);
	hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_f.preact, l_f.output, l_f.O);

	// Wait for all launched kernels so the timer measures real work.
	hipDeviceSynchronize();
	end = clock();

	return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Back propagation to update weights
// Run one backward pass: propagate the gradient from the fully connected
// layer (f) back through the subsampling (s1) and convolution (c1) layers,
// then apply the accumulated weight gradients.  Assumes l_f.d_preact was
// filled by makeError beforehand.  Returns host-side elapsed seconds.
// NOTE(review): as in forward_pass, launches are asynchronous, so the
// clock() interval mostly measures launch overhead — confirm intended.
static double back_pass()
{
    clock_t start, end;
    start = clock();
    // Fully connected layer gradients.
    hipLaunchKernelGGL(( bp_weight_f), dim3(64), dim3(64), 0, 0, (float (*)[6][6][6])l_f.d_weight, l_f.d_preact, (float (*)[6][6])l_s1.output);
    hipLaunchKernelGGL(( bp_bias_f), dim3(64), dim3(64), 0, 0, l_f.bias, l_f.d_preact);
    // Subsampling layer gradients.
    hipLaunchKernelGGL(( bp_output_s1), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.d_output, (float (*)[6][6][6])l_f.weight, l_f.d_preact);
    hipLaunchKernelGGL(( bp_preact_s1), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.d_preact, (float (*)[6][6])l_s1.d_output, (float (*)[6][6])l_s1.preact);
    hipLaunchKernelGGL(( bp_weight_s1), dim3(64), dim3(64), 0, 0, (float (*)[4][4])l_s1.d_weight, (float (*)[6][6])l_s1.d_preact, (float (*)[24][24])l_c1.output);
    hipLaunchKernelGGL(( bp_bias_s1), dim3(64), dim3(64), 0, 0, l_s1.bias, (float (*)[6][6])l_s1.d_preact);
    // Convolution layer gradients.
    hipLaunchKernelGGL(( bp_output_c1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.d_output, (float (*)[4][4])l_s1.weight, (float (*)[6][6])l_s1.d_preact);
    hipLaunchKernelGGL(( bp_preact_c1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.d_preact, (float (*)[24][24])l_c1.d_output, (float (*)[24][24])l_c1.preact);
    hipLaunchKernelGGL(( bp_weight_c1), dim3(64), dim3(64), 0, 0, (float (*)[5][5])l_c1.d_weight, (float (*)[24][24])l_c1.d_preact, (float (*)[28])l_input.output);
    hipLaunchKernelGGL(( bp_bias_c1), dim3(64), dim3(64), 0, 0, l_c1.bias, (float (*)[24][24])l_c1.d_preact);
    // Gradient descent step for every layer's weights.
    hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_f.weight, l_f.d_weight, l_f.M * l_f.N);
    hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N);
    hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N);
    end = clock();
    return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Unfold the input layer
// Unfold a 28x28 image into im2col layout: one row per 5x5 window position.
// Row a = i*24 + j holds the 25 pixels of the window whose top-left corner
// is at input[i][j], in row-major order, for all 24x24 valid positions.
// Fix: the original iterated only a 2x2 grid of 2x2 windows, which never
// fills the declared (24*24) x (5*5) output.
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
    int a = 0;
    (void)unfold_input;  // suppress unused-function warning; helper is currently unreferenced
    for (int i = 0; i < 24; ++i)
        for (int j = 0; j < 24; ++j) {
            int b = 0;
            // Copy the 5x5 window anchored at (i, j); i+4 <= 27 so no OOB.
            for (int x = i; x < i + 5; ++x)
                for (int y = j; y < j + 5; ++y)
                    unfolded[a][b++] = input[x][y];
            a++;
        }
}
// Train the network for up to 50 epochs (iter counts down), or until the
// mean per-sample output error drops below `threshold`.  Each sample does a
// forward pass, computes the output error with makeError, accumulates its
// L2 norm via hipblasSnrm2, then runs a backward pass.
// NOTE(review): hipblasCreate's status is not checked, and the signed loop
// index is compared against the unsigned train_cnt — both worth confirming.
static void learn()
{
    static hipblasHandle_t blas;
    hipblasCreate(&blas);
    float err;
    int iter = 50;
    double time_taken = 0.0;
    fprintf(stdout ,"Learning\n");
    // iter < 0 would mean "train forever"; here iter starts at 50.
    while (iter < 0 || iter-- > 0) {
        err = 0.0f;
        for (int i = 0; i < train_cnt; ++i) {
            float tmp_err;
            time_taken += forward_pass(train_set[i].data);
            l_f.bp_clear();
            l_s1.bp_clear();
            l_c1.bp_clear();
            // Euclid distance of train_set[i]
            hipLaunchKernelGGL(( makeError), dim3(10), dim3(1), 0, 0, l_f.d_preact, l_f.output, train_set[i].label, 10);
            hipblasSnrm2(blas, 10, l_f.d_preact, 1, &tmp_err);
            err += tmp_err;
            time_taken += back_pass();
        }
        // Average the accumulated error over the epoch.
        err /= train_cnt;
        fprintf(stdout, "error: %e, time_on_gpu: %lf iter: %d\n", err, time_taken,iter);
        if (err < threshold) {
            fprintf(stdout, "Training complete, error less than threshold\n\n");
            break;
        }
    }
    fprintf(stdout, "\n Time - %lf\n", time_taken);
}
// Returns label of given data (0-9)
// Classify one 28x28 image: run a forward pass, copy the 10 output scores
// back to the host, and return the index (0-9) of the largest score.
static unsigned int classify(double data[28][28])
{
    float scores[10];
    forward_pass(data);
    unsigned int best = 0;
    hipMemcpy(scores, l_f.output, sizeof(float) * 10, hipMemcpyDeviceToHost);
    // Argmax over the 10 class scores.
    for (int k = 1; k < 10; ++k)
        if (scores[best] < scores[k])
            best = k;
    return best;
}
// Perform forward propagation of test data
// Evaluate the trained network on the whole test set and print the
// misclassification rate as a percentage.
static void test()
{
    int mismatches = 0;
    for (int idx = 0; idx < test_cnt; ++idx) {
        if (classify(test_set[idx].data) != test_set[idx].label)
            ++mismatches;
    }
    fprintf(stdout, "Error Rate: %.2lf%%\n",
            double(mismatches) / double(test_cnt) * 100.0);
}
| b1d1178072a8a02515ad2437ee73e1cd21dc749c.cu | #define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <cuda.h>
#include <cstdio>
#include <time.h>
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
// Define layers of CNN
static Layer l_input = Layer(0, 0, 28*28);
static Layer l_c1 = Layer(5*5, 6, 24*24*6);
static Layer l_s1 = Layer(4*4, 1, 6*6*6);
static Layer l_f = Layer(6*6*6, 10, 10);
static void learn();
static unsigned int classify(double data[28][28]);
static void test();
static double forward_pass(double data[28][28]);
static double back_pass();
// Load the MNIST training and test sets from the ./data directory into the
// module-level train_set/test_set arrays and record their sizes in
// train_cnt/test_cnt.
// NOTE(review): the return value of mnist_load is ignored — a missing or
// corrupt data file is not reported here; confirm the loader aborts itself.
static inline void loaddata()
{
    mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
               &train_set, &train_cnt);
    mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
               &test_set, &test_cnt);
}
// Entry point: seed the host RNG, initialise the CUDA driver, load MNIST,
// train the network (learn), then evaluate it on the test set (test).
// Returns 0 on success, 1 if the driver cannot be initialised.
int main(int argc, const char **argv)
{
    srand(time(NULL));
    // cuInit must be called before any other driver-API call.
    CUresult err = cuInit(0);
    if (err != CUDA_SUCCESS) {
        fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
        return 1;
    }
    loaddata();
    learn();
    test();
    return 0;
}
// Forward propagation of a single row in dataset
// Run one forward pass of the network on a single 28x28 image:
// input -> conv layer (c1) -> subsampling layer (s1) -> fully connected (f),
// each stage being preact kernel + bias kernel + activation kernel.
// Returns the elapsed host time in seconds for the launch sequence.
// NOTE(review): kernel launches are asynchronous and no cudaDeviceSynchronize
// happens before end = clock(), so this interval largely measures launch
// overhead rather than GPU execution time — confirm this is intended.
static double forward_pass(double data[28][28])
{
    // Narrow the double input to the float precision the kernels use.
    float input[28][28];
    for (int i = 0; i < 28; ++i) {
        for (int j = 0; j < 28; ++j) {
            input[i][j] = data[i][j];
        }
    }
    // Reset all layer activations before the new sample.
    l_input.clear();
    l_c1.clear();
    l_s1.clear();
    l_f.clear();
    clock_t start, end;
    start = clock();
    l_input.setOutput((float *)input);
    // Convolution layer: 6 feature maps of 24x24, 5x5 weights.
    fp_preact_c1<<<64, 64>>>((float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight);
    fp_bias_c1<<<64, 64>>>((float (*)[24][24])l_c1.preact, l_c1.bias);
    apply_step_function<<<64, 64>>>(l_c1.preact, l_c1.output, l_c1.O);
    // Subsampling layer: 6x6x6 output via 4x4 pooling weights.
    fp_preact_s1<<<64, 64>>>((float (*)[24][24])l_c1.output, (float (*)[6][6])l_s1.preact, (float (*)[4][4])l_s1.weight);
    fp_bias_s1<<<64, 64>>>((float (*)[6][6])l_s1.preact, l_s1.bias);
    apply_step_function<<<64, 64>>>(l_s1.preact, l_s1.output, l_s1.O);
    // Fully connected layer: 10 class scores.
    fp_preact_f<<<64, 64>>>((float (*)[6][6])l_s1.output, l_f.preact, (float (*)[6][6][6])l_f.weight);
    fp_bias_f<<<64, 64>>>(l_f.preact, l_f.bias);
    apply_step_function<<<64, 64>>>(l_f.preact, l_f.output, l_f.O);
    end = clock();
    return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Back propagation to update weights
// Run one backward pass: propagate the gradient from the fully connected
// layer (f) back through the subsampling (s1) and convolution (c1) layers,
// then apply the accumulated weight gradients.  Assumes l_f.d_preact was
// filled by makeError beforehand.  Returns host-side elapsed seconds.
// NOTE(review): as in forward_pass, launches are asynchronous, so the
// clock() interval mostly measures launch overhead — confirm intended.
static double back_pass()
{
    clock_t start, end;
    start = clock();
    // Fully connected layer gradients.
    bp_weight_f<<<64, 64>>>((float (*)[6][6][6])l_f.d_weight, l_f.d_preact, (float (*)[6][6])l_s1.output);
    bp_bias_f<<<64, 64>>>(l_f.bias, l_f.d_preact);
    // Subsampling layer gradients.
    bp_output_s1<<<64, 64>>>((float (*)[6][6])l_s1.d_output, (float (*)[6][6][6])l_f.weight, l_f.d_preact);
    bp_preact_s1<<<64, 64>>>((float (*)[6][6])l_s1.d_preact, (float (*)[6][6])l_s1.d_output, (float (*)[6][6])l_s1.preact);
    bp_weight_s1<<<64, 64>>>((float (*)[4][4])l_s1.d_weight, (float (*)[6][6])l_s1.d_preact, (float (*)[24][24])l_c1.output);
    bp_bias_s1<<<64, 64>>>(l_s1.bias, (float (*)[6][6])l_s1.d_preact);
    // Convolution layer gradients.
    bp_output_c1<<<64, 64>>>((float (*)[24][24])l_c1.d_output, (float (*)[4][4])l_s1.weight, (float (*)[6][6])l_s1.d_preact);
    bp_preact_c1<<<64, 64>>>((float (*)[24][24])l_c1.d_preact, (float (*)[24][24])l_c1.d_output, (float (*)[24][24])l_c1.preact);
    bp_weight_c1<<<64, 64>>>((float (*)[5][5])l_c1.d_weight, (float (*)[24][24])l_c1.d_preact, (float (*)[28])l_input.output);
    bp_bias_c1<<<64, 64>>>(l_c1.bias, (float (*)[24][24])l_c1.d_preact);
    // Gradient descent step for every layer's weights.
    apply_grad<<<64, 64>>>(l_f.weight, l_f.d_weight, l_f.M * l_f.N);
    apply_grad<<<64, 64>>>(l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N);
    apply_grad<<<64, 64>>>(l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N);
    end = clock();
    return ((double) (end - start)) / CLOCKS_PER_SEC;
}
// Unfold the input layer
// Unfold a 28x28 image into im2col layout: one row per 5x5 window position.
// Row a = i*24 + j holds the 25 pixels of the window whose top-left corner
// is at input[i][j], in row-major order, for all 24x24 valid positions.
// Fix: the original iterated only a 2x2 grid of 2x2 windows, which never
// fills the declared (24*24) x (5*5) output.
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
    int a = 0;
    (void)unfold_input;  // suppress unused-function warning; helper is currently unreferenced
    for (int i = 0; i < 24; ++i)
        for (int j = 0; j < 24; ++j) {
            int b = 0;
            // Copy the 5x5 window anchored at (i, j); i+4 <= 27 so no OOB.
            for (int x = i; x < i + 5; ++x)
                for (int y = j; y < j + 5; ++y)
                    unfolded[a][b++] = input[x][y];
            a++;
        }
}
// Train the network for up to 50 epochs (iter counts down), or until the
// mean per-sample output error drops below `threshold`.  Each sample does a
// forward pass, computes the output error with makeError, accumulates its
// L2 norm via cublasSnrm2, then runs a backward pass.
// NOTE(review): cublasCreate's status is not checked, and the signed loop
// index is compared against the unsigned train_cnt — both worth confirming.
static void learn()
{
    static cublasHandle_t blas;
    cublasCreate(&blas);
    float err;
    int iter = 50;
    double time_taken = 0.0;
    fprintf(stdout ,"Learning\n");
    // iter < 0 would mean "train forever"; here iter starts at 50.
    while (iter < 0 || iter-- > 0) {
        err = 0.0f;
        for (int i = 0; i < train_cnt; ++i) {
            float tmp_err;
            time_taken += forward_pass(train_set[i].data);
            l_f.bp_clear();
            l_s1.bp_clear();
            l_c1.bp_clear();
            // Euclid distance of train_set[i]
            makeError<<<10, 1>>>(l_f.d_preact, l_f.output, train_set[i].label, 10);
            cublasSnrm2(blas, 10, l_f.d_preact, 1, &tmp_err);
            err += tmp_err;
            time_taken += back_pass();
        }
        // Average the accumulated error over the epoch.
        err /= train_cnt;
        fprintf(stdout, "error: %e, time_on_gpu: %lf iter: %d\n", err, time_taken,iter);
        if (err < threshold) {
            fprintf(stdout, "Training complete, error less than threshold\n\n");
            break;
        }
    }
    fprintf(stdout, "\n Time - %lf\n", time_taken);
}
// Returns label of given data (0-9)
// Classify one 28x28 image: run a forward pass, copy the 10 output scores
// back to the host, and return the index (0-9) of the largest score.
static unsigned int classify(double data[28][28])
{
    float scores[10];
    forward_pass(data);
    unsigned int best = 0;
    cudaMemcpy(scores, l_f.output, sizeof(float) * 10, cudaMemcpyDeviceToHost);
    // Argmax over the 10 class scores.
    for (int k = 1; k < 10; ++k)
        if (scores[best] < scores[k])
            best = k;
    return best;
}
// Perform forward propagation of test data
// Evaluate the trained network on the whole test set and print the
// misclassification rate as a percentage.
static void test()
{
    int mismatches = 0;
    for (int idx = 0; idx < test_cnt; ++idx) {
        if (classify(test_set[idx].data) != test_set[idx].label)
            ++mismatches;
    }
    fprintf(stdout, "Error Rate: %.2lf%%\n",
            double(mismatches) / double(test_cnt) * 100.0);
}
|
4864612a070e4b33ded2e71c7c68a8aac91dc303.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> s d c
@author Ichitaro Yamazaki
*/
#include "magma_internal.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
// Apply the 2-by-2 pivot stored in rows 0..1 of W to the m-by-2 panel A
// (lower-triangular storage): each thread handles one row ind, reading the
// panel data from rows 2+ind of W.  The pivot factors D11, D21, D22 and the
// scalar T are recomputed redundantly by every thread.
__global__ void
zlascl_2x2_lower(
    int m,
    const magmaDoubleComplex* W, int ldw,
    magmaDoubleComplex* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;  // global row handled by this thread
    magmaDoubleComplex D21 = W( 1, 0 );
    magmaDoubleComplex D11 = MAGMA_Z_DIV( W( 1, 1 ), D21 );
    magmaDoubleComplex D22 = MAGMA_Z_DIV( W( 0, 0 ), MAGMA_Z_CONJ( D21 ) );
    double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 );
    D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 );
    if (ind < m) {
        // Panel data starts at row 2 of W (rows 0..1 hold the pivot block).
        A( ind, 0 ) = MAGMA_Z_CONJ( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
        A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
// Apply the 2-by-2 pivot stored in rows m..m+1 of W to the m-by-2 panel A
// (upper-triangular storage): each thread handles one row ind, reading the
// panel data from rows 0..m-1 of W.  Pivot factors are recomputed per thread.
__global__ void
zlascl_2x2_upper(
    int m,
    const magmaDoubleComplex *W, int ldw,
    magmaDoubleComplex* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;  // global row handled by this thread
    magmaDoubleComplex D21 = W( m, 1 );
    magmaDoubleComplex D11 = MAGMA_Z_DIV( W( m+1, 1 ), MAGMA_Z_CONJ( D21 ) );
    magmaDoubleComplex D22 = MAGMA_Z_DIV( W( m, 0 ), D21 );
    double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 );
    D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 );
    if (ind < m) {
        A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
        A( ind, 1 ) = MAGMA_Z_CONJ( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
    }
}
/***************************************************************************//**
Purpose
-------
ZLASCL_2x2 scales the M by M complex matrix A by the 2-by-2 pivot.
TYPE specifies that A may be upper or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
dW DOUBLE PRECISION vector, dimension (2*lddw)
The matrix containing the 2-by-2 pivot.
@param[in]
lddw INTEGER
The leading dimension of the array W. LDDA >= max(1,M).
@param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
The matrix to be scaled by dW. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_2x2
*******************************************************************************/
// Host wrapper: validate arguments, then launch the lower or upper variant
// with one thread per row (NB threads per block) on the given queue.
// NOTE(review): lddw is never validated even though the documentation above
// states a minimum; also the ldda check reports error code -4 although ldda
// is the sixth parameter — confirm both against the LAPACK convention used.
extern "C" void
magmablas_zlascl_2x2(
    magma_type_t type, magma_int_t m,
    magmaDoubleComplex_const_ptr dW, magma_int_t lddw,
    magmaDoubleComplex_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
    *info = 0;
    if ( type != MagmaLower && type != MagmaUpper )
        *info = -1;
    else if ( m < 0 )
        *info = -2;
    else if ( ldda < max(1,m) )
        *info = -4;
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return; //info;
    }
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ) );
    if (type == MagmaLower) {
        hipLaunchKernelGGL(( zlascl_2x2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dW, lddw, dA, ldda);
    }
    else {
        hipLaunchKernelGGL(( zlascl_2x2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dW, lddw, dA, ldda);
    }
}
| 4864612a070e4b33ded2e71c7c68a8aac91dc303.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> s d c
@author Ichitaro Yamazaki
*/
#include "magma_internal.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
// Apply the 2-by-2 pivot stored in rows 0..1 of W to the m-by-2 panel A
// (lower-triangular storage): each thread handles one row ind, reading the
// panel data from rows 2+ind of W.  The pivot factors D11, D21, D22 and the
// scalar T are recomputed redundantly by every thread.
__global__ void
zlascl_2x2_lower(
    int m,
    const magmaDoubleComplex* W, int ldw,
    magmaDoubleComplex* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;  // global row handled by this thread
    magmaDoubleComplex D21 = W( 1, 0 );
    magmaDoubleComplex D11 = MAGMA_Z_DIV( W( 1, 1 ), D21 );
    magmaDoubleComplex D22 = MAGMA_Z_DIV( W( 0, 0 ), MAGMA_Z_CONJ( D21 ) );
    double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 );
    D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 );
    if (ind < m) {
        // Panel data starts at row 2 of W (rows 0..1 hold the pivot block).
        A( ind, 0 ) = MAGMA_Z_CONJ( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
        A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
    }
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
// Apply the 2-by-2 pivot stored in rows m..m+1 of W to the m-by-2 panel A
// (upper-triangular storage): each thread handles one row ind, reading the
// panel data from rows 0..m-1 of W.  Pivot factors are recomputed per thread.
__global__ void
zlascl_2x2_upper(
    int m,
    const magmaDoubleComplex *W, int ldw,
    magmaDoubleComplex* A, int lda)
{
    int ind = blockIdx.x * NB + threadIdx.x;  // global row handled by this thread
    magmaDoubleComplex D21 = W( m, 1 );
    magmaDoubleComplex D11 = MAGMA_Z_DIV( W( m+1, 1 ), MAGMA_Z_CONJ( D21 ) );
    magmaDoubleComplex D22 = MAGMA_Z_DIV( W( m, 0 ), D21 );
    double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 );
    D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 );
    if (ind < m) {
        A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
        A( ind, 1 ) = MAGMA_Z_CONJ( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
    }
}
/***************************************************************************//**
Purpose
-------
ZLASCL_2x2 scales the M by M complex matrix A by the 2-by-2 pivot.
TYPE specifies that A may be upper or lower triangular.
Arguments
---------
@param[in]
type magma_type_t
TYPE indices the storage type of the input matrix A.
= MagmaLower: lower triangular matrix.
= MagmaUpper: upper triangular matrix.
Other formats that LAPACK supports, MAGMA does not currently support.
@param[in]
m INTEGER
The number of rows of the matrix A. M >= 0.
@param[in]
dW DOUBLE PRECISION vector, dimension (2*lddw)
The matrix containing the 2-by-2 pivot.
@param[in]
lddw INTEGER
The leading dimension of the array W. LDDA >= max(1,M).
@param[in,out]
dA COMPLEX*16 array, dimension (LDDA,N)
The matrix to be scaled by dW. See TYPE for the
storage type.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lascl_2x2
*******************************************************************************/
// Host wrapper: validate arguments, then launch the lower or upper variant
// with one thread per row (NB threads per block) on the given queue.
// NOTE(review): lddw is never validated even though the documentation above
// states a minimum; also the ldda check reports error code -4 although ldda
// is the sixth parameter — confirm both against the LAPACK convention used.
extern "C" void
magmablas_zlascl_2x2(
    magma_type_t type, magma_int_t m,
    magmaDoubleComplex_const_ptr dW, magma_int_t lddw,
    magmaDoubleComplex_ptr dA, magma_int_t ldda,
    magma_queue_t queue,
    magma_int_t *info )
{
    *info = 0;
    if ( type != MagmaLower && type != MagmaUpper )
        *info = -1;
    else if ( m < 0 )
        *info = -2;
    else if ( ldda < max(1,m) )
        *info = -4;
    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return; //info;
    }
    dim3 threads( NB );
    dim3 grid( magma_ceildiv( m, NB ) );
    if (type == MagmaLower) {
        zlascl_2x2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, dW, lddw, dA, ldda);
    }
    else {
        zlascl_2x2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, dW, lddw, dA, ldda);
    }
}
|
53ed5dd641cd9ba24e82c54c024034493800ae45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zgemvmdot.cu, normal z -> d, Tue Aug 30 09:38:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_d
// initialize arrays with zero
// Zero the n-by-k column-major array d: one thread per row, each thread
// clearing its element in every one of the k columns.
__global__ void
magma_dgpumemzero(
    double * d,
    int n,
    int k )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= n )
        return;  // guard the grid tail
    for ( int col = 0; col < k; col++ ) {
        d[ row + col*n ] = MAGMA_D_MAKE( 0.0, 0.0 );
    }
}
// dot product
// Partial dot product <v, r>: each block loads one element per thread into
// dynamic shared memory, tree-reduces it, and writes one partial sum per
// block to vtmp[blockIdx.x].  Requires blockDim.x == BLOCK_SIZE (256):
// temp[Idx + 128] is read unconditionally in the first reduction step.
// NOTE(review): the volatile sub-warp tail relies on pre-Volta implicit
// warp synchrony; on Volta+ it would need __syncwarp() — confirm targets.
__global__ void
magma_ddot_kernel(
    int Gs,
    int n,
    double * v,
    double * r,
    double * vtmp)
{
    extern __shared__ double temp[];  // sized by the launch (>= blockDim.x doubles)
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    // Elementwise product, zero-padded past n.
    temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0);
    __syncthreads();
    if ( Idx < 128 ){
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    // Warp-level tail: volatile prevents register caching between steps.
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}
// dot product for multiple vectors
// k simultaneous partial dot products <v_j, r>, j = 0..k-1.  Shared memory
// holds k stripes of blockDim.x values; each stripe is tree-reduced and one
// partial sum per (block, vector) is written to vtmp[blockIdx.x + j*n].
// Requires blockDim.x == BLOCK_SIZE (256) for the unguarded +128 step.
// NOTE(review): volatile warp tail assumes pre-Volta warp synchrony.
__global__ void
magma_dblockdot_kernel(
    int Gs,
    int n,
    int k,
    double * v,
    double * r,
    double * vtmp)
{
    extern __shared__ double temp[];  // k * blockDim.x doubles, set at launch
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;
    // k vectors v(i)
    if (i<n){
        for( j=0; j<k; j++)
            temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
    }
    else {
        for( j=0; j<k; j++)
            temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0);
    }
    __syncthreads();
    if ( Idx < 128 ){
        for( j=0; j<k; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<k; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
        }
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
        __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    // Warp-level tail over all k stripes; volatile forces memory traffic.
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
    if ( Idx == 0 ){
        for( j=0; j<k; j++){
            vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
        }
    }
}
// block reduction for multiple vectors
// Reduce k vectors of partial sums (column j stored at vtmp[. + j*n]) by
// one tree-reduction level per launch; writes one value per (block, vector)
// to vtmp2[blockIdx.x + j*n].  Same blockDim.x == 256 assumption and
// volatile warp-tail caveat as the dot kernels above.
// (Currently only referenced from the commented-out path in magma_dmdotc.)
__global__ void
magma_dblockreduce_kernel(
    int Gs,
    int n,
    int k,
    double * vtmp,
    double * vtmp2 )
{
    extern __shared__ double temp[];  // k * blockDim.x doubles
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;
    // Load the k stripes, zero-padding past n.
    for( j=0; j<k; j++){
        temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
                : MAGMA_D_MAKE( 0.0, 0.0);
    }
    __syncthreads();
    if ( Idx < 128 ){
        for( j=0; j<k; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<k; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
        }
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
        __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
#endif
    if ( Idx == 0 ){
        for( j=0; j<k; j++){
            vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
        }
    }
}
// accelerated reduction for one vector
// Accelerated single-vector reduction: each thread accumulates two elements
// per grid-stride iteration (blockSize fixed at 128, launched with 128
// threads from magma_dmdotc), then the block tree-reduces and writes one
// partial sum to vtmp2[blockIdx.x].
// NOTE(review): volatile warp tail assumes pre-Volta warp synchrony.
__global__ void
magma_dreduce_kernel_fast( int Gs,
                           int n,
                           double * vtmp,
                           double * vtmp2 )
{
    extern __shared__ double temp[];  // >= blockSize doubles, set at launch
    int Idx = threadIdx.x;
    int blockSize = 128;
    int gridSize = blockSize * 2 * gridDim.x;  // two elements per thread per pass
    temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0);
    int i = blockIdx.x * ( blockSize * 2 ) + Idx;
    while (i < Gs ) {
        temp[ Idx ] += vtmp[ i ];
        temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
            : MAGMA_D_MAKE( 0.0, 0.0);
        i += gridSize;
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ){
        vtmp2[ blockIdx.x ] = temp[ 0 ];
    }
}
// accelerated block reduction for multiple vectors
// Accelerated reduction for k vectors at once: per vector j, each thread
// accumulates two elements per grid-stride pass into its stripe of shared
// memory (blockSize fixed at 128), then all stripes are tree-reduced and one
// partial sum per (block, vector) is written to vtmp2[blockIdx.x + j*n].
// NOTE(review): volatile warp tail assumes pre-Volta warp synchrony.
__global__ void
magma_dblockreduce_kernel_fast(
    int Gs,
    int n,
    int k,
    double * vtmp,
    double * vtmp2 )
{
    extern __shared__ double temp[];  // k * blockSize doubles, set at launch
    int Idx = threadIdx.x;
    int blockSize = 128;
    int gridSize = blockSize * 2 * gridDim.x;
    int j;
    for( j=0; j<k; j++){
        int i = blockIdx.x * ( blockSize * 2 ) + Idx;
        temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0);
        while (i < Gs ) {
            temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
            temp[ Idx+j*(blockSize) ] +=
                ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
                : MAGMA_D_MAKE( 0.0, 0.0);
            i += gridSize;
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<k; j++){
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
        }
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
        __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
        }
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
        }
    }
#endif
    if ( Idx == 0 ){
        for( j=0; j<k; j++){
            vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
        }
    }
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDouble_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDouble_ptr
r
@param[in]
d1 magmaDouble_ptr
workspace
@param[in]
d2 magmaDouble_ptr
workspace
@param[out]
skp magmaDouble_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
// Compute skp[j] = <v_j, r> for j = 0..k-1: a first kernel produces per-block
// partial sums into d1, then the "fast" reduction kernels are applied
// repeatedly, ping-ponging between the workspaces d1/d2 (aux1/aux2), until a
// single value per vector remains; that value is copied (device-to-device,
// asynchronously on `queue`) into skp.
// NOTE(review): the fast reductions run with 128 threads and Ms/2 shared
// bytes — halves of the dot-kernel launch — which matches their hard-coded
// blockSize of 128; confirm if BLOCK_SIZE is ever changed.
extern "C" magma_int_t
magma_dmdotc(
    magma_int_t n,
    magma_int_t k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    int Ms = (k)* (local_block_size) * sizeof( double ); // k vecs
    magmaDouble_ptr aux1 = d1, aux2 = d2;
    int b = 1;
    // Stage 1: per-block partial dot products into d1.
    if (k>1) {
        hipLaunchKernelGGL(( magma_dblockdot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, k, v, r, d1 );
    }
    else {
        hipLaunchKernelGGL(( magma_ddot_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v, r, d1 );
    }
    /*
    // not necessary to zero GPU mem
    magma_dgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 );
    magma_dgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 );
    //magmablas_dlaset( MagmaFull, n, k, d1, n, UNKNOWN );
    //magmablas_dlaset( MagmaFull, n, k, d2, n, UNKNOWN );
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        magma_dblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>>
            ( Gs.x, n, k, aux1, aux2 );
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else { aux2 = d1; aux1 = d2; }
    }
    for( int j=0; j<k; j++) {
        magma_dcopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN );
    }
    */
    // Stage 2: repeatedly reduce the Gs.x partials down to one per vector.
    // aux1 always holds the current input, aux2 the output; b tracks which
    // of d1/d2 plays which role after each swap.
    if ( k>1) {
        while( Gs.x > 1 ) {
            Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
            if ( Gs_next.x == 1 ) Gs_next.x = 2;
            hipLaunchKernelGGL(( magma_dblockreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
                Gs.x, n, k, aux1, aux2 );
            Gs_next.x = Gs_next.x /2;
            Gs.x = Gs_next.x;
            b = 1 - b;
            if ( b ) { aux1 = d1; aux2 = d2; }
            else { aux2 = d1; aux1 = d2; }
        }
    }
    else {
        while( Gs.x > 1 ) {
            Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
            if ( Gs_next.x == 1 ) Gs_next.x = 2;
            hipLaunchKernelGGL(( magma_dreduce_kernel_fast), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() ,
                Gs.x, n, aux1, aux2 );
            Gs_next.x = Gs_next.x /2;
            Gs.x = Gs_next.x;
            b = 1 - b;
            if ( b ) { aux1 = d1; aux2 = d2; }
            else { aux2 = d1; aux1 = d2; }
        }
    }
    // Final results live at aux1[j*n]; gather them with stride n into skp.
    magma_dcopyvector_async( k, aux1, n, skp, 1, queue );
    return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vecor product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDouble_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDouble_ptr
r
@param[in]
d1 magmaDouble_ptr
workspace
@param[in]
d2 magmaDouble_ptr
workspace
@param[out]
skp magmaDouble_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
// Chunked driver around magma_dmdotc: compute skp[j] = <v_j, r> for
// j = 0..k-1 in groups of at most 4 columns so that each call's working set
// stays cache-friendly (the chunk size is tuned to hardware and precision).
extern "C" magma_int_t
magma_dgemvmdot(
    magma_int_t n,
    magma_int_t k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    const int chunk = 4;
    int done = 0;
    // Process full chunks of `chunk` columns.
    while ( k - done > chunk ) {
        magma_dmdotc( n, chunk, v + done*n, r, d1, d2, skp + done, queue );
        done += chunk;
    }
    // Process the remaining 1..chunk columns.
    magma_dmdotc( n, k - done, v + done*n, r, d1, d2, skp + done, queue );
    return MAGMA_SUCCESS;
}
| 53ed5dd641cd9ba24e82c54c024034493800ae45.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zgemvmdot.cu, normal z -> d, Tue Aug 30 09:38:43 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_d
// initialize arrays with zero
// Zero the n-by-k column-major array d: one thread per row, each thread
// clearing its element in every one of the k columns.
__global__ void
magma_dgpumemzero(
    double * d,
    int n,
    int k )
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if ( row >= n )
        return;  // guard the grid tail
    for ( int col = 0; col < k; col++ ) {
        d[ row + col*n ] = MAGMA_D_MAKE( 0.0, 0.0 );
    }
}
// dot product
// Partial dot product <v, r>: each block loads one element per thread into
// dynamic shared memory, tree-reduces it, and writes one partial sum per
// block to vtmp[blockIdx.x].  Requires blockDim.x == BLOCK_SIZE (256):
// temp[Idx + 128] is read unconditionally in the first reduction step.
// NOTE(review): the volatile sub-warp tail relies on pre-Volta implicit
// warp synchrony; on Volta+ it would need __syncwarp() — confirm targets.
__global__ void
magma_ddot_kernel(
    int Gs,
    int n,
    double * v,
    double * r,
    double * vtmp)
{
    extern __shared__ double temp[];  // sized by the launch (>= blockDim.x doubles)
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    // Elementwise product, zero-padded past n.
    temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_D_MAKE( 0.0, 0.0);
    __syncthreads();
    if ( Idx < 128 ){
        temp[ Idx ] += temp[ Idx + 128 ];
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
    }
#endif
#if defined(PRECISION_d)
    // Warp-level tail: volatile prevents register caching between steps.
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
#if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
#endif
    if ( Idx == 0 ){
        vtmp[ blockIdx.x ] = temp[ 0 ];
    }
}
// Reduction stage 1 of k simultaneous dot products <v_j,r>, j=0..k-1.
// Shared memory holds k slots of blockDim.x doubles each (slot j at
// offset j*blockDim.x); all k slots are tree-reduced in lockstep and
// thread 0 writes the k partial sums to vtmp[blockIdx.x + j*n].
// NOTE(review): the last-32 phase relies on volatile warp-synchronous
// execution; on Volta+ this needs __syncwarp() -- confirm targets.
__global__ void
magma_dblockdot_kernel(
    int Gs,
    int n,
    int k,
    double * v,
    double * r,
    double * vtmp)
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;
    // k vectors v(i); out-of-range threads contribute zeros
    if (i<n){
        for( j=0; j<k; j++)
            temp[Idx+j*blockDim.x] = v[i+j*n] * r[i];
    }
    else {
        for( j=0; j<k; j++)
            temp[Idx+j*blockDim.x] = MAGMA_D_MAKE( 0.0, 0.0);
    }
    __syncthreads();
    if ( Idx < 128 ){
        for( j=0; j<k; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<k; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
        }
    }
    __syncthreads();
    #if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
        __syncthreads();
    }
    #endif
    #if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
    #endif
    #if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
    #endif
    // thread 0 writes one partial sum per vector, strided by n
    if ( Idx == 0 ){
        for( j=0; j<k; j++){
            vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
        }
    }
}
// Reduction stage 2 for k vectors (plain variant, currently only used
// in the commented-out path of magma_dmdotc): reduces Gs partial sums
// per vector, stored strided by n in vtmp, to one value per block and
// vector in vtmp2.  Same shared-memory layout and lockstep ladder as
// magma_dblockdot_kernel.
// NOTE(review): the volatile last-32 phase assumes warp-synchronous
// execution; needs __syncwarp() on Volta+ -- confirm targets.
__global__ void
magma_dblockreduce_kernel(
    int Gs,
    int n,
    int k,
    double * vtmp,
    double * vtmp2 )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + Idx;
    int j;
    // load one partial per thread and vector; zero past the end
    for( j=0; j<k; j++){
        temp[ Idx+j*blockDim.x ] = ( i < n ) ? vtmp[ i+j*n ]
                : MAGMA_D_MAKE( 0.0, 0.0);
    }
    __syncthreads();
    if ( Idx < 128 ){
        for( j=0; j<k; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<k; j++){
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
        }
    }
    __syncthreads();
    #if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
        __syncthreads();
    }
    #endif
    #if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
    #endif
    #if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
            temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
        }
    }
    #endif
    if ( Idx == 0 ){
        for( j=0; j<k; j++){
            vtmp2[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
        }
    }
}
// "Fast" reduction stage 2 for a single vector: each of the 128
// threads strides over vtmp[0..Gs) two elements at a time
// (blockSize = 128 is hard-wired; the launcher starts it with
// Bs.x/2 = 128 threads), then the block tree-reduces; thread 0 writes
// the block result to vtmp2[blockIdx.x].
// NOTE(review): volatile last-32 phase assumes warp-synchronous
// execution; needs __syncwarp() on Volta+ -- confirm targets.
__global__ void
magma_dreduce_kernel_fast( int Gs,
    int n,
    double * vtmp,
    double * vtmp2 )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int blockSize = 128;
    int gridSize = blockSize * 2 * gridDim.x;
    temp[Idx] = MAGMA_D_MAKE( 0.0, 0.0);
    int i = blockIdx.x * ( blockSize * 2 ) + Idx;
    // grid-stride accumulation: two loads per thread per pass
    while (i < Gs ) {
        temp[ Idx ] += vtmp[ i ];
        temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
            : MAGMA_D_MAKE( 0.0, 0.0);
        i += gridSize;
    }
    __syncthreads();
    if ( Idx < 64 ){
        temp[ Idx ] += temp[ Idx + 64 ];
    }
    __syncthreads();
    #if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
        temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
    }
    #endif
    #if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
    #endif
    #if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        temp2[ Idx ] += temp2[ Idx + 32 ];
        temp2[ Idx ] += temp2[ Idx + 16 ];
        temp2[ Idx ] += temp2[ Idx + 8 ];
        temp2[ Idx ] += temp2[ Idx + 4 ];
        temp2[ Idx ] += temp2[ Idx + 2 ];
        temp2[ Idx ] += temp2[ Idx + 1 ];
    }
    #endif
    if ( Idx == 0 ){
        vtmp2[ blockIdx.x ] = temp[ 0 ];
    }
}
// "Fast" reduction stage 2 for k vectors: like
// magma_dreduce_kernel_fast but with k shared-memory slots of
// blockSize = 128 doubles each (slot j at offset j*blockSize); the
// launcher starts it with Bs.x/2 = 128 threads and Ms/2 shared bytes.
// Per-vector partials live in vtmp strided by n; block results go to
// vtmp2[blockIdx.x + j*n].
// NOTE(review): volatile last-32 phase assumes warp-synchronous
// execution; needs __syncwarp() on Volta+ -- confirm targets.
__global__ void
magma_dblockreduce_kernel_fast(
    int Gs,
    int n,
    int k,
    double * vtmp,
    double * vtmp2 )
{
    extern __shared__ double temp[];
    int Idx = threadIdx.x;
    int blockSize = 128;
    int gridSize = blockSize * 2 * gridDim.x;
    int j;
    // accumulate two elements per pass into each vector's slot
    for( j=0; j<k; j++){
        int i = blockIdx.x * ( blockSize * 2 ) + Idx;
        temp[Idx+j*(blockSize)] = MAGMA_D_MAKE( 0.0, 0.0);
        while (i < Gs ) {
            temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
            temp[ Idx+j*(blockSize) ] +=
                ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
                : MAGMA_D_MAKE( 0.0, 0.0);
            i += gridSize;
        }
    }
    __syncthreads();
    if ( Idx < 64 ){
        for( j=0; j<k; j++){
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
        }
    }
    __syncthreads();
    #if defined(PRECISION_z) || defined(PRECISION_c)
    if( Idx < 32 ){
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
        __syncthreads();
        for( j=0; j<k; j++)
            temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
        __syncthreads();
    }
    #endif
    #if defined(PRECISION_d)
    if( Idx < 32 ){
        volatile double *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
        }
    }
    #endif
    #if defined(PRECISION_s)
    if( Idx < 32 ){
        volatile float *temp2 = temp;
        for( j=0; j<k; j++){
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
            temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
        }
    }
    #endif
    if ( Idx == 0 ){
        for( j=0; j<k; j++){
            vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
        }
    }
}
/**
Purpose
-------
Computes the scalar product of a set of vectors v_i such that
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDouble_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDouble_ptr
r
@param[in]
d1 magmaDouble_ptr
workspace
@param[in]
d2 magmaDouble_ptr
workspace
@param[out]
skp magmaDouble_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dmdotc(
    magma_int_t n,
    magma_int_t k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    // 256 threads per block, matching the 128-stride first step of the
    // stage-1 kernels above.
    int local_block_size=256;
    dim3 Bs( local_block_size );
    dim3 Gs( magma_ceildiv( n, local_block_size ) );
    dim3 Gs_next;
    int Ms = (k)* (local_block_size) * sizeof( double ); // k vecs
    // d1/d2 act as ping-pong workspaces for the multi-pass reduction;
    // b tracks which one currently holds the freshest partials
    magmaDouble_ptr aux1 = d1, aux2 = d2;
    int b = 1;
    // stage 1: per-block partial sums of the k dot products into d1
    if (k>1) {
        magma_dblockdot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, k, v, r, d1 );
    }
    else {
        magma_ddot_kernel<<< Gs, Bs, Ms, queue->cuda_stream() >>>( Gs.x, n, v, r, d1 );
    }
    /*
    // not necessary to zero GPU mem
    magma_dgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d1, n*k,1 );
    magma_dgpumemzero<<< Gs, Bs, 0, queue->cuda_stream >>>( d2, n*k,1 );
    //magmablas_dlaset( MagmaFull, n, k, d1, n, UNKNOWN );
    //magmablas_dlaset( MagmaFull, n, k, d2, n, UNKNOWN );
    while( Gs.x > 1 ) {
        Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
        magma_dblockreduce_kernel<<< Gs_next.x, Bs.x, Ms, queue->cuda_stream >>>
            ( Gs.x, n, k, aux1, aux2 );
        Gs.x = Gs_next.x;
        b = 1 - b;
        if ( b ) { aux1 = d1; aux2 = d2; }
        else   { aux2 = d1; aux1 = d2; }
    }
    for( int j=0; j<k; j++) {
        magma_dcopyvector( 1, aux1+j*n, 1, skp+j, 1, UNKNOWN );
    }
    */
    // stage 2: repeatedly fold the Gs.x partials with the "fast"
    // kernels (half the threads / shared memory, two loads per thread)
    // until one value per dot product remains.  Gs_next is forced to
    // an even value so the halved grid is never zero.
    if ( k>1) {
        while( Gs.x > 1 ) {
            Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
            if ( Gs_next.x == 1 ) Gs_next.x = 2;
            magma_dblockreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
                ( Gs.x, n, k, aux1, aux2 );
            Gs_next.x = Gs_next.x /2;
            Gs.x = Gs_next.x;
            b = 1 - b;
            if ( b ) { aux1 = d1; aux2 = d2; }
            else   { aux2 = d1; aux1 = d2; }
        }
    }
    else {
        while( Gs.x > 1 ) {
            Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
            if ( Gs_next.x == 1 ) Gs_next.x = 2;
            magma_dreduce_kernel_fast<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>>
                ( Gs.x, n, aux1, aux2 );
            Gs_next.x = Gs_next.x /2;
            Gs.x = Gs_next.x;
            b = 1 - b;
            if ( b ) { aux1 = d1; aux2 = d2; }
            else   { aux2 = d1; aux1 = d2; }
        }
    }
    // the k results sit strided by n in the current workspace; gather
    // them into skp asynchronously on the queue's stream
    magma_dcopyvector_async( k, aux1, n, skp, 1, queue );
    return MAGMA_SUCCESS;
}
/**
Purpose
-------
This is an extension of the merged dot product above by chunking
the set of vectors v_i such that the data always fits into cache.
It is equivalent to a matrix vector product Vr where V
contains few rows and many columns. The computation is the same:
skp = ( <v_0,r>, <v_1,r>, .. )
Returns the vector skp.
Arguments
---------
@param[in]
n int
length of v_i and r
@param[in]
k int
# vectors v_i
@param[in]
v magmaDouble_ptr
v = (v_0 .. v_i.. v_k)
@param[in]
r magmaDouble_ptr
r
@param[in]
d1 magmaDouble_ptr
workspace
@param[in]
d2 magmaDouble_ptr
workspace
@param[out]
skp magmaDouble_ptr
vector[k] of scalar products (<v_i,r>...)
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_dgemvmdot(
    magma_int_t n,
    magma_int_t k,
    magmaDouble_ptr v,
    magmaDouble_ptr r,
    magmaDouble_ptr d1,
    magmaDouble_ptr d2,
    magmaDouble_ptr skp,
    magma_queue_t queue )
{
    int rows_left = k;
    int offset = 0;
    int chunk_size = 4;
    // process in chunks of chunk_size (currently 4) vectors at a time -
    // the chunk size has to be adapted to hardware and precision
    while( rows_left > (chunk_size) ) {
        magma_dmdotc( n, chunk_size, v+offset*n, r, d1, d2, skp+offset, queue );
        offset = offset + chunk_size;
        rows_left = rows_left-chunk_size;
    }
    // process the remaining (<= chunk_size) vectors
    magma_dmdotc( n, rows_left, v+offset*n, r, d1, d2, skp+offset, queue );
    return MAGMA_SUCCESS;
}
|
71bd6ebf27b144829506578f2bd1963fe307f849.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#define UINT64 long long
// Element-wise modular addition: c[i] = (a[i] + b[i]) mod m[i].
// One thread per element; the launch configuration must cover the
// arrays exactly (there is no bounds check).
__global__ void
sumKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const UINT64 sum = a[i] + b[i];
    c[i] = sum % m[i];
}
// Element-wise modular subtraction: c[i] = (a[i] - b[i]) mod m[i],
// kept non-negative by adding m[i] before taking the remainder.
// One thread per element; the launch must cover the arrays exactly.
__global__ void
diffKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const UINT64 shifted = m[i] + a[i] - b[i];
    c[i] = shifted % m[i];
}
// Element-wise modular multiplication: c[i] = (a[i] * b[i]) mod m[i].
// One thread per element; the launch must cover the arrays exactly.
// NOTE(review): the product can overflow long long for large operands
// -- confirm the intended operand range.
__global__ void
mulKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const UINT64 product = a[i] * b[i];
    c[i] = product % m[i];
}
// Modular division by exhaustive search: finds the smallest i in
// [0, m[index]) with i*b[index] congruent to a[index] (mod m[index])
// and stores it in c[index].  O(m) work per element.
// If no such i exists, c[index] is left untouched -- callers must
// pre-initialize c if they need to detect that case.
// NOTE(review): i*b[index] can overflow long long for large operands
// -- confirm the intended operand range.
__global__ void
divKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    for ( int i = 0; i < m[ index ]; i++ )
        if ( ( i*b[ index ] + m[ index ] ) % m[ index ] == ( a[ index ] % m[ index ] ) )
        {
            c[ index ] = i;
            break;
        }
}
// Dispatch one of the modular-arithmetic kernels on device buffers.
// operationType: 1 = addition, 2 = subtraction, 3 = multiplication,
//                4 = division.  Any other value launches nothing.
void doOperation( UINT64 * aDev, UINT64 * bDev, UINT64 * mDev, UINT64 * cDev, int operationType, const dim3 & threads, const dim3 & blocks )
{
    if ( operationType == 1 )
        hipLaunchKernelGGL(( sumKernel), dim3(blocks), dim3(threads), 0, 0, aDev, bDev, mDev, cDev);
    else if ( operationType == 2 )
        hipLaunchKernelGGL(( diffKernel), dim3(blocks), dim3(threads), 0, 0, aDev, bDev, mDev, cDev);
    else if ( operationType == 3 )
        hipLaunchKernelGGL(( mulKernel), dim3(blocks), dim3(threads), 0, 0, aDev, bDev, mDev, cDev);
    else if ( operationType == 4 )
        // BUG FIX: operation 4 previously launched mulKernel a second
        // time, leaving divKernel unreachable; division must dispatch
        // divKernel.
        hipLaunchKernelGGL(( divKernel), dim3(blocks), dim3(threads), 0, 0, aDev, bDev, mDev, cDev);
}
| 71bd6ebf27b144829506578f2bd1963fe307f849.cu | #include <cuda_runtime.h>
#define UINT64 long long
// Element-wise modular addition: c[idx] = (a[idx] + b[idx]) mod m[idx].
// One thread per element; the launch configuration must cover the
// arrays exactly (there is no bounds check).
__global__ void
sumKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    c[idx] = ( a[idx] + b[idx] ) % m[idx];
}
// Element-wise modular subtraction: c[idx] = (a[idx] - b[idx]) mod
// m[idx], kept non-negative by adding m[idx] before the remainder.
// One thread per element; the launch must cover the arrays exactly.
__global__ void
diffKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    c[idx] = ( m[idx] + a[idx] - b[idx] ) % m[idx];
}
// Element-wise modular multiplication: c[idx] = (a[idx]*b[idx]) mod
// m[idx].  One thread per element; launch must cover the arrays.
// NOTE(review): the product can overflow long long for large operands
// -- confirm the intended operand range.
__global__ void
mulKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    c[idx] = ( a[idx] * b[idx] ) % m[idx];
}
// Modular division by exhaustive search: finds the smallest i in
// [0, m[index]) with i*b[index] congruent to a[index] (mod m[index])
// and stores it in c[index].  O(m) work per element.
// If no such i exists, c[index] is left untouched -- callers must
// pre-initialize c if they need to detect that case.
// NOTE(review): i*b[index] can overflow long long for large operands
// -- confirm the intended operand range.
__global__ void
divKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    for ( int i = 0; i < m[ index ]; i++ )
        if ( ( i*b[ index ] + m[ index ] ) % m[ index ] == ( a[ index ] % m[ index ] ) )
        {
            c[ index ] = i;
            break;
        }
}
// Dispatch one of the modular-arithmetic kernels on device buffers.
// operationType: 1 = addition, 2 = subtraction, 3 = multiplication,
//                4 = division.  Any other value launches nothing.
void doOperation( UINT64 * aDev, UINT64 * bDev, UINT64 * mDev, UINT64 * cDev, int operationType, const dim3 & threads, const dim3 & blocks )
{
    if ( operationType == 1 )
        sumKernel<<<blocks, threads>>> (aDev, bDev, mDev, cDev);
    else if ( operationType == 2 )
        diffKernel<<<blocks, threads>>> (aDev, bDev, mDev, cDev);
    else if ( operationType == 3 )
        mulKernel<<<blocks, threads>>> (aDev, bDev, mDev, cDev);
    else if ( operationType == 4 )
        // BUG FIX: operation 4 previously launched mulKernel a second
        // time, leaving divKernel unreachable; division must dispatch
        // divKernel.
        divKernel<<<blocks, threads>>> (aDev, bDev, mDev, cDev);
}
|
81d493ae5ff4d57172275cf3e4dc5cc82231b124.hip | // !!! This is a file automatically generated by hipify!!!
#include"fft.h"
/*
 * Forward FFT of the host buffer t (m*n*k*l complex samples) into the
 * host buffer ft.  The data is handed to hipFFT as a 2D (m*n*l) x k
 * C2C transform.  Device buffers are allocated and freed per call;
 * allocation/copy failures abort via assert, FFT API failures print
 * and exit(-1).
 */
void fft(int m,int n,int k,int l,hipfftComplex* t,hipfftComplex* ft){
    // first dimension of the 2D plan
    int a=m*n*l;
    hipfftComplex* odata;
    hipfftComplex* idata;
    hipError_t stat1=hipSuccess;
    hipError_t stat2=hipSuccess;
    hipError_t stat3=hipSuccess;
    hipError_t stat4=hipSuccess;
    hipfftResult cufftstat1=HIPFFT_SUCCESS;
    hipfftResult cufftstat2=HIPFFT_SUCCESS;
    hipfftResult cufftstat3=HIPFFT_SUCCESS;
    stat1=hipMalloc((void**)&odata,sizeof(hipfftComplex)*m*n*k*l);
    stat2=hipMalloc((void**)&idata,sizeof(hipfftComplex)*m*n*k*l);
    assert(stat1==hipSuccess);
    assert(stat2==hipSuccess);
    stat3=hipMemcpy(
        idata,
        t,
        sizeof(hipfftComplex)*m*n*k*l
        ,hipMemcpyHostToDevice);
    assert(hipSuccess==stat3);
    hipfftHandle plan;
    cufftstat1=hipfftPlan2d(&plan,a,k,HIPFFT_C2C);
    cufftstat2=hipfftExecC2C(
        plan,
        (hipfftComplex*)idata,
        (hipfftComplex*)odata,
        HIPFFT_FORWARD);
    // BUG FIX: the synchronize result used to be ignored here although
    // the inverse transform (ifft) checks it; check it so execution
    // errors are not silently carried into the device-to-host copy.
    // Release the plan and buffers before bailing out.
    if(hipDeviceSynchronize()!=hipSuccess){
        printf("cuda synchronize failed");
        hipfftDestroy(plan);
        hipFree(odata);
        hipFree(idata);
        return;
    }
    stat4=hipMemcpy(
        ft,
        odata,
        sizeof(hipfftComplex)*m*n*k*l,
        hipMemcpyDeviceToHost);
    assert(stat4==hipSuccess);
    cufftstat3=hipfftDestroy(plan);
    if(cufftstat1!=HIPFFT_SUCCESS||
       cufftstat2!=HIPFFT_SUCCESS||
       cufftstat3!=HIPFFT_SUCCESS){
        printf("cufft API error");
        exit(-1);
    }
    hipFree(odata);
    hipFree(idata);
}
/*
 * Inverse FFT of the host buffer t (m*n*k*l complex samples) into ft.
 * Mirrors fft() above but executes HIPFFT_BACKWARD on a 2D
 * (m*n*l) x k C2C plan.
 * NOTE: cuFFT/hipFFT inverse transforms are unnormalized -- the caller
 * must scale by the transform size to recover the original data.
 * NOTE(review): on a failed hipDeviceSynchronize this returns early
 * and leaks plan/odata/idata -- confirm and add cleanup.
 */
void ifft(int m ,int n,int k,int l,hipfftComplex* t,hipfftComplex* ft){
    // first dimension of the 2D plan
    int a=m*n*l;
    hipfftComplex* odata;
    hipfftComplex* idata;
    hipError_t stat1=hipSuccess;
    hipError_t stat2=hipSuccess;
    hipError_t stat3=hipSuccess;
    hipError_t stat4=hipSuccess;
    hipfftResult cufftstat1=HIPFFT_SUCCESS;
    hipfftResult cufftstat2=HIPFFT_SUCCESS;
    hipfftResult cufftstat3=HIPFFT_SUCCESS;
    stat1=hipMalloc((void**)&odata,sizeof(hipfftComplex)*m*n*k*l);
    stat2=hipMalloc((void**)&idata,sizeof(hipfftComplex)*m*n*k*l);
    stat3=hipMemcpy(
        idata,
        t,
        sizeof(hipfftComplex)*m*n*k*l,
        hipMemcpyHostToDevice);
    hipfftHandle plan;
    cufftstat1=hipfftPlan2d(&plan,a,k,HIPFFT_C2C);
    cufftstat2=hipfftExecC2C(
        plan,
        (hipfftComplex*)idata,
        (hipfftComplex*)odata,
        HIPFFT_BACKWARD);
    if(hipDeviceSynchronize()!=hipSuccess){
        printf("cuda synchronize failed");
        return;
    }
    stat4=hipMemcpy(
        ft,
        odata,
        sizeof(hipfftComplex)*m*n*k*l,
        hipMemcpyDeviceToHost);
    cufftstat3=hipfftDestroy(plan);
    // unlike fft(), all status checks happen only after the full
    // device round trip has completed
    assert(stat1==hipSuccess);
    assert(stat2==hipSuccess);
    assert(stat3==hipSuccess);
    assert(stat4==hipSuccess);
    assert(cufftstat1==HIPFFT_SUCCESS);
    assert(cufftstat2==HIPFFT_SUCCESS);
    assert(cufftstat3==HIPFFT_SUCCESS);
    hipFree(odata);
    hipFree(idata);
}
| 81d493ae5ff4d57172275cf3e4dc5cc82231b124.cu | #include"fft.h"
/*
 * Forward FFT of the host buffer t (m*n*k*l complex samples) into the
 * host buffer ft.  The data is handed to cuFFT as a 2D (m*n*l) x k
 * C2C transform.  Device buffers are allocated and freed per call;
 * allocation/copy failures abort via assert, FFT API failures print
 * and exit(-1).
 */
void fft(int m,int n,int k,int l,cufftComplex* t,cufftComplex* ft){
    // first dimension of the 2D plan
    int a=m*n*l;
    cufftComplex* odata;
    cufftComplex* idata;
    cudaError_t stat1=cudaSuccess;
    cudaError_t stat2=cudaSuccess;
    cudaError_t stat3=cudaSuccess;
    cudaError_t stat4=cudaSuccess;
    cufftResult cufftstat1=CUFFT_SUCCESS;
    cufftResult cufftstat2=CUFFT_SUCCESS;
    cufftResult cufftstat3=CUFFT_SUCCESS;
    stat1=cudaMalloc((void**)&odata,sizeof(cufftComplex)*m*n*k*l);
    stat2=cudaMalloc((void**)&idata,sizeof(cufftComplex)*m*n*k*l);
    assert(stat1==cudaSuccess);
    assert(stat2==cudaSuccess);
    stat3=cudaMemcpy(
        idata,
        t,
        sizeof(cufftComplex)*m*n*k*l
        ,cudaMemcpyHostToDevice);
    assert(cudaSuccess==stat3);
    cufftHandle plan;
    cufftstat1=cufftPlan2d(&plan,a,k,CUFFT_C2C);
    cufftstat2=cufftExecC2C(
        plan,
        (cufftComplex*)idata,
        (cufftComplex*)odata,
        CUFFT_FORWARD);
    // BUG FIX: the synchronize result used to be ignored here although
    // the inverse transform (ifft) checks it; check it so execution
    // errors are not silently carried into the device-to-host copy.
    // Release the plan and buffers before bailing out.
    if(cudaDeviceSynchronize()!=cudaSuccess){
        printf("cuda synchronize failed");
        cufftDestroy(plan);
        cudaFree(odata);
        cudaFree(idata);
        return;
    }
    stat4=cudaMemcpy(
        ft,
        odata,
        sizeof(cufftComplex)*m*n*k*l,
        cudaMemcpyDeviceToHost);
    assert(stat4==cudaSuccess);
    cufftstat3=cufftDestroy(plan);
    if(cufftstat1!=CUFFT_SUCCESS||
       cufftstat2!=CUFFT_SUCCESS||
       cufftstat3!=CUFFT_SUCCESS){
        printf("cufft API error");
        exit(-1);
    }
    cudaFree(odata);
    cudaFree(idata);
}
/*
 * Inverse FFT of the host buffer t (m*n*k*l complex samples) into ft.
 * Mirrors fft() above but executes CUFFT_INVERSE on a 2D
 * (m*n*l) x k C2C plan.
 * NOTE: cuFFT inverse transforms are unnormalized -- the caller must
 * scale by the transform size to recover the original data.
 * NOTE(review): on a failed cudaDeviceSynchronize this returns early
 * and leaks plan/odata/idata -- confirm and add cleanup.
 */
void ifft(int m ,int n,int k,int l,cufftComplex* t,cufftComplex* ft){
    // first dimension of the 2D plan
    int a=m*n*l;
    cufftComplex* odata;
    cufftComplex* idata;
    cudaError_t stat1=cudaSuccess;
    cudaError_t stat2=cudaSuccess;
    cudaError_t stat3=cudaSuccess;
    cudaError_t stat4=cudaSuccess;
    cufftResult cufftstat1=CUFFT_SUCCESS;
    cufftResult cufftstat2=CUFFT_SUCCESS;
    cufftResult cufftstat3=CUFFT_SUCCESS;
    stat1=cudaMalloc((void**)&odata,sizeof(cufftComplex)*m*n*k*l);
    stat2=cudaMalloc((void**)&idata,sizeof(cufftComplex)*m*n*k*l);
    stat3=cudaMemcpy(
        idata,
        t,
        sizeof(cufftComplex)*m*n*k*l,
        cudaMemcpyHostToDevice);
    cufftHandle plan;
    cufftstat1=cufftPlan2d(&plan,a,k,CUFFT_C2C);
    cufftstat2=cufftExecC2C(
        plan,
        (cufftComplex*)idata,
        (cufftComplex*)odata,
        CUFFT_INVERSE);
    if(cudaDeviceSynchronize()!=cudaSuccess){
        printf("cuda synchronize failed");
        return;
    }
    stat4=cudaMemcpy(
        ft,
        odata,
        sizeof(cufftComplex)*m*n*k*l,
        cudaMemcpyDeviceToHost);
    cufftstat3=cufftDestroy(plan);
    // unlike fft(), all status checks happen only after the full
    // device round trip has completed
    assert(stat1==cudaSuccess);
    assert(stat2==cudaSuccess);
    assert(stat3==cudaSuccess);
    assert(stat4==cudaSuccess);
    assert(cufftstat1==CUFFT_SUCCESS);
    assert(cufftstat2==CUFFT_SUCCESS);
    assert(cufftstat3==CUFFT_SUCCESS);
    cudaFree(odata);
    cudaFree(idata);
}
|
e68257860cb0d1d55ef2daa1e8ce8f157a7ef292.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is being provided
* under the terms and conditions of a Source Code License Agreement.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
*
* -- LAPACK auxiliary routine (version 3.2) --
* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
* November 2006
*
* .. Scalar Arguments ..
INTEGER INCX, K1, K2, LDA, N
* ..
* .. Array Arguments ..
INTEGER IPIV( * )
DOUBLE PRECISION A( LDA, * )
* ..
*
* Purpose
* =======
*
* DLASWP performs a series of row interchanges on the matrix A.
* One row interchange is initiated for each of rows K1 through K2 of A.
*
* Arguments
* =========
*
* N (input) INTEGER
* The number of columns of the matrix A.
*
* A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
* On entry, the matrix of column dimension N to which the row
* interchanges will be applied.
* On exit, the permuted matrix.
*
* LDA (input) INTEGER
* The leading dimension of the array A.
* K1 (input) INTEGER
* The first element of IPIV for which a row interchange will
* be done.
*
* K2 (input) INTEGER
* The last element of IPIV for which a row interchange will
* be done.
*
* IPIV (input) INTEGER array, dimension (K2*abs(INCX))
* The vector of pivot indices. Only the elements in positions
* K1 through K2 of IPIV are accessed.
* IPIV(K) = L implies rows K and L are to be interchanged.
*
* INCX (input) INTEGER
* The increment between successive values of IPIV. If IPIV
* is negative, the pivots are applied in reverse order.
*
*/
#define DLASWP_BLOCK_SIZE 256
// Device version of LAPACK dlaswp (see header comment above): applies
// the row interchanges ipiv[k1..k2-1] to A in a single kernel, folding
// in the loop from the CPU so there is no going in and out of the gpu.
// Each thread owns the columns col_id = threadIdx.x,
// threadIdx.x + blockDim.x, ... and swaps rows k1 and ipiv[k1] within
// those columns for each pivot in turn.
__global__ void dlaswp(int n, double *A, int lda, int *ipiv, int k1, int k2)
{
    for (; k1 < k2 ; ++k1)
    {
        int src_row = k1;
        int dst_row = ipiv[k1];
        for (int col_id = threadIdx.x ; col_id < n ; col_id += blockDim.x)
        {
            double A_tmp = A[col_id*lda + src_row];
            A[col_id*lda + src_row] = A[col_id*lda + dst_row];
            A[col_id*lda + dst_row] = A_tmp;
        }
        // BUG FIX: __syncthreads() used to sit inside the column loop.
        // When n is not a multiple of blockDim.x (or n < blockDim.x),
        // threads execute that loop different numbers of times, and a
        // barrier in divergent control flow is undefined behavior
        // (possible hang).  All threads reach this point, so the
        // barrier is legal here.  (Threads touch disjoint columns, so
        // it is arguably unnecessary, but it is kept per pivot step
        // conservatively.)
        __syncthreads();
        // TODO: we have very poor coalescing here. Can't we do better? Launch one warp of threads per column and
        // ask those threads to reorder the column, for example.
    }
}
| e68257860cb0d1d55ef2daa1e8ce8f157a7ef292.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is being provided
* under the terms and conditions of a Source Code License Agreement.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
*
* -- LAPACK auxiliary routine (version 3.2) --
* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
* November 2006
*
* .. Scalar Arguments ..
INTEGER INCX, K1, K2, LDA, N
* ..
* .. Array Arguments ..
INTEGER IPIV( * )
DOUBLE PRECISION A( LDA, * )
* ..
*
* Purpose
* =======
*
* DLASWP performs a series of row interchanges on the matrix A.
* One row interchange is initiated for each of rows K1 through K2 of A.
*
* Arguments
* =========
*
* N (input) INTEGER
* The number of columns of the matrix A.
*
* A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
* On entry, the matrix of column dimension N to which the row
* interchanges will be applied.
* On exit, the permuted matrix.
*
* LDA (input) INTEGER
* The leading dimension of the array A.
* K1 (input) INTEGER
* The first element of IPIV for which a row interchange will
* be done.
*
* K2 (input) INTEGER
* The last element of IPIV for which a row interchange will
* be done.
*
* IPIV (input) INTEGER array, dimension (K2*abs(INCX))
* The vector of pivot indices. Only the elements in positions
* K1 through K2 of IPIV are accessed.
* IPIV(K) = L implies rows K and L are to be interchanged.
*
* INCX (input) INTEGER
* The increment between successive values of IPIV. If IPIV
* is negative, the pivots are applied in reverse order.
*
*/
#define DLASWP_BLOCK_SIZE 256
// Device version of LAPACK dlaswp (see header comment above): applies
// the row interchanges ipiv[k1..k2-1] to A in a single kernel, folding
// in the loop from the CPU so there is no going in and out of the gpu.
// Each thread owns the columns col_id = threadIdx.x,
// threadIdx.x + blockDim.x, ... and swaps rows k1 and ipiv[k1] within
// those columns for each pivot in turn.
__global__ void dlaswp(int n, double *A, int lda, int *ipiv, int k1, int k2)
{
    for (; k1 < k2 ; ++k1)
    {
        int src_row = k1;
        int dst_row = ipiv[k1];
        for (int col_id = threadIdx.x ; col_id < n ; col_id += blockDim.x)
        {
            double A_tmp = A[col_id*lda + src_row];
            A[col_id*lda + src_row] = A[col_id*lda + dst_row];
            A[col_id*lda + dst_row] = A_tmp;
        }
        // BUG FIX: __syncthreads() used to sit inside the column loop.
        // When n is not a multiple of blockDim.x (or n < blockDim.x),
        // threads execute that loop different numbers of times, and a
        // barrier in divergent control flow is undefined behavior
        // (possible hang).  All threads reach this point, so the
        // barrier is legal here.  (Threads touch disjoint columns, so
        // it is arguably unnecessary, but it is kept per pivot step
        // conservatively.)
        __syncthreads();
        // TODO: we have very poor coalescing here. Can't we do better? Launch one warp of threads per column and
        // ask those threads to reorder the column, for example.
    }
}
|
bd69d658e0466e1114680bb089ccb00b73fd79d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/prune_gate_by_capacity_kernel.h"
namespace phi {
// Launch configuration: 512 threads per block, grid capped at 4096
// blocks.  The kernel below iterates with CUDA_KERNEL_LOOP, so a
// capped grid is presumably still able to cover all elements --
// TODO(review): confirm the macro grid-strides.
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
// Blocks needed to cover N elements, capped at kNumMaxinumNumBlocks.
static inline int NumBlocks(const int N) {
  return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
               kNumMaxinumNumBlocks);
}
// For each token i: atomically take one unit of capacity from its
// chosen expert (expert_count_data[gate_idx_data[i]] is decremented).
// If the capacity was already exhausted -- i.e. the value *before* the
// decrement was <= 0 -- the token is dropped by writing gate index -1;
// otherwise the original gate index is kept.
// Counts can end up negative, so afterwards they are only meaningful
// as "remaining capacity", not as exact counts.
template <typename T1, typename T2>
__global__ void prune_gate_by_capacity_kernel(const T1* gate_idx_data,
                                              T1* new_gate_idx_data,
                                              T2* expert_count_data,
                                              const int64_t batch_size) {
  CUDA_KERNEL_LOOP(i, batch_size) {
    // atomicAdd semantics: returns the value before adding -1
    auto orig_cap =
        phi::CudaAtomicAdd(expert_count_data + gate_idx_data[i], -1);
    if (orig_cap <= 0) {
      new_gate_idx_data[i] = -1;
    } else {
      new_gate_idx_data[i] = gate_idx_data[i];
    }
  }
}
// Helper functor bridging the dtype dispatch in VisitType to the
// kernel launch: T1 is the gate-index dtype, T2 (the template
// parameter of apply()) is the expert-count dtype chosen at dispatch
// time.  Holds borrowed pointers only; no ownership is taken.
template <typename Context, typename T1>
class PruneGateByCapacityFunctor {
 public:
  PruneGateByCapacityFunctor(const Context& dev_ctx,
                             const phi::DenseTensor* gate_idx,
                             phi::DenseTensor* expert_count_out,
                             T1* new_gate_idx_data)
      : dev_ctx_(dev_ctx),
        gate_idx_(gate_idx),
        expert_count_out_(expert_count_out),
        new_gate_idx_data_(new_gate_idx_data) {}
  // Launch the pruning kernel on the device context's stream with one
  // logical thread per token (grid capped by NumBlocks).
  template <typename T2>
  void apply() {
    auto batch_size = gate_idx_->numel();
    auto* gate_idx_data = gate_idx_->data<T1>();
    auto* expert_count_out_data = expert_count_out_->data<T2>();
    int blocks = NumBlocks(batch_size);
    int threads = kNumCUDAThreads;
    hipLaunchKernelGGL(( prune_gate_by_capacity_kernel<T1, T2>)
        , dim3(blocks), dim3(threads), 0, dev_ctx_.stream(), gate_idx_data,
        new_gate_idx_data_,
        expert_count_out_data,
        batch_size);
  }
 private:
  const Context& dev_ctx_;           // borrowed device context
  const phi::DenseTensor* gate_idx_; // per-token expert indices (input)
  phi::DenseTensor* expert_count_out_; // mutable capacity counters
  T1* new_gate_idx_data_;            // output buffer (pre-allocated)
};
// Dispatch on the expert_count dtype; only int64 is supported.
template <typename Visitor>
static void VisitType(phi::DataType type, Visitor visitor) {
  if (type == phi::DataType::INT64) {
    visitor.template apply<int64_t>();
  } else {
    // NOTE(review): the last argument is the *string literal*
    // "framework::DataTypeToString(type)", not a function call, so the
    // %s placeholder prints that literal instead of the actual dtype
    // name -- looks like a leftover from a refactor; confirm the
    // intended conversion helper and fix upstream.
    PADDLE_THROW(phi::errors::InvalidArgument(
        "The received values gate_id type %s can not meet input requirements. "
        "Because the given gate_id data type of operators must be "
        "int64. Please input appropriate gate_id again! ",
        "framework::DataTypeToString(type)"));
  }
}
template <typename T, typename Context>
void PruneGateByCapacityKernel(const Context& dev_ctx,
const DenseTensor& gate_idx,
const DenseTensor& expert_count,
int64_t n_expert,
int64_t n_worker,
DenseTensor* new_gate_idx) {
auto* gate_idx_ptr = &gate_idx;
// auto* expert_count_out =
// context.Output<phi::DenseTensor>("ExpertCountOut");
auto* new_gate_idx_data = dev_ctx.template Alloc<T>(new_gate_idx);
phi::DenseTensor expert_count_out;
phi::Copy(
dev_ctx, expert_count, dev_ctx.GetPlace(), false, &expert_count_out);
PruneGateByCapacityFunctor<Context, T> functor(
dev_ctx, gate_idx_ptr, &expert_count_out, new_gate_idx_data);
VisitType(expert_count.type(), functor);
}
} // namespace phi
PD_REGISTER_KERNEL(prune_gate_by_capacity,
GPU,
ALL_LAYOUT,
phi::PruneGateByCapacityKernel,
int64_t) {}
| bd69d658e0466e1114680bb089ccb00b73fd79d7.cu | // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/prune_gate_by_capacity_kernel.h"
namespace phi {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <typename T1, typename T2>
__global__ void prune_gate_by_capacity_kernel(const T1* gate_idx_data,
T1* new_gate_idx_data,
T2* expert_count_data,
const int64_t batch_size) {
CUDA_KERNEL_LOOP(i, batch_size) {
auto orig_cap =
phi::CudaAtomicAdd(expert_count_data + gate_idx_data[i], -1);
if (orig_cap <= 0) {
new_gate_idx_data[i] = -1;
} else {
new_gate_idx_data[i] = gate_idx_data[i];
}
}
}
template <typename Context, typename T1>
class PruneGateByCapacityFunctor {
public:
PruneGateByCapacityFunctor(const Context& dev_ctx,
const phi::DenseTensor* gate_idx,
phi::DenseTensor* expert_count_out,
T1* new_gate_idx_data)
: dev_ctx_(dev_ctx),
gate_idx_(gate_idx),
expert_count_out_(expert_count_out),
new_gate_idx_data_(new_gate_idx_data) {}
template <typename T2>
void apply() {
auto batch_size = gate_idx_->numel();
auto* gate_idx_data = gate_idx_->data<T1>();
auto* expert_count_out_data = expert_count_out_->data<T2>();
int blocks = NumBlocks(batch_size);
int threads = kNumCUDAThreads;
prune_gate_by_capacity_kernel<T1, T2>
<<<blocks, threads, 0, dev_ctx_.stream()>>>(gate_idx_data,
new_gate_idx_data_,
expert_count_out_data,
batch_size);
}
private:
const Context& dev_ctx_;
const phi::DenseTensor* gate_idx_;
phi::DenseTensor* expert_count_out_;
T1* new_gate_idx_data_;
};
template <typename Visitor>
static void VisitType(phi::DataType type, Visitor visitor) {
if (type == phi::DataType::INT64) {
visitor.template apply<int64_t>();
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"The received values gate_id type %s can not meet input requirements. "
"Because the given gate_id data type of operators must be "
"int64. Please input appropriate gate_id again! ",
"framework::DataTypeToString(type)"));
}
}
template <typename T, typename Context>
void PruneGateByCapacityKernel(const Context& dev_ctx,
const DenseTensor& gate_idx,
const DenseTensor& expert_count,
int64_t n_expert,
int64_t n_worker,
DenseTensor* new_gate_idx) {
auto* gate_idx_ptr = &gate_idx;
// auto* expert_count_out =
// context.Output<phi::DenseTensor>("ExpertCountOut");
auto* new_gate_idx_data = dev_ctx.template Alloc<T>(new_gate_idx);
phi::DenseTensor expert_count_out;
phi::Copy(
dev_ctx, expert_count, dev_ctx.GetPlace(), false, &expert_count_out);
PruneGateByCapacityFunctor<Context, T> functor(
dev_ctx, gate_idx_ptr, &expert_count_out, new_gate_idx_data);
VisitType(expert_count.type(), functor);
}
} // namespace phi
PD_REGISTER_KERNEL(prune_gate_by_capacity,
GPU,
ALL_LAYOUT,
phi::PruneGateByCapacityKernel,
int64_t) {}
|
69d2c9de0a160c809557794cd447bdad47f83db3.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
using namespace cv;
#define KERNEL_LENGTH 5
__constant__ float c_kernel[KERNEL_LENGTH*KERNEL_LENGTH];
inline float gauss(int x, int y, int mid, float sigma )
{
float temp = (pow(x-mid,2)+pow(y-mid,2))/sigma;
temp= exp(-temp);
return temp;
}
void getGaussianKernelBlock(int dim, float sigma,float *GaussianKernel )
{
assert(dim%2);
int mid = (dim-1)/2;
float total = 0;
for(int i = 0; i<dim;i++)
{
for(int j = 0; j<dim;j++)
{
total+=gauss(i,j,mid,sigma);
(GaussianKernel)[i*dim + j]=gauss(i,j,mid,sigma);
}
}
float newTotal=0;
for(int i = 0; i<dim;i++)
{
for(int j = 0; j<dim;j++)
{
(GaussianKernel)[i*dim + j]/=total;
newTotal += (GaussianKernel)[i*dim + j];
}
}
}
class convolutionFunctor //:public thrust::shared_unary_window_transform_functor<uchar>
{
public:
int dim;
convolutionFunctor(int dim)
{
this->dim =dim;
}
__device__ uchar operator() (const thrust::window_2d<uchar> & input_window,const thrust::window_2d<uchar> & output_window) const
{
uchar temp = 0;
for(int i = 0; i< dim; i++)
{
for(int j = 0; j<dim; j++)
{
temp+=input_window[make_int2(j,i)]*(c_kernel)[i*dim + j];
}
}
output_window[1][1]=temp;
return 0;
}
};
class pyrdownTransformFunctor
{
public:
thrust::block_2d<uchar> *inBlock;
pyrdownTransformFunctor(thrust::block_2d<uchar> * inBlock)
{
this->inBlock = inBlock->device_pointer;
}
__device__ void operator() (const thrust::window_2d<uchar> &outputWindow) const
{
int x_in, y_in;
if(outputWindow.start_x%2 && outputWindow.start_y%2)
{
x_in = outputWindow.start_x*2;
y_in = outputWindow.start_y*2;
outputWindow[0][0]=(*inBlock)[y_in][x_in];
}
}
};
int main(int argc, char const *argv[])
{
hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop,0);
Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat image;
int dim = 5;
int dim_image = 512;
if(argc ==2)
{
dim_image = atoi(argv[1]);
}
resize(small,image,Size(dim_image,dim_image));
float *hkernel = (float *) std::malloc(sizeof(float) * dim*dim);
getGaussianKernelBlock(dim,5,hkernel);
hipMemcpyToSymbol(c_kernel, hkernel, dim*dim * sizeof(float));
thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows);
thrust::block_2d<uchar> outBlock (image.cols/2,image.rows/2,0.0f);
thrust::block_2d<uchar> output_image_block(image.cols,image.rows);
thrust::block_2d<uchar> null_block (image.cols,image.rows);
uchar * img = (uchar * )malloc(sizeof(uchar)*(uchar_image_block.end()-uchar_image_block.begin()));
uchar * img1 = (uchar * )malloc(sizeof(uchar)*(outBlock.end()-outBlock.begin()));
for(int i = 0; i<image.cols*image.rows;i++)
{
img[i]=(uchar)image.ptr()[i];
}
uchar_image_block.upload(img);
thrust::window_vector<uchar> input_wv(&uchar_image_block,dim,dim,1,1);
thrust::window_vector<uchar> output_wv(&output_image_block,dim,dim,1,1);
thrust::transform(input_wv.begin(),input_wv.end(),output_wv.begin(),null_block.begin(),convolutionFunctor(dim));
thrust::window_vector<uchar> inputVector(&outBlock,1,1,1,1);
pyrdownTransformFunctor ptf(&output_image_block);
thrust::for_each(inputVector.begin(),inputVector.end(),ptf);
unsigned char * outputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(outBlock.end()-outBlock.begin()));
output_image_block.download(&img);
for(int i = 0; i<(outBlock.end()-outBlock.begin());i++)
{
outputFloatImageData[i]=(unsigned char)img1[i];
}
Mat output (Size(image.cols/2,image.rows/2),CV_8UC1,outputFloatImageData);
#ifdef OWRITE
imwrite("input.png",image);
imwrite("output.png",output);
#endif
#ifdef SHOW
imshow("input.png",image);
imshow("output.png",output);
waitKey(0);
#endif
free (img);
free (img1);
free (outputFloatImageData);
return 0;
}
| 69d2c9de0a160c809557794cd447bdad47f83db3.cu | #include <opencv2/opencv.hpp>
#include <thrust/window_2d.h>
using namespace cv;
#define KERNEL_LENGTH 5
__constant__ float c_kernel[KERNEL_LENGTH*KERNEL_LENGTH];
inline float gauss(int x, int y, int mid, float sigma )
{
float temp = (pow(x-mid,2)+pow(y-mid,2))/sigma;
temp= exp(-temp);
return temp;
}
void getGaussianKernelBlock(int dim, float sigma,float *GaussianKernel )
{
assert(dim%2);
int mid = (dim-1)/2;
float total = 0;
for(int i = 0; i<dim;i++)
{
for(int j = 0; j<dim;j++)
{
total+=gauss(i,j,mid,sigma);
(GaussianKernel)[i*dim + j]=gauss(i,j,mid,sigma);
}
}
float newTotal=0;
for(int i = 0; i<dim;i++)
{
for(int j = 0; j<dim;j++)
{
(GaussianKernel)[i*dim + j]/=total;
newTotal += (GaussianKernel)[i*dim + j];
}
}
}
class convolutionFunctor //:public thrust::shared_unary_window_transform_functor<uchar>
{
public:
int dim;
convolutionFunctor(int dim)
{
this->dim =dim;
}
__device__ uchar operator() (const thrust::window_2d<uchar> & input_window,const thrust::window_2d<uchar> & output_window) const
{
uchar temp = 0;
for(int i = 0; i< dim; i++)
{
for(int j = 0; j<dim; j++)
{
temp+=input_window[make_int2(j,i)]*(c_kernel)[i*dim + j];
}
}
output_window[1][1]=temp;
return 0;
}
};
class pyrdownTransformFunctor
{
public:
thrust::block_2d<uchar> *inBlock;
pyrdownTransformFunctor(thrust::block_2d<uchar> * inBlock)
{
this->inBlock = inBlock->device_pointer;
}
__device__ void operator() (const thrust::window_2d<uchar> &outputWindow) const
{
int x_in, y_in;
if(outputWindow.start_x%2 && outputWindow.start_y%2)
{
x_in = outputWindow.start_x*2;
y_in = outputWindow.start_y*2;
outputWindow[0][0]=(*inBlock)[y_in][x_in];
}
}
};
int main(int argc, char const *argv[])
{
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop,0);
Mat small = imread("car.jpg",CV_LOAD_IMAGE_GRAYSCALE);
Mat image;
int dim = 5;
int dim_image = 512;
if(argc ==2)
{
dim_image = atoi(argv[1]);
}
resize(small,image,Size(dim_image,dim_image));
float *hkernel = (float *) std::malloc(sizeof(float) * dim*dim);
getGaussianKernelBlock(dim,5,hkernel);
cudaMemcpyToSymbol(c_kernel, hkernel, dim*dim * sizeof(float));
thrust::block_2d<uchar> uchar_image_block (image.cols,image.rows);
thrust::block_2d<uchar> outBlock (image.cols/2,image.rows/2,0.0f);
thrust::block_2d<uchar> output_image_block(image.cols,image.rows);
thrust::block_2d<uchar> null_block (image.cols,image.rows);
uchar * img = (uchar * )malloc(sizeof(uchar)*(uchar_image_block.end()-uchar_image_block.begin()));
uchar * img1 = (uchar * )malloc(sizeof(uchar)*(outBlock.end()-outBlock.begin()));
for(int i = 0; i<image.cols*image.rows;i++)
{
img[i]=(uchar)image.ptr()[i];
}
uchar_image_block.upload(img);
thrust::window_vector<uchar> input_wv(&uchar_image_block,dim,dim,1,1);
thrust::window_vector<uchar> output_wv(&output_image_block,dim,dim,1,1);
thrust::transform(input_wv.begin(),input_wv.end(),output_wv.begin(),null_block.begin(),convolutionFunctor(dim));
thrust::window_vector<uchar> inputVector(&outBlock,1,1,1,1);
pyrdownTransformFunctor ptf(&output_image_block);
thrust::for_each(inputVector.begin(),inputVector.end(),ptf);
unsigned char * outputFloatImageData = (unsigned char *)malloc(sizeof(unsigned char)*(outBlock.end()-outBlock.begin()));
output_image_block.download(&img);
for(int i = 0; i<(outBlock.end()-outBlock.begin());i++)
{
outputFloatImageData[i]=(unsigned char)img1[i];
}
Mat output (Size(image.cols/2,image.rows/2),CV_8UC1,outputFloatImageData);
#ifdef OWRITE
imwrite("input.png",image);
imwrite("output.png",output);
#endif
#ifdef SHOW
imshow("input.png",image);
imshow("output.png",output);
waitKey(0);
#endif
free (img);
free (img1);
free (outputFloatImageData);
return 0;
}
|
7d196c5854790d548ac9ab8ee93e7dc0dd229b2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#ifdef _WIN32
#include <numeric>
#endif
#include <random>
#define PADDLE_CUDA_FP16
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
using paddle::platform::float16;
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void AddKernel(const T* data_a, T* data_b, size_t num) {
CUDA_KERNEL_LOOP(i, num) {
paddle::platform::CudaAtomicAdd(&data_b[i], data_a[i]);
}
}
template <typename T>
struct AddFunctor {
T operator()(const T& a, const T& b) { return a + b; }
};
template <typename T>
void TestCase(size_t num) {
T *in1, *in2, *out;
T *d_in1, *d_in2;
size_t size = sizeof(T) * num;
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#else
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#endif
in1 = reinterpret_cast<T*>(malloc(size));
in2 = reinterpret_cast<T*>(malloc(size));
out = reinterpret_cast<T*>(malloc(size));
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num; ++i) {
in1[i] = static_cast<T>(dist(engine));
in2[i] = static_cast<T>(dist(engine));
}
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<T>),
dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
0,
d_in1,
d_in2,
num);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( AddKernel<T>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#endif
for (size_t i = 0; i < num; ++i) {
// NOTE(dzhwinter): the float16 add has small underflow/overflow
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(out[i]),
static_cast<float>(AddFunctor<T>()(in1[i], in2[i])),
0.001);
}
free(in1);
free(in2);
free(out);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
hipFree(d_in1);
hipFree(d_in2);
#endif
}
// cuda primitives
TEST(CudaAtomic, Add) {
TestCase<float>(static_cast<size_t>(10));
TestCase<float>(static_cast<size_t>(1024 * 1024));
TestCase<double>(static_cast<size_t>(10));
TestCase<double>(static_cast<size_t>(1024 * 1024));
}
TEST(CudaAtomic, float16) {
TestCase<float16>(static_cast<size_t>(1));
TestCase<float16>(static_cast<size_t>(2));
TestCase<float16>(static_cast<size_t>(3));
TestCase<float16>(static_cast<size_t>(10));
TestCase<float16>(static_cast<size_t>(1024 * 1024));
}
// unalignment of uint8
void TestUnalign(size_t num, const int shift_bit) {
ASSERT_EQ(num % 2, 0);
float16 *in1, *in2, *out;
float16 *d_in1, *d_in2;
size_t size = sizeof(uint8_t) * (num + shift_bit);
size_t array_size = sizeof(float16) * (num / 2);
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#else
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#endif
in1 = reinterpret_cast<float16*>(malloc(size));
in2 = reinterpret_cast<float16*>(malloc(size));
out = reinterpret_cast<float16*>(malloc(size));
// right shift 1, mimic the unalignment of address
float16* r_in1 =
reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in1) + shift_bit);
float16* r_in2 =
reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in2) + shift_bit);
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num / 2; ++i) {
r_in1[i] = static_cast<float16>(dist(engine));
r_in2[i] = static_cast<float16>(dist(engine));
}
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, r_in1, array_size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, r_in2, array_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<float16>),
dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
0,
d_in1,
d_in2,
num / 2);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, array_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
hipMemcpy(d_in1, r_in1, array_size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, r_in2, array_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( AddKernel<float16>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num / 2);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, array_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#endif
for (size_t i = 0; i < num / 2; ++i) {
// NOTE(dzhwinter): the float16 add has small truncate error.
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(out[i]),
static_cast<float>(AddFunctor<float16>()(r_in1[i], r_in2[i])),
0.001);
}
free(in1);
free(in2);
free(out);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
hipFree(d_in1);
hipFree(d_in2);
#endif
}
TEST(CudaAtomic, float16Unalign) {
// same with float16 testcase
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 2);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 2);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 2);
// shift the address.
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 3);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 3);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 3);
}
// https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
template <typename T>
static __forceinline__ __device__ T WarpReduceSum(T val) {
unsigned mask = 0u;
CREATE_SHFL_MASK(mask, true);
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
val += paddle::platform::CudaShuffleDownSync(mask, val, offset);
}
return val;
}
template <typename T>
__forceinline__ __device__ T BlockReduce(T val) {
static __shared__ T shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = WarpReduceSum(val); // Each warp performs partial reduction
if (lane == 0) shared[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
// read from shared memory only if that warp existed
val =
(threadIdx.x < blockDim.x / warpSize) ? shared[lane] : static_cast<T>(0);
if (wid == 0) val = WarpReduceSum(val); // Final reduce within first warp
return val;
}
template <typename T>
__global__ void DeviceReduceSum(T* in, T* out, size_t N) {
T sum(0);
CUDA_KERNEL_LOOP(i, N) { sum += in[i]; }
sum = BlockReduce<T>(sum);
__syncthreads();
if (threadIdx.x == 0) out[blockIdx.x] = sum;
}
template <typename T>
void TestReduce(size_t num, float atol = 0.01) {
T* in1;
T *d_in1, *d_in2;
size_t size = sizeof(T) * num;
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T));
#else
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T));
#endif
in1 = reinterpret_cast<T*>(malloc(size));
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num; ++i) {
in1[i] = static_cast<T>(dist(engine));
}
auto out = std::accumulate(in1, in1 + num, static_cast<T>(0));
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(HIP_KERNEL_NAME(DeviceReduceSum<T>),
dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
0,
d_in1,
d_in2,
num);
hipMemcpy(in1, d_in2, sizeof(T), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( DeviceReduceSum<T>), dim3(1), dim3(PADDLE_CUDA_NUM_THREADS), 0, 0, d_in1, d_in2, num);
hipMemcpy(in1, d_in2, sizeof(T), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#endif
// NOTE(dzhwinter): the float16 add has small underflow/overflow
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(in1[0]), static_cast<float>(out), atol);
free(in1);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
hipFree(d_in1);
hipFree(d_in2);
#endif
}
TEST(CudaShuffleSync, float16) {
TestReduce<float>(10);
TestReduce<float>(1000);
// float16 will overflow or accumulate truncate errors in big size.
TestReduce<float16>(10);
TestReduce<float16>(100, /*atol error*/ 1.0);
}
| 7d196c5854790d548ac9ab8ee93e7dc0dd229b2d.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#ifdef _WIN32
#include <numeric>
#endif
#include <random>
#define PADDLE_CUDA_FP16
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
using paddle::platform::float16;
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void AddKernel(const T* data_a, T* data_b, size_t num) {
CUDA_KERNEL_LOOP(i, num) {
paddle::platform::CudaAtomicAdd(&data_b[i], data_a[i]);
}
}
template <typename T>
struct AddFunctor {
T operator()(const T& a, const T& b) { return a + b; }
};
template <typename T>
void TestCase(size_t num) {
T *in1, *in2, *out;
T *d_in1, *d_in2;
size_t size = sizeof(T) * num;
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#else
cudaMalloc(reinterpret_cast<void**>(&d_in1), size);
cudaMalloc(reinterpret_cast<void**>(&d_in2), size);
#endif
in1 = reinterpret_cast<T*>(malloc(size));
in2 = reinterpret_cast<T*>(malloc(size));
out = reinterpret_cast<T*>(malloc(size));
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num; ++i) {
in1[i] = static_cast<T>(dist(engine));
in2[i] = static_cast<T>(dist(engine));
}
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<T>),
dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
0,
d_in1,
d_in2,
num);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice);
AddKernel<T><<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num);
cudaDeviceSynchronize();
cudaMemcpy(out, d_in2, size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
#endif
for (size_t i = 0; i < num; ++i) {
// NOTE(dzhwinter): the float16 add has small underflow/overflow
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(out[i]),
static_cast<float>(AddFunctor<T>()(in1[i], in2[i])),
0.001);
}
free(in1);
free(in2);
free(out);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
cudaFree(d_in1);
cudaFree(d_in2);
#endif
}
// cuda primitives
TEST(CudaAtomic, Add) {
TestCase<float>(static_cast<size_t>(10));
TestCase<float>(static_cast<size_t>(1024 * 1024));
TestCase<double>(static_cast<size_t>(10));
TestCase<double>(static_cast<size_t>(1024 * 1024));
}
TEST(CudaAtomic, float16) {
TestCase<float16>(static_cast<size_t>(1));
TestCase<float16>(static_cast<size_t>(2));
TestCase<float16>(static_cast<size_t>(3));
TestCase<float16>(static_cast<size_t>(10));
TestCase<float16>(static_cast<size_t>(1024 * 1024));
}
// unalignment of uint8
void TestUnalign(size_t num, const int shift_bit) {
ASSERT_EQ(num % 2, 0);
float16 *in1, *in2, *out;
float16 *d_in1, *d_in2;
size_t size = sizeof(uint8_t) * (num + shift_bit);
size_t array_size = sizeof(float16) * (num / 2);
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), size);
#else
cudaMalloc(reinterpret_cast<void**>(&d_in1), size);
cudaMalloc(reinterpret_cast<void**>(&d_in2), size);
#endif
in1 = reinterpret_cast<float16*>(malloc(size));
in2 = reinterpret_cast<float16*>(malloc(size));
out = reinterpret_cast<float16*>(malloc(size));
// right shift 1, mimic the unalignment of address
float16* r_in1 =
reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in1) + shift_bit);
float16* r_in2 =
reinterpret_cast<float16*>(reinterpret_cast<uint8_t*>(in2) + shift_bit);
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num / 2; ++i) {
r_in1[i] = static_cast<float16>(dist(engine));
r_in2[i] = static_cast<float16>(dist(engine));
}
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, r_in1, array_size, hipMemcpyHostToDevice);
hipMemcpy(d_in2, r_in2, array_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(HIP_KERNEL_NAME(AddKernel<float16>),
dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
0,
d_in1,
d_in2,
num / 2);
hipDeviceSynchronize();
hipMemcpy(out, d_in2, array_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
cudaMemcpy(d_in1, r_in1, array_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_in2, r_in2, array_size, cudaMemcpyHostToDevice);
AddKernel<float16><<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num / 2);
cudaDeviceSynchronize();
cudaMemcpy(out, d_in2, array_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
#endif
for (size_t i = 0; i < num / 2; ++i) {
// NOTE(dzhwinter): the float16 add has small truncate error.
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(out[i]),
static_cast<float>(AddFunctor<float16>()(r_in1[i], r_in2[i])),
0.001);
}
free(in1);
free(in2);
free(out);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
cudaFree(d_in1);
cudaFree(d_in2);
#endif
}
TEST(CudaAtomic, float16Unalign) {
// same with float16 testcase
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 2);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 2);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 2);
// shift the address.
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 1);
TestUnalign(static_cast<size_t>(2), /*shift_bit*/ 3);
TestUnalign(static_cast<size_t>(1024), /*shift_bit*/ 3);
TestUnalign(static_cast<size_t>(1024 * 1024), /*shift_bit*/ 3);
}
// https://devblogs.nvidia.com/faster-parallel-reductions-kepler/
template <typename T>
static __forceinline__ __device__ T WarpReduceSum(T val) {
unsigned mask = 0u;
CREATE_SHFL_MASK(mask, true);
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
val += paddle::platform::CudaShuffleDownSync(mask, val, offset);
}
return val;
}
template <typename T>
__forceinline__ __device__ T BlockReduce(T val) {
static __shared__ T shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = WarpReduceSum(val); // Each warp performs partial reduction
if (lane == 0) shared[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
// read from shared memory only if that warp existed
val =
(threadIdx.x < blockDim.x / warpSize) ? shared[lane] : static_cast<T>(0);
if (wid == 0) val = WarpReduceSum(val); // Final reduce within first warp
return val;
}
template <typename T>
__global__ void DeviceReduceSum(T* in, T* out, size_t N) {
T sum(0);
CUDA_KERNEL_LOOP(i, N) { sum += in[i]; }
sum = BlockReduce<T>(sum);
__syncthreads();
if (threadIdx.x == 0) out[blockIdx.x] = sum;
}
template <typename T>
void TestReduce(size_t num, float atol = 0.01) {
T* in1;
T *d_in1, *d_in2;
size_t size = sizeof(T) * num;
#ifdef PADDLE_WITH_HIP
hipMalloc(reinterpret_cast<void**>(&d_in1), size);
hipMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T));
#else
cudaMalloc(reinterpret_cast<void**>(&d_in1), size);
cudaMalloc(reinterpret_cast<void**>(&d_in2), sizeof(T));
#endif
in1 = reinterpret_cast<T*>(malloc(size));
std::minstd_rand engine;
std::uniform_real_distribution<double> dist(0.0, 1.0);
for (size_t i = 0; i < num; ++i) {
in1[i] = static_cast<T>(dist(engine));
}
auto out = std::accumulate(in1, in1 + num, static_cast<T>(0));
#ifdef PADDLE_WITH_HIP
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(HIP_KERNEL_NAME(DeviceReduceSum<T>),
dim3(1),
dim3(PADDLE_CUDA_NUM_THREADS),
0,
0,
d_in1,
d_in2,
num);
hipMemcpy(in1, d_in2, sizeof(T), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
#else
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
DeviceReduceSum<T><<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num);
cudaMemcpy(in1, d_in2, sizeof(T), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
#endif
// NOTE(dzhwinter): the float16 add has small underflow/overflow
// so we use EXPECT_NEAR to check the result.
EXPECT_NEAR(static_cast<float>(in1[0]), static_cast<float>(out), atol);
free(in1);
#ifdef PADDLE_WITH_HIP
hipFree(d_in1);
hipFree(d_in2);
#else
cudaFree(d_in1);
cudaFree(d_in2);
#endif
}
TEST(CudaShuffleSync, float16) {
TestReduce<float>(10);
TestReduce<float>(1000);
// float16 will overflow or accumulate truncate errors in big size.
TestReduce<float16>(10);
TestReduce<float16>(100, /*atol error*/ 1.0);
}
|
26029a2a08ca8254a3c14f1f962195422e240202.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <hip/hip_runtime.h>
// Thread block size
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
/*__global__ void scan(float *g_odata, float *g_idata, int n) {
extern __shared__ float temp[]; // allocated on invocation
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
int localIndex = threadIdx.x;
temp[localIndex] = g_idata[globalIndex];
__syncthreads();
for (int offset = 1; offset < BLOCK_SIZE; offset *= 2) {
if (localIndex >= offset)
temp[localIndex] += temp[localIndex - offset];
__syncthreads();
}
g_odata[globalIndex] = temp[localIndex]; // write output
}*/
// Inclusive Hillis-Steele scan of one BLOCK_SIZE-wide tile of g_idata.
// Writes each element's in-block prefix into g_idata and the block total into
// g_odata[blockIdx.x].  Requires blockDim.x == BLOCK_SIZE, dynamic shared
// memory of blockDim.x * sizeof(float) bytes, and n a multiple of BLOCK_SIZE
// (there is no tail guard).
__global__ void scan(float *g_odata, float *g_idata, int n) {
    extern __shared__ float temp[]; // allocated on invocation
    int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
    int localIndex = threadIdx.x;
    temp[localIndex] = g_idata[globalIndex];
    __syncthreads();
    for (int offset = 1; offset < BLOCK_SIZE; offset *= 2) {
        // Fix: read the partner value into a register, barrier, then write.
        // The original added temp[localIndex - offset] in place with no
        // barrier between a neighbor's write and this thread's read — the
        // classic in-place Hillis-Steele data race.
        float partner = 0.0f;
        if (localIndex >= offset)
            partner = temp[localIndex - offset];
        __syncthreads();
        if (localIndex >= offset)
            temp[localIndex] += partner;
        __syncthreads();
    }
    g_idata[globalIndex] = temp[localIndex]; // in-block inclusive prefix
    if (localIndex == BLOCK_SIZE - 1) {
        g_odata[blockIdx.x] = temp[localIndex]; // block total
    }
}
// Experimental second pass over block totals (currently unused: the loop that
// invoked it in PrefixSum is commented out).  globalIndex does not depend on
// threadIdx, so it is uniform across a block: every thread stages the SAME
// g_odata element and the in-place scan below only combines copies of it.
// NOTE(review): looks unfinished — confirm the intended per-thread indexing
// (see the commented-out variant below) before reusing this kernel.
__global__ void scan_block(float *g_odata, float *g_idata, int step, int n) {
    extern __shared__ float temp[]; // allocated on invocation
    //int globalIndex = blockIdx.x * blockDim.x + blockDim.x * (threadIdx.x + step) + blockDim.x - 1;
    int globalIndex = blockIdx.x * blockDim.x + blockDim.x * step + blockDim.x - 1;
    int localIndex = threadIdx.x;
    // Uniform per-block condition, so the __syncthreads() below are reached
    // by all threads of a block or by none (no divergent barrier).
    if(globalIndex < n-1) {
        //printf("global value=%d, global index=%d\n", g_odata[globalIndex], globalIndex);
        temp[localIndex] = g_odata[globalIndex];
        __syncthreads();
        for (int offset = 1; offset < BLOCK_SIZE; offset *= 2) {
            if (localIndex >= offset)
                temp[localIndex] += temp[localIndex - offset];
            __syncthreads();
        }
        g_idata[globalIndex] = temp[localIndex]; // write output
    }
}
// Byte-for-byte duplicate of scan_block above (also unused — its invocation
// in PrefixSum is commented out).  Presumably intended to broadcast the
// accumulated block offsets back into the per-element results, but as written
// it repeats scan_block's behavior — TODO confirm intent before reuse.
__global__ void broadcast_sum(float *g_odata, float *g_idata, int step, int n) {
    extern __shared__ float temp[]; // allocated on invocation
    //int globalIndex = blockIdx.x * blockDim.x + blockDim.x * (threadIdx.x + step) + blockDim.x - 1;
    int globalIndex = blockIdx.x * blockDim.x + blockDim.x * step + blockDim.x - 1;
    int localIndex = threadIdx.x;
    // globalIndex is uniform across the block, so the barriers below are safe.
    if(globalIndex < n-1) {
        //printf("global value=%d, global index=%d\n", g_odata[globalIndex], globalIndex);
        temp[localIndex] = g_odata[globalIndex];
        __syncthreads();
        for (int offset = 1; offset < BLOCK_SIZE; offset *= 2) {
            if (localIndex >= offset)
                temp[localIndex] += temp[localIndex - offset];
            __syncthreads();
        }
        g_idata[globalIndex] = temp[localIndex]; // write output
    }
}
// Dump the first n entries of x to stdout, space separated, newline terminated.
void print(float* x, const int n) {
    int idx = 0;
    while (idx < n) {
        std::cout << x[idx] << " ";
        ++idx;
    }
    std::cout << "\n";
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Computes per-block inclusive prefix sums of idata (n floats, n assumed to
// be a multiple of BLOCK_SIZE) on the device and copies the n/BLOCK_SIZE
// block totals back into odata.  ELAPSED_TIME must be defined at compile
// time: 1 => print kernel time, otherwise print input and block totals.
void PrefixSum(float* odata, float* idata, const int n) {
    float* g_idata;
    float* g_odata;
    int block = BLOCK_SIZE;
    int grid = n / BLOCK_SIZE;
    size_t size = n * sizeof(float);
    size_t block_size = grid * sizeof(float);
    hipMalloc(&g_idata, size);
    hipMalloc(&g_odata, block_size);
    hipMemcpy(g_idata, idata, size, hipMemcpyHostToDevice);
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    // Fix: the dynamic-shared-memory launch argument is a size in BYTES.
    // The kernel stages one float per thread, so it needs
    // block * sizeof(float); the original passed just `block`, which
    // under-allocated the shared array 4x.
    hipLaunchKernelGGL(( scan), dim3(grid), dim3(block), block * sizeof(float), 0, g_odata, g_idata, n);
    hipError_t errSync = hipGetLastError();
    hipError_t errAsync = hipDeviceSynchronize();
    if (errSync != hipSuccess)
        printf("4: Sync kernel error: %s\n", hipGetErrorString(errSync));
    if (errAsync != hipSuccess)
        printf("4: Async kernel error: %s\n", hipGetErrorString(errAsync));
    hipEventRecord(stop);
    // Copy only the per-block totals back to the host.
    hipMemcpy(odata, g_odata, block_size, hipMemcpyDeviceToHost);
    if (ELAPSED_TIME == 1) {
        hipEventSynchronize(stop);
        float milliseconds = 0;
        hipEventElapsedTime(&milliseconds, start, stop);
        std::cout << milliseconds << "\n";
    } else {
        print(idata, n);
        print(odata, grid);
    }
    // Fix: destroy the timing events (previously leaked) and free the buffers.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(g_odata);
    hipFree(g_idata);
}
// Reads n and n floats from stdin, runs the per-block prefix sum, and prints
// the last slot of odata.  Fixes vs. original: allocate n ELEMENTS (the
// original allocated n*sizeof(float) elements, 4x too many), value-initialize
// odata so the diagnostic print below is defined, and release new[] storage
// with delete[] (the original called free() on new[] memory — UB).
int main() {
    int n;
    scanf("%d", &n);
    float* idata, *odata;
    idata = new float[n];
    odata = new float[n]();  // zero-filled: PrefixSum only writes n/BLOCK_SIZE slots
    for (int i = 0; i < n; i++)
        scanf("%f", &idata[i]);
    print(idata, n);
    print(odata, n);
    PrefixSum(odata, idata, n);
    printf("result=%f\n", odata[n - 1]);
    delete[] odata;
    delete[] idata;
    return 0;
}
| 26029a2a08ca8254a3c14f1f962195422e240202.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
// Thread block size
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
/*__global__ void scan(float *g_odata, float *g_idata, int n) {
extern __shared__ float temp[]; // allocated on invocation
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
int localIndex = threadIdx.x;
temp[localIndex] = g_idata[globalIndex];
__syncthreads();
for (int offset = 1; offset < BLOCK_SIZE; offset *= 2) {
if (localIndex >= offset)
temp[localIndex] += temp[localIndex - offset];
__syncthreads();
}
g_odata[globalIndex] = temp[localIndex]; // write output
}*/
// Inclusive Hillis-Steele scan of one BLOCK_SIZE-wide tile of g_idata.
// Writes each element's in-block prefix into g_idata and the block total into
// g_odata[blockIdx.x].  Requires blockDim.x == BLOCK_SIZE, dynamic shared
// memory of blockDim.x * sizeof(float) bytes, and n a multiple of BLOCK_SIZE
// (there is no tail guard).
__global__ void scan(float *g_odata, float *g_idata, int n) {
    extern __shared__ float temp[]; // allocated on invocation
    int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
    int localIndex = threadIdx.x;
    temp[localIndex] = g_idata[globalIndex];
    __syncthreads();
    for (int offset = 1; offset < BLOCK_SIZE; offset *= 2) {
        // Fix: read the partner value into a register, barrier, then write.
        // The original added temp[localIndex - offset] in place with no
        // barrier between a neighbor's write and this thread's read — the
        // classic in-place Hillis-Steele data race.
        float partner = 0.0f;
        if (localIndex >= offset)
            partner = temp[localIndex - offset];
        __syncthreads();
        if (localIndex >= offset)
            temp[localIndex] += partner;
        __syncthreads();
    }
    g_idata[globalIndex] = temp[localIndex]; // in-block inclusive prefix
    if (localIndex == BLOCK_SIZE - 1) {
        g_odata[blockIdx.x] = temp[localIndex]; // block total
    }
}
// Experimental second pass over block totals (currently unused: the loop that
// invoked it in PrefixSum is commented out).  globalIndex does not depend on
// threadIdx, so it is uniform across a block: every thread stages the SAME
// g_odata element and the in-place scan below only combines copies of it.
// NOTE(review): looks unfinished — confirm the intended per-thread indexing
// (see the commented-out variant below) before reusing this kernel.
__global__ void scan_block(float *g_odata, float *g_idata, int step, int n) {
    extern __shared__ float temp[]; // allocated on invocation
    //int globalIndex = blockIdx.x * blockDim.x + blockDim.x * (threadIdx.x + step) + blockDim.x - 1;
    int globalIndex = blockIdx.x * blockDim.x + blockDim.x * step + blockDim.x - 1;
    int localIndex = threadIdx.x;
    // Uniform per-block condition, so the __syncthreads() below are reached
    // by all threads of a block or by none (no divergent barrier).
    if(globalIndex < n-1) {
        //printf("global value=%d, global index=%d\n", g_odata[globalIndex], globalIndex);
        temp[localIndex] = g_odata[globalIndex];
        __syncthreads();
        for (int offset = 1; offset < BLOCK_SIZE; offset *= 2) {
            if (localIndex >= offset)
                temp[localIndex] += temp[localIndex - offset];
            __syncthreads();
        }
        g_idata[globalIndex] = temp[localIndex]; // write output
    }
}
// Byte-for-byte duplicate of scan_block above (also unused — its invocation
// in PrefixSum is commented out).  Presumably intended to broadcast the
// accumulated block offsets back into the per-element results, but as written
// it repeats scan_block's behavior — TODO confirm intent before reuse.
__global__ void broadcast_sum(float *g_odata, float *g_idata, int step, int n) {
    extern __shared__ float temp[]; // allocated on invocation
    //int globalIndex = blockIdx.x * blockDim.x + blockDim.x * (threadIdx.x + step) + blockDim.x - 1;
    int globalIndex = blockIdx.x * blockDim.x + blockDim.x * step + blockDim.x - 1;
    int localIndex = threadIdx.x;
    // globalIndex is uniform across the block, so the barriers below are safe.
    if(globalIndex < n-1) {
        //printf("global value=%d, global index=%d\n", g_odata[globalIndex], globalIndex);
        temp[localIndex] = g_odata[globalIndex];
        __syncthreads();
        for (int offset = 1; offset < BLOCK_SIZE; offset *= 2) {
            if (localIndex >= offset)
                temp[localIndex] += temp[localIndex - offset];
            __syncthreads();
        }
        g_idata[globalIndex] = temp[localIndex]; // write output
    }
}
// Dump the first n entries of x to stdout, space separated, newline terminated.
void print(float* x, const int n) {
    int idx = 0;
    while (idx < n) {
        std::cout << x[idx] << " ";
        ++idx;
    }
    std::cout << "\n";
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Computes per-block inclusive prefix sums of idata (n floats, n assumed to
// be a multiple of BLOCK_SIZE) on the device and copies the n/BLOCK_SIZE
// block totals back into odata.  ELAPSED_TIME must be defined at compile
// time: 1 => print kernel time, otherwise print input and block totals.
void PrefixSum(float* odata, float* idata, const int n) {
    float* g_idata;
    float* g_odata;
    int block = BLOCK_SIZE;
    int grid = n / BLOCK_SIZE;
    size_t size = n * sizeof(float);
    size_t block_size = grid * sizeof(float);
    cudaMalloc(&g_idata, size);
    cudaMalloc(&g_odata, block_size);
    cudaMemcpy(g_idata, idata, size, cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // Fix: the third launch-configuration argument is a size in BYTES.
    // The kernel stages one float per thread, so it needs
    // block * sizeof(float); the original passed just `block`, which
    // under-allocated the shared array 4x.
    scan<<<grid, block, block * sizeof(float)>>>(g_odata, g_idata, n);
    cudaError_t errSync = cudaGetLastError();
    cudaError_t errAsync = cudaDeviceSynchronize();
    if (errSync != cudaSuccess)
        printf("4: Sync kernel error: %s\n", cudaGetErrorString(errSync));
    if (errAsync != cudaSuccess)
        printf("4: Async kernel error: %s\n", cudaGetErrorString(errAsync));
    cudaEventRecord(stop);
    // Copy only the per-block totals back to the host.
    cudaMemcpy(odata, g_odata, block_size, cudaMemcpyDeviceToHost);
    if (ELAPSED_TIME == 1) {
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        std::cout << milliseconds << "\n";
    } else {
        print(idata, n);
        print(odata, grid);
    }
    // Fix: destroy the timing events (previously leaked) and free the buffers.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(g_odata);
    cudaFree(g_idata);
}
// Reads n and n floats from stdin, runs the per-block prefix sum, and prints
// the last slot of odata.  Fixes vs. original: allocate n ELEMENTS (the
// original allocated n*sizeof(float) elements, 4x too many), value-initialize
// odata so the diagnostic print below is defined, and release new[] storage
// with delete[] (the original called free() on new[] memory — UB).
int main() {
    int n;
    scanf("%d", &n);
    float* idata, *odata;
    idata = new float[n];
    odata = new float[n]();  // zero-filled: PrefixSum only writes n/BLOCK_SIZE slots
    for (int i = 0; i < n; i++)
        scanf("%f", &idata[i]);
    print(idata, n);
    print(odata, n);
    PrefixSum(odata, idata, n);
    printf("result=%f\n", odata[n - 1]);
    delete[] odata;
    delete[] idata;
    return 0;
}
|
8291326660a29d75f75e0d4f26ebaab4253a87a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <rocblas.h>
#include <iostream>
#include <stdio.h>
#define DATATYPE float
#define arraySize 5
#define threadnum 16
#define blocknum 16
#define arrayNsize 10
#define arrayMsize 15
#define arraysize 5
#define single 1
//
#define arraysizeM 10
#define arraysizeL 10
#define arraysizeN 10
#define threadnx 2
hipError_t addWithCuda(int* c, int* a, int* b, int size);
//
// One thread per element: c[i] = a[i] + b[i].  Intended for a single-block
// launch with exactly as many threads as elements; there is no bounds guard.
__global__ void addKernel(int* c, int* a, int* b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
//blockthread
// Single-block, single-thread vector add: one thread walks the whole range
// sequentially (baseline for comparing against the parallel variants below).
__global__ void vector_add_gpu_1(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
    int idx = 0;
    while (idx < n) {
        c[idx] = a[idx] + b[idx];
        ++idx;
    }
}
//blockthread
// Single block, many threads: each thread strides through the vector by
// blockDim.x starting at its own thread index, so any n is covered.
__global__ void vector_add_gpu_2(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
    const int stride = blockDim.x;
    for (int idx = threadIdx.x; idx < n; idx += stride) {
        c[idx] = a[idx] + b[idx];
    }
}
//blockthread
// Grid-stride vector add: each thread starts at its global index and advances
// by the total thread count (gridDim.x * blockDim.x), so the kernel is
// correct for any grid/block configuration and any n.
__global__ void vector_add_gpu_3(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
    const int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        c[idx] = a[idx] + b[idx];
    }
}
//mn
// CPU reference: element-wise sum of two m x n matrices stored as arrays of
// row pointers; c[i][j] = a[i][j] + b[i][j].
void vector_add_mn(DATATYPE** a, DATATYPE** b, DATATYPE** c, int m, int n) {
    for (int row = 0; row < m; ++row) {
        DATATYPE* ar = a[row];
        DATATYPE* br = b[row];
        DATATYPE* cr = c[row];
        for (int col = 0; col < n; ++col) {
            cr[col] = ar[col] + br[col];
        }
    }
}
//m n
// m x n matrix add with a 2-D launch: thread (x, y) handles element
// [row=x][col=y] of row-major arrays with compile-time row width arrayNsize.
// Fixes vs. original: (1) the original used `while` but never advanced
// tid/tidy, so any in-range thread spun forever; (2) the column index was
// bounded by m instead of n, reading past the row when m > n.
__global__ void vector_add_gpu_4(DATATYPE(*a)[arrayNsize], DATATYPE(*b)[arrayNsize], DATATYPE(*c)[arrayNsize], int m, int n) {
    unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
    unsigned int tidy = threadIdx.y + blockDim.y * blockIdx.y;
    if (tid < (unsigned int)m && tidy < (unsigned int)n) {
        c[tid][tidy] = a[tid][tidy] + b[tid][tidy];
    }
}
//CPU
// CPU reference dot product: *c = sum(a[i] * b[i]).  Accumulates in double
// to limit rounding error before narrowing to DATATYPE on store.
void vector_dot_product_serial(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
    double acc = 0.0;
    for (int idx = 0; idx != n; ++idx) {
        acc += a[idx] * b[idx];
    }
    *c = acc;
}
//GPU
// GPU dot product, single-block launch: each thread accumulates a strided
// partial sum in a register, then the block combines the partials with an
// interleaved-addressing tree reduction (blockDim.x must equal threadnum, a
// power of two).  c[0] receives the final sum.
// Fix vs. original: the shared-memory store and the ENTIRE reduction
// (including its __syncthreads()) were inside the accumulation while-loop, so
// threads hit the barrier a data-dependent number of times (divergent
// barrier = deadlock/UB) and reduced incomplete partials.  The reduction now
// runs exactly once, after every thread has finished accumulating.
__global__ void vector_dot_product_gpu_1(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
    __shared__ DATATYPE tmp[threadnum];
    const int tidx = threadIdx.x;
    const int t_n = blockDim.x;
    int tid = tidx;
    double temp = 0.0;
    while (tid < n) {
        temp += a[tid] * b[tid];
        tid += t_n;
    }
    tmp[tidx] = temp;  // threads with no elements contribute 0
    __syncthreads();
    int i = 2, j = 1;
    while (i <= threadnum) {
        if ((tidx % i) == 0) {
            tmp[tidx] += tmp[tidx + j];
        }
        __syncthreads();
        i *= 2;
        j *= 2;
    }
    if (tidx == 0) {
        c[0] = tmp[0];
    }
}
//block
// GPU dot product, single-block launch: per-thread strided accumulation in a
// register, then a sequential-addressing tree reduction in shared memory.
// Requires blockDim.x == threadnum, a power of two; c[0] gets the result.
__global__ void vector_dot_product_gpu_2(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
    __shared__ DATATYPE tmp[threadnum];
    const int tidx = threadIdx.x;
    const int t_n = blockDim.x;
    int tid = tidx;
    double temp = 0.0;  // register accumulator in double for accuracy
    while (tid < n) {
        temp += a[tid] * b[tid];
        tid += t_n;
    }
    tmp[tidx] = temp;  // threads with no elements contribute 0
    __syncthreads();   // all partials visible before the reduction
    int i = threadnum / 2;
    while (i != 0) {
        if (tidx < i) {
            tmp[tidx] += tmp[tidx + i];
        }
        __syncthreads();  // barrier outside the divergent if: reached by all
        i /= 2;
    }
    if (tidx == 0) {
        c[0] = tmp[0];  // valid only for a single-block launch
    }
}
//blockCPU
// Multi-block dot product, stage 1: each thread accumulates a grid-stride
// partial sum, each block tree-reduces its threads' partials in shared
// memory, and writes ONE value per block into c_tmp[blockIdx.x].  The
// per-block results are combined afterwards, either on the CPU or by
// vector_dot_product_gpu_4.  Requires threadnum (== blockDim.x) power of two.
__global__ void vector_dot_product_gpu_3(DATATYPE* a, DATATYPE* b, DATATYPE* c_tmp, int n) {
    __shared__ DATATYPE tmp[threadnum];
    const int tidx = threadIdx.x;
    const int bidx = blockIdx.x;
    const int t_n = blockDim.x * gridDim.x;  // total thread count = grid stride
    int tid = bidx * blockDim.x + tidx;
    double temp = 0.0;
    while (tid < n) {
        temp += a[tid] * b[tid];
        tid += t_n;
    }
    tmp[tidx] = temp;
    __syncthreads();
    int i = threadnum / 2;
    while (i != 0) {
        if (tidx < i) {
            tmp[tidx] += tmp[tidx + i];
        }
        __syncthreads();
        i /= 2;
    }
    if (tidx == 0) {
        c_tmp[bidx] = tmp[0];  // one partial result per block
    }
}
//GPU
// Multi-block dot product, stage 2: launched with a single block of exactly
// `blocknum` threads, loads the per-block partials produced by stage 1 and
// tree-reduces them into result[0].  blocknum must be a power of two and
// must match the stage-1 grid size (each thread reads one partial).
__global__ void vector_dot_product_gpu_4(float* result_tmp, float* result) {
    __shared__ float temp[blocknum];
    const int tidx = threadIdx.x;
    temp[tidx] = result_tmp[tidx];  // one stage-1 partial per thread
    __syncthreads();
    int i = blocknum / 2;
    while (i != 0) {
        if (tidx < i) {
            temp[tidx] += temp[tidx + i];
        }
        __syncthreads();
        i /= 2;
    }
    if (tidx == 0) {
        result[0] = temp[0];
    }
}
//block(
// Dot product via one atomicAdd per thread: each thread adds its grid-stride
// partial directly into c[0].
// NOTE(review): the reset of c[0] by thread (0,0) of block 0 is NOT
// synchronized with other blocks — a block scheduled before block 0 can
// atomicAdd first and have its contribution wiped out.  The caller should
// zero c on the host before launch instead; confirm at the call site.
__global__ void vector_dot_product_gpu_5_0(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
    if ((threadIdx.x == 0) && (blockIdx.x == 0)) {
        c[0] = 0.0;
    }
    const int tidx = threadIdx.x;
    const int bidx = blockIdx.x;
    const int t_n = blockDim.x * gridDim.x;
    int tid = bidx * blockDim.x + tidx;
    double temp = 0.0;
    while (tid < n) {
        temp += a[tid] * b[tid];
        tid += t_n;
    }
    atomicAdd(c, temp);  // temp narrows to float for atomicAdd(float*, float)
}
//blockblockblock
// Dot product with one atomicAdd per BLOCK: each block tree-reduces its
// threads' grid-stride partials in shared memory, then thread 0 atomically
// adds the block total into c[0].  Requires power-of-two blockDim.x.
// NOTE(review): as in gpu_5_0 above, the in-kernel reset of c[0] by block 0
// races with other blocks' atomicAdds — zero c on the host instead.
__global__ void vector_dot_product_gpu_5(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
    if ((threadIdx.x == 0) && (blockIdx.x == 0)) {
        c[0] = 0.0;
    }
    __shared__ DATATYPE tmp[threadnum];
    const int tidx = threadIdx.x;
    const int bidx = blockIdx.x;
    const int t_n = blockDim.x * gridDim.x;
    int tid = bidx * blockDim.x + tidx;
    double temp = 0.0;
    while (tid < n) {
        temp += a[tid] * b[tid];
        tid += t_n;
    }
    tmp[tidx] = temp;
    __syncthreads();
    int i = blockDim.x / 2;
    while (i != 0) {
        if (tidx < i) {
            tmp[tidx] += tmp[tidx + i];
        }
        __syncthreads();
        i /= 2;
    }
    if (tidx == 0) {
        atomicAdd(c, tmp[0]);  // one atomic per block instead of per thread
    }
}
//block
// Block-wide tree reduction helper: sums tmp[0..blockDim.x) into out[0]
// (written by thread 0 only).  tmp is volatile so each post-barrier re-read
// goes to shared memory rather than a stale register.  blockDim.x must be a
// power of two, and ALL threads of the block must call this function — it
// contains __syncthreads().
__device__ void vector_dot(DATATYPE* out, volatile DATATYPE* tmp) {
    const int tidx = threadIdx.x;
    int i = blockDim.x / 2;
    while (i != 0) {
        if (tidx < i) {
            tmp[tidx] += tmp[tidx + i];
        }
        __syncthreads();
        i /= 2;
    }
    if (tidx == 0) {
        out[0] = tmp[0];
    }
}
__device__ unsigned int lockcount = 0;
// Single-launch full dot product using a device-wide "last block" counter
// (threadfence-reduction pattern).  Stage 1: every block reduces its
// grid-stride partials into c_tmp[blockIdx.x].  Stage 2: the last block to
// finish re-reduces c_tmp into c[0].
// Assumes gridDim.x <= blockDim.x (each thread of the closing block loads one
// c_tmp entry) and power-of-two blockDim.x — TODO confirm at the launch site.
// Fix vs. original: `lock` is now derived from the value RETURNED by
// atomicAdd, so exactly one block — the one performing the final increment —
// runs stage 2.  The original re-read `lockcount` after the atomic, which
// races with other blocks' increments and could select zero or several
// blocks as "last".
__global__ void vector_dot_product_gpu_6(DATATYPE* a, DATATYPE* b, DATATYPE* c_tmp, DATATYPE* c, int n) {
    __shared__ DATATYPE tmp[threadnum];
    const int tidx = threadIdx.x;
    const int bidx = blockIdx.x;
    const int t_n = blockDim.x * gridDim.x;
    int tid = bidx * blockDim.x + tidx;
    double temp = 0.0;
    while (tid < n) {
        temp += a[tid] * b[tid];
        tid += t_n;
    }
    tmp[tidx] = temp;
    __syncthreads();
    vector_dot(&c_tmp[blockIdx.x], tmp);
    __shared__ bool lock;
    __threadfence();  // publish this block's c_tmp entry device-wide first
    if (tidx == 0) {
        unsigned int ticket = atomicAdd(&lockcount, 1);
        lock = (ticket == gridDim.x - 1);  // true only in the last finishing block
    }
    __syncthreads();
    if (lock) {
        tmp[tidx] = c_tmp[tidx];
        __syncthreads();
        vector_dot(c, tmp);
        lockcount = 0;  // reset so the kernel can be launched again
    }
}
//
// CPU reference vector add: c[i] = a[i] + b[i] for i in [0, n).
void vector_add_serial(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
    for (int idx = 0; idx != n; ++idx) {
        c[idx] = a[idx] + b[idx];
    }
}
/*
int main()
{
//dim3 blocknum(1);
//dim3 threadnum(arrayMsize,arrayNsize);
float a[arraySize] = { 1, 2, 3, 4, 5 };
float b[arraySize] = { 10, 20, 30, 40, 50 };
float c[arraySize] = { 0 };
float c1[arraySize] = { 0 };
float **aa;
float **bb;
float **cc;
aa = (float**)malloc(sizeof(float*) * arrayMsize);
bb = (float**)malloc(sizeof(float*) * arrayMsize);
cc = (float**)malloc(sizeof(float*) * arrayMsize);
for (int i = 0; i < arrayMsize; ++i) {
aa[i] = (float*)malloc(sizeof(float*) * arrayNsize);
bb[i] = (float*)malloc(sizeof(float*) * arrayNsize);
cc[i] = (float*)malloc(sizeof(float*) * arrayNsize);
}
for (int i = 0; i < arrayMsize; ++i) {
for (int j = 0; j < arrayNsize; ++j) {
aa[i][j] = j;
bb[i][j] = j * 10;
cc[i][j] = 0;
}
}
//
vector_add_serial(a, b, c, arraySize);
//printf("serial :{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",c[0], c[1], c[2], c[3], c[4]);
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//blockthread
//GPU
DATATYPE* d_a, * d_b, * d_c,* d_c_tmp;
hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//GPU
hipMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
//
vector_add_gpu_1<<<single,single>>>(d_a, d_b, d_c, arraySize);
//CPU
hipMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, hipMemcpyDeviceToHost);
//
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
//printf("single block single thread :{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",c[0], c[1], c[2], c[3], c[4]);
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//blockthread
hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//GPU
hipMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
//
vector_add_gpu_2 <<<1, threadnum >>> (d_a, d_b, d_c, arraySize);
//CPU
hipMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, hipMemcpyDeviceToHost);
//
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
//printf("single block multiple thread :{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",c[0], c[1], c[2], c[3], c[4]);
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//blockthread
hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//GPU
hipMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
//
vector_add_gpu_3 <<<blocknum, threadnum >>> (d_a, d_b, d_c, arraySize);
//CPU
hipMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, hipMemcpyDeviceToHost);
//
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
//printf("multiple block multiple thread :{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",c[0], c[1], c[2], c[3], c[4]);
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//cublas
DATATYPE* d_aa, * d_bb;
hipblasHandle_t handle;
hipblasCreate(&handle);
hipMalloc((void**)&d_aa, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_bb, sizeof(DATATYPE) * arraySize);
float alpha = 1.0;
hipblasSetVector(arraySize, sizeof(DATATYPE), a, 1, d_aa, 1);
hipblasSetVector(arraySize, sizeof(DATATYPE), b, 1, d_bb, 1);
hipblasSaxpy(handle, arraySize, &alpha, d_aa, 1, d_bb, 1);
hipblasGetVector(arraySize, sizeof(DATATYPE), d_bb, 1, c1, 1);
hipFree(d_aa);
hipFree(d_bb);
hipblasDestroy(handle);
//printf("cublas :{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",c1[0], c1[1], c1[2], c1[3], c1[4]);
for (int i = 0; i < arraySize; ++i) {
c1[i] = 0;
}
//mn
//DATATYPE(*d_aaa)[arrayNsize], (*d_bbb)[arrayNsize], (*d_ccc)[arrayNsize];
//hipMalloc((void**)&d_aaa, sizeof(DATATYPE) * arrayMsize * arrayNsize);
//hipMalloc((void**)&d_bbb, sizeof(DATATYPE) * arrayMsize * arrayNsize);
//hipMalloc((void**)&d_ccc, sizeof(DATATYPE) * arrayMsize * arrayNsize);
//hipMemcpy(d_aaa, aa, sizeof(DATATYPE) * arrayNsize * arrayMsize, hipMemcpyHostToDevice);
//hipMemcpy(d_bbb, bb, sizeof(DATATYPE)* arrayNsize* arrayMsize, hipMemcpyHostToDevice);
//hipMemcpy(d_ccc, cc, sizeof(DATATYPE)* arrayNsize* arrayMsize, hipMemcpyHostToDevice);
//vector_add_gpu_4 << <blocknum, threadnum >> > (d_aaa, d_bbb, d_ccc, arrayMsize, arrayNsize);
//hipMemcpy(cc, d_ccc, sizeof(DATATYPE)* arrayNsize*arrayMsize, hipMemcpyDeviceToHost);
//hipFree(d_aaa);
//hipFree(d_bbb);
//hipFree(d_ccc);
//std::cout << "mn matrix add\n";
//for (int i = 0; i < arrayMsize; ++i) {
// for (int j = 0; j < arrayNsize; ++j) {
// std::cout << cc[i][j] << " ";
// cc[i][j] = 0;
// }
// std::cout << "\n";
//}
//mn
vector_add_mn(aa, bb, cc, arrayMsize, arrayNsize);
//std::cout << "mn matrix valid\n";
//for (int i = 0; i < arrayMsize; ++i) {
// for (int j = 0; j < arrayNsize; ++j) {
// std::cout << cc[i][j] << " ";
// cc[i][j] = 0;
// }
// std::cout << "\n";
//}
//block
DATATYPE* d_cccc,*d_ca;
DATATYPE ccccd,*cccc;
cccc = &ccccd;
hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_cccc, sizeof(DATATYPE));
//GPU
hipMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
//
vector_dot_product_gpu_1 << <single, threadnum >> > (d_a, d_b, d_c, arraySize);
//CPU
hipMemcpy(cccc, d_c, sizeof(DATATYPE), hipMemcpyDeviceToHost);
//
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
//printf("single block:{1,2,3,4,5} {10,20,30,40,50} = {%f}\n",ccccd);
cccc = 0;
//block
hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//GPU
hipMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
//
vector_dot_product_gpu_2 << <single, threadnum >> > (d_a, d_b, d_c, arraySize);
//CPU
hipMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, hipMemcpyDeviceToHost);
//
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
//printf("single block:{1,2,3,4,5} {10,20,30,40,50} = {%f}\n",c[0]);
c[0] = 0;
//blockCPU
hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//GPU
hipMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
//
vector_dot_product_gpu_3 << <blocknum, threadnum >> > (d_a, d_b, d_c, arraySize);
//CPU
hipMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, hipMemcpyDeviceToHost);
//
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
double temp=0;
for (int i = 0; i < blocknum&&i<arraySize; i++) {
if(c[i]!=NULL)
temp += c[i];
}
c[0] = temp;
//printf("multiple block(CPU):{1,2,3,4,5} {10,20,30,40,50} = {%f}\n", c[0]);
c[0] = 0;
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//blockGPU
hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_ca, sizeof(DATATYPE) * arraySize);
//GPU
hipMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
//
vector_dot_product_gpu_3 << <blocknum, threadnum >> > (d_a, d_b, d_c, arraySize);
vector_dot_product_gpu_4 <<< 1, blocknum >> > (d_c, d_ca);
//CPU
hipMemcpy(c, d_ca, sizeof(DATATYPE) * arraySize, hipMemcpyDeviceToHost);
//
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_ca);
//printf("multiple block(GPU):{1,2,3,4,5} {10,20,30,40,50} = {%f}\n", c[0]);
c[0] = 0;
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//
hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//GPU
hipMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
//
vector_dot_product_gpu_5_0 << <blocknum, threadnum >> > (d_a, d_b, d_c, arraySize);
//CPU
hipMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, hipMemcpyDeviceToHost);
//
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
//printf("atomic_0 multiple block(GPU):{1,2,3,4,5} {10,20,30,40,50} = {%f}\n", c[0]);
c[0] = 0;
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//GPU
hipMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
//
vector_dot_product_gpu_5 << <blocknum, threadnum >> > (d_a, d_b, d_c, arraySize);
//CPU
hipMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, hipMemcpyDeviceToHost);
//
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
//printf("atomic_1 multiple block(GPU):{1,2,3,4,5} {10,20,30,40,50} = {%f}\n", c[0]);
c[0] = 0;
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//block
hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_c_tmp, sizeof(DATATYPE) * arraySize);
//GPU
hipMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, hipMemcpyHostToDevice);
//
vector_dot_product_gpu_6 << <blocknum, threadnum >> > (d_a, d_b,d_c_tmp, d_c, arraySize);
//CPU
hipMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, hipMemcpyDeviceToHost);
//
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_c_tmp);
//printf("counting method multiple block(GPU):{1,2,3,4,5} {10,20,30,40,50} = {%f}\n", c[0]);
c[0] = 0;
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//cublas
hipblasCreate(&handle);
hipMalloc((void**)&d_aa, sizeof(DATATYPE) * arraySize);
hipMalloc((void**)&d_bb, sizeof(DATATYPE) * arraySize);
hipblasSetVector(arraySize, sizeof(DATATYPE), a, 1, d_aa, 1);
hipblasSetVector(arraySize, sizeof(DATATYPE), b, 1, d_bb, 1);
hipblasSdot(handle, arraySize, d_aa, 1, d_bb, 1,&c1[0]);
//hipblasGetVector(arraySize, sizeof(DATATYPE), d_bb, 1, c1, 1);
hipFree(d_aa);
hipFree(d_bb);
hipblasDestroy(handle);
//printf("cublas :{1,2,3,4,5} {10,20,30,40,50} = {%f}\n",c1[0]);
for (int i = 0; i < arraySize; ++i) {
c1[i] = 0;
}
// Add vectors in parallel.
//hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
//if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
//}
//
//printf("{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",
// c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
//cudaStatus = hipDeviceReset();
//if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceReset failed!");
// return 1;
//}
return 0;
}
*/
//
//amlblncmn,
// Naive ijk matrix multiply: C(m x n) = A(m x l) * B(l x n), all row-major.
// Accumulates each element in a double before narrowing on store.
void matrix_multiplication_serial_1(DATATYPE* a, DATATYPE* b, DATATYPE* c, int m, int n, int l) {
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            double acc = 0.0;
            for (int k = 0; k < l; ++k) {
                acc += a[i * l + k] * b[k * n + j];
            }
            c[i * n + j] = acc;
        }
    }
}
//
// ikj-ordered multiply: holds a[i][k] in a register and streams row k of B,
// keeping the inner loop stride-1 over both B and C.
// NOTE(review): accumulates into c, so c must be zeroed by the caller.
void matrix_multiplication_serial_2(DATATYPE* a, DATATYPE* b, DATATYPE* c, int m, int n, int l) {
    for (int i = 0; i < m; ++i) {
        for (int k = 0; k < l; ++k) {
            const double aik = a[i * l + k];
            for (int j = 0; j < n; ++j) {
                c[i * n + j] += aik * b[k * n + j];
            }
        }
    }
}
//
// Transpose-B multiply: builds b1 = B^T so the inner dot product walks both
// operands with stride 1.  B is l x n row-major; b1 is n x l row-major.
// Fix vs. original: the transpose was built as b1[i*l+j] = b[j*n+i] and read
// as b1[j*n+k], which is only a valid transpose when l == n (and indexes out
// of bounds otherwise).  The indexing below is correct for any m, n, l.
void matrix_multiplication_serial_3(DATATYPE* a, DATATYPE* b, DATATYPE* c, int m, int n, int l) {
    DATATYPE* b1 = (DATATYPE*)malloc(sizeof(DATATYPE) * l * n);
    for (int i = 0; i < l; i++) {
        for (int j = 0; j < n; j++) {
            b1[j * l + i] = b[i * n + j];  // b1[col][k] = b[k][col]
        }
    }
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            double temp = 0.0;
            for (int k = 0; k < l; k++) {
                temp += a[i * l + k] * b1[j * l + k];
            }
            c[i * n + j] = temp;
        }
    }
    free(b1);
}
//grid A:nlda;C:nldcldb=ldc
// Square (n x n) matrix multiply, one output element per thread over a flat
// 1-D launch: global index idx maps to (row, column) = (idx / n, idx % n).
// a, b, c are row-major with leading dimensions lda/ldb/ldc.
// NOTE(review): `column < ldc` is always true by construction when ldc >= n
// (column = idx % n); presumably `row < n` is the bound that filters excess
// threads — verify against the launch configuration.
__global__ void matrix_multiplication_gpu_1(const DATATYPE* a, size_t lda, const DATATYPE* b, size_t ldb, DATATYPE* c, size_t ldc, int n) {
    const int tidx = threadIdx.x;
    const int bidx = blockIdx.x;
    const int idx = bidx * blockDim.x + tidx;
    const int row = idx / n;
    const int column = idx % n;
    if (row < n && column < ldc) {
        double tmp = 0.0;  // per-element accumulator in double
        for (int i = 0; i < n; i++) {
            tmp += a[row * lda + i] * b[i * ldb + column];
        }
        c[row * ldc + column] = tmp;
    }
}
//blockAnldaBldaldbCnldcldb=ldc
// One-block-per-row multiply: block bidx produces output row bidx; each
// thread strides across that row's columns by blockDim.x.  The inner loop
// runs over lda entries, so lda must equal the shared dimension of a and b.
// NOTE(review): a block-stride loop over rows was sketched and disabled (see
// the commented `for` below), so gridDim.x must cover every row.
__global__ void matrix_multiplication_gpu_1_0(const DATATYPE* a, size_t lda, const DATATYPE* b, size_t ldb, DATATYPE* c, size_t ldc, int n) {
    int tidx = threadIdx.x;
    int bidx = blockIdx.x;
    double tmp = 0.0;
    int i;
    //for (; bidx < n; bidx += gridDim.x) // disabled block-stride over rows
    {
        for (tidx = threadIdx.x; tidx < ldc; tidx += blockDim.x) {
            tmp = 0.0;
            for (i = 0; i < lda; i++) {
                tmp += a[bidx * lda + i] * b[i * ldb + tidx];
            }
            c[bidx * ldc + tidx] = tmp;
        }
    }
}
//
// Shared-memory variant: one block per output row.  The block first stages
// row blockIdx.x of A into dynamic shared memory (n DATATYPEs — the launch
// must pass n * sizeof(DATATYPE) as the dynamic shared size), then each
// thread computes output columns j, j + blockDim.x, ... of that row.
__global__ void matrix_multiplication_gpu_2(const DATATYPE* a, size_t lda, const DATATYPE* b, size_t ldb, DATATYPE* c, size_t ldc, int n) {
    extern __shared__ DATATYPE data[];
    const int tid = threadIdx.x;
    const int row = blockIdx.x;
    int i, j;
    // Cooperative load of A's row into shared memory.
    for (i = tid; i < n; i += blockDim.x) {
        data[i] = a[row * lda + i];
    }
    __syncthreads();  // row fully staged before anyone reads it
    double tmp = 0.0;
    for (j = tid; j < n; j += blockDim.x) {
        tmp = 0.0;
        for (i = 0; i < n; i++) {
            tmp += data[i] * b[i * ldb + j];
            //printf("%lf\n",tmp);
        }
        c[row * ldc + j] = tmp;
    }
}
//
// Tiled multiply with bounds checks: threadnx x threadnx tiles of A and B are
// staged into static shared memory per iteration; out-of-range tile entries
// are zero-filled so n need not be a multiple of threadnx.  Launch with
// blockDim = (threadnx, threadnx) and a 2-D grid covering n x n outputs.
__global__ void matrix_multiplication_gpu_3(const DATATYPE* a, size_t lda, const DATATYPE* b, size_t ldb, DATATYPE* c, size_t ldc, int n) {
    __shared__ DATATYPE matA[threadnx][threadnx];
    __shared__ DATATYPE matB[threadnx][threadnx];
    const int tidc = threadIdx.x;  // column within the tile
    const int tidr = threadIdx.y;  // row within the tile
    const int bidc = blockIdx.x * threadnx;  // tile's first output column
    const int bidr = blockIdx.y * threadnx;  // tile's first output row
    int i, j;
    double results = 0.0;
    for (j = 0;j < n;j += threadnx) {
        // Stage one tile of A and B, padding with zero past the edges.
        if (tidr + bidr < n && tidc + j < n) {
            matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
        }
        else {
            matA[tidr][tidc] = 0;
        }
        if (tidr + j < n && tidc + bidc < n) {
            matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];
        }
        else {
            matB[tidr][tidc] = 0;
        }
        __syncthreads();  // tiles fully loaded before use
        for (i = 0;i < threadnx;i++) {
            results += matA[tidr][i] * matB[i][tidc];
        }
        __syncthreads();  // done reading before the next load overwrites
    }
    if (tidr + bidr < n && tidc + bidc < n) {
        c[(tidr + bidr) * ldc + tidc + bidc] = results;
    }
}
//
// Tiled multiply WITHOUT load-time bounds checks (faster inner loop than
// gpu_3 above).  Only the final store is guarded, so the tile loads read past
// the matrices unless n is a multiple of threadnx and lda/ldb are padded
// accordingly — NOTE(review): confirm callers guarantee this.
__global__ void matrix_multiplication_gpu_4(const DATATYPE* a, size_t lda, const DATATYPE* b, size_t ldb, DATATYPE* c, size_t ldc, int n) {
    __shared__ DATATYPE matA[threadnx][threadnx];
    __shared__ DATATYPE matB[threadnx][threadnx];
    const int tidc = threadIdx.x;  // column within the tile
    const int tidr = threadIdx.y;  // row within the tile
    const int bidc = blockIdx.x * threadnx;  // tile's first output column
    const int bidr = blockIdx.y * threadnx;  // tile's first output row
    int i, j;
    double results = 0.0;
    for (j = 0;j < n;j += threadnx) {
        matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
        matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];
        __syncthreads();  // tiles fully loaded before use
        for (i = 0;i < threadnx;i++) {
            results += matA[tidr][i] * matB[i][tidc];
        }
        __syncthreads();  // done reading before the next load overwrites
    }
    if (tidr + bidr < n && tidc + bidc < n) {
        c[(tidr + bidr) * ldc + tidc + bidc] = results;
    }
}
//int main() {
// DATATYPE* a, * b, * c, * d_a, * d_b, * d_c, * d_c3, * c3;
// a = (DATATYPE*)malloc(sizeof(DATATYPE*) * arraysizeM * arraysizeL);
// b = (DATATYPE*)malloc(sizeof(DATATYPE*) * arraysizeL * arraysizeN);
// c = (DATATYPE*)malloc(sizeof(DATATYPE*) * arraysizeM * arraysizeN);
// c3 = (DATATYPE*)malloc(sizeof(DATATYPE*) * arraysizeM * arraysizeN);
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// a[i] = i;
// }
// for (int i = 0; i < arraysizeL * arraysizeN; i++) {
// b[i] = i;
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
// //
// matrix_multiplication_serial_1(a, b, c, arraysizeM, arraysizeN, arraysizeL);
// printf("serial a b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
// //
// matrix_multiplication_serial_2(a, b, c, arraysizeM, arraysizeN, arraysizeL);
// printf("serial_mod a b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
// //
// matrix_multiplication_serial_3(a, b, c, arraysizeM, arraysizeN, arraysizeL);
// printf("serial_transpose a b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
//
// //grid
// hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraysizeM * arraysizeL);
// hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraysizeL * arraysizeN);
// hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN);
// hipMemcpy(d_a, a, sizeof(DATATYPE) * arraysizeM * arraysizeL, hipMemcpyHostToDevice);
// hipMemcpy(d_b, b, sizeof(DATATYPE) * arraysizeL * arraysizeN, hipMemcpyHostToDevice);
// int blocks = (arraysizeN + threadnum - 1) / threadnum;
// matrix_multiplication_gpu_1 << <blocks * arraysizeN, threadnum >> > (d_a,arraysizeN,d_b,arraysizeN,d_c,arraysizeN,arraysizeN);
// hipMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, hipMemcpyDeviceToHost);
// printf("serial_transpose a b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
//
// //block
// hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraysizeM * arraysizeL);
// hipMalloc((void**)&d_b, sizeof(DATATYPE) * arraysizeL * arraysizeN);
// hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN);
// hipMemcpy(d_a, a, sizeof(DATATYPE) * arraysizeM * arraysizeL, hipMemcpyHostToDevice);
// hipMemcpy(d_b, b, sizeof(DATATYPE) * arraysizeL * arraysizeN, hipMemcpyHostToDevice);
// matrix_multiplication_gpu_1_0 << <arraysizeN, threadnum >> > (d_a, arraysizeN, d_b, arraysizeN, d_c, arraysizeN, arraysizeN);
// hipMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, hipMemcpyDeviceToHost);
// printf("block thread a b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
//
// //
// size_t pitch_a, pitch_b, pitch_c;
// hipMallocPitch((void**)&d_a, &pitch_a, sizeof(DATATYPE) * arraysizeN, arraysizeN);
// hipMallocPitch((void**)&d_b, &pitch_b, sizeof(DATATYPE) * arraysizeN, arraysizeN);
// hipMallocPitch((void**)&d_c3, &pitch_c, sizeof(DATATYPE) * arraysizeN, arraysizeN);
// //printf("%d,%d,%d", pitch_a, pitch_b, pitch_c);
// hipMemcpy2D(d_a, pitch_a, a, sizeof(DATATYPE) * arraysizeN, sizeof(DATATYPE) * arraysizeN, arraysizeN, hipMemcpyHostToDevice);
// hipMemcpy2D(d_b, pitch_b, b, sizeof(DATATYPE) * arraysizeN, sizeof(DATATYPE) * arraysizeN, arraysizeN, hipMemcpyHostToDevice);
// matrix_multiplication_gpu_2 << <arraysizeN, threadnum, sizeof(DATATYPE)* arraysizeN >> > (d_a, pitch_a / sizeof(DATATYPE), d_b, pitch_b / sizeof(DATATYPE), d_c3, pitch_c / sizeof(DATATYPE), arraysizeN);
// hipMemcpy2D(c3, sizeof(DATATYPE) * arraysizeN, d_c3, pitch_c, sizeof(DATATYPE) * arraysizeN, arraysizeN, hipMemcpyDeviceToHost);
// hipFree(d_a);
// hipFree(d_b);
// hipFree(d_c3);
// printf("aligned storage a b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c3 [i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c3 [i] = 0;
// }
//
// //
// int bx = (arraysizeN + threadnx - 1) / threadnx;
// dim3 blockns(bx, bx);
// dim3 threadns(threadnx, threadnx);
// hipMallocPitch((void**)&d_a, &pitch_a, sizeof(DATATYPE)* arraysizeN, arraysizeN);
// hipMallocPitch((void**)&d_b, &pitch_b, sizeof(DATATYPE)* arraysizeN, arraysizeN);
// hipMallocPitch((void**)&d_c3, &pitch_c, sizeof(DATATYPE)* arraysizeN, arraysizeN);
// hipMemcpy2D(d_a, pitch_a, a, sizeof(DATATYPE)* arraysizeN, sizeof(DATATYPE)* arraysizeN, arraysizeN, hipMemcpyHostToDevice);
// hipMemcpy2D(d_b, pitch_b, b, sizeof(DATATYPE)* arraysizeN, sizeof(DATATYPE)* arraysizeN, arraysizeN, hipMemcpyHostToDevice);
// matrix_multiplication_gpu_3 << <blockns, threadns >> > (d_a, pitch_a / sizeof(DATATYPE), d_b, pitch_b / sizeof(DATATYPE), d_c3, pitch_c / sizeof(DATATYPE), arraysizeN);
// hipMemcpy2D(c3, sizeof(DATATYPE)* arraysizeN, d_c3, pitch_c, sizeof(DATATYPE)* arraysizeN, arraysizeN, hipMemcpyDeviceToHost);
// hipFree(d_a);
// hipFree(d_b);
// hipFree(d_c3);
// printf("checkboard array a b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c3[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c3[i] = 0;
// }
//
// //
// hipMallocPitch((void**)&d_a, &pitch_a, sizeof(DATATYPE) * arraysizeN, arraysizeN);
// hipMallocPitch((void**)&d_b, &pitch_b, sizeof(DATATYPE) * arraysizeN, arraysizeN);
// hipMallocPitch((void**)&d_c3, &pitch_c, sizeof(DATATYPE)* arraysizeN, arraysizeN);
// hipMemcpy2D(d_a, pitch_a, a, sizeof(DATATYPE)* arraysizeN, sizeof(DATATYPE)* arraysizeN, arraysizeN, hipMemcpyHostToDevice);
// hipMemcpy2D(d_b, pitch_b, b, sizeof(DATATYPE)* arraysizeN, sizeof(DATATYPE)* arraysizeN, arraysizeN, hipMemcpyHostToDevice);
// matrix_multiplication_gpu_4 << <blockns, threadns >> > (d_a, pitch_a / sizeof(DATATYPE), d_b, pitch_b / sizeof(DATATYPE), d_c3, pitch_c / sizeof(DATATYPE), arraysizeN);
// hipMemcpy2D(c3, sizeof(DATATYPE)* arraysizeN, d_c3, pitch_c, sizeof(DATATYPE)* arraysizeN, arraysizeN, hipMemcpyDeviceToHost);
// hipFree(d_a);
// hipFree(d_b);
// hipFree(d_c3);
// printf("improved checkboard a b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c3[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c3[i] = 0;
// }
// //cublas
// hipblasHandle_t handle;
// hipblasCreate(&handle);
// hipMalloc((void**)&d_a, sizeof(DATATYPE)* arraysizeN* arraysizeN);
// hipMalloc((void**)&d_b, sizeof(DATATYPE)* arraysizeN* arraysizeN);
// hipMalloc((void**)&d_c3, sizeof(DATATYPE)* arraysizeN* arraysizeN);
// float alpha = 1.0;
// float beta = 0.0;
// hipblasSetVector(arraysizeN* arraysizeN, sizeof(DATATYPE), a, 1, d_a, 1);
// hipblasSetVector(arraysizeN* arraysizeN, sizeof(DATATYPE), b, 1, d_b, 1);
// hipblasSetVector(arraysizeN * arraysizeN, sizeof(DATATYPE), c3, 1, d_c3, 1);
// hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, arraysizeN, arraysizeN, arraysizeN, &alpha, d_b, arraysizeN, d_a, arraysizeN, &beta, d_c3, arraysizeN);
// hipblasGetVector(arraysizeN * arraysizeN, sizeof(DATATYPE), d_c3, 1, c3, 1);
// hipFree(d_a);
// hipFree(d_b);
// hipFree(d_c3);
// hipblasDestroy(handle);
// printf("cublas a b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c3[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c3[i] = 0;
// }
//
// return 0;
//}
//
//() a:mnc:nm
// Host transpose, read-ordered: walks a (m x n, row-major) row by row and
// scatters each element into c (n x m, row-major): c[j][i] = a[i][j].
void matrix_transposition_serial_1(DATATYPE* a, DATATYPE* c, int m, int n) {
	for (int row = 0; row < m; ++row) {
		const DATATYPE* src = a + row * n;  // start of row `row` in a
		for (int col = 0; col < n; ++col) {
			c[col * m + row] = src[col];
		}
	}
}
// a:mnc:nm
// Host transpose, write-ordered: fills c (n x m, row-major) row by row,
// gathering from a (m x n, row-major): c[i][j] = a[j][i].
// Fix: the original indexed c with stride n and a with stride m, which is only
// correct for square matrices; use the proper row strides (m for c, n for a)
// so rectangular inputs transpose correctly too.
void matrix_transposition_serial_2(DATATYPE* a, DATATYPE* c, int m, int n) {
	int i, j;
	for (i = 0;i < n;i++) {
		for (j = 0;j < m;j++) {
			c[i * m + j] = a[j * n + i];
		}
	}
}
//1D
// 1D GPU transpose, read-coalesced: block `bid` handles rows bid, bid +
// gridDim.x, ...; within a row the block's threads stride over the columns.
// Fix: the column index must restart for every row -- in the original, `tid`
// kept its post-loop value (>= n), so any block assigned more than one row
// (m > gridDim.x) silently skipped all rows after its first.
__global__ void matrix_transposition_gpu_1d_1(DATATYPE* a, DATATYPE* c, int m, int n) {
	const int tidx = threadIdx.x;
	const int bidx = blockIdx.x;
	int bid = bidx;
	while (bid < m) {
		int tid = tidx;  // restart the column sweep for each row
		while (tid < n) {
			c[tid * m + bid] = a[bid * n + tid];
			tid += blockDim.x;
		}
		bid += gridDim.x;
	}
}
//1D
// 1D GPU transpose, write-coalesced: block `bid` fills rows of c contiguously,
// gathering strided reads from a.
// Fix: the column index must restart for every row -- in the original, `tid`
// kept its post-loop value (>= n), so any block assigned more than one row
// (m > gridDim.x) silently skipped all rows after its first.
__global__ void matrix_transposition_gpu_1d_2(DATATYPE* a, DATATYPE* c, int m, int n) {
	const int tidx = threadIdx.x;
	const int bidx = blockIdx.x;
	int bid = bidx;
	while (bid < m) {
		int tid = tidx;  // restart the column sweep for each row
		while (tid < n) {
			c[bid * n + tid] = a[tid * m + bid];
			tid += blockDim.x;
		}
		bid += gridDim.x;
	}
}
//2D
// 2D GPU transpose, one thread per element: thread (x, y) copies a[y][x]
// (a is m x n, row-major) into c[x][y] (c is n x m, row-major).
__global__ void matrix_transposition_gpu_2d_1(DATATYPE* a, DATATYPE* c, int m, int n) {
	const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
	const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
	if (col >= (unsigned int)n || row >= (unsigned int)m) {
		return;  // thread lies outside the matrix
	}
	c[col * m + row] = a[row * n + col];
}
//2D
// 2D GPU transpose via shared memory: each block stages a blocknum x blocknum
// tile of a, then writes it back transposed so both the global read and the
// global write are coalesced. The +1 padding column avoids shared-memory bank
// conflicts on the transposed access.
// Fix: the load-bounds test used the compile-time constants arraysizeN /
// arraysizeM instead of the runtime arguments n / m, so the kernel was wrong
// for any other matrix size; test against the parameters.
// NOTE(review): for partial edge tiles some tmp slots are read without having
// been written; keep m and n multiples of blocknum -- confirm with callers.
__global__ void matrix_transposition_gpu_2d_2(DATATYPE* a, DATATYPE* c, int m, int n) {
	__shared__ DATATYPE tmp[blocknum][blocknum + 1];
	unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
	if ((xIndex < n) && (yIndex < m)) {
		unsigned int index_in = xIndex + n * yIndex;
		tmp[threadIdx.y][threadIdx.x] = a[index_in];
	}
	__syncthreads();
	// Swap the block coordinates so the output access is contiguous.
	xIndex = blockIdx.y * blocknum + threadIdx.x;
	yIndex = blockIdx.x * blocknum + threadIdx.y;
	if ((xIndex < m) && (yIndex < n)) {
		unsigned int index_out = yIndex * m + xIndex;
		c[index_out] = tmp[threadIdx.x][threadIdx.y];
	}
}
//2D transpose with diagonal block reordering (spreads accesses across memory partitions)
// Shared-memory transpose with diagonal block reordering: remapping blockIdx
// along diagonals spreads the blocks' global accesses across memory
// partitions. The +1 padding column avoids shared-memory bank conflicts.
// NOTE(review): no bounds checks -- assumes m and n are exact multiples of
// blocknum, otherwise edge tiles read/write out of bounds; confirm before
// enabling the commented-out call site in main.
// NOTE(review): the trailing loop is debug output printed only by the thread
// whose index_out == 0.
__global__ void matrix_transposition_gpu_diagonal(DATATYPE* a, DATATYPE* c, int m, int n) {
	__shared__ float tile[blocknum][blocknum+1];
	int blockIdx_x, blockIdx_y;
	// Remap block coordinates: square grids walk the diagonal directly,
	// rectangular grids derive it from a flattened block id.
	if (n == m) {
		blockIdx_y = blockIdx.x;
		blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
	}
	else {
		int bid = blockIdx.x + gridDim.x * blockIdx.y;
		blockIdx_y = bid % gridDim.y;
		blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
	}
	// Coalesced read position in a.
	int xIndex = blockIdx_x * blocknum + threadIdx.x;
	int yIndex = blockIdx_y * blocknum + threadIdx.y;
	int index_in = xIndex + (yIndex)*n;
	// Coalesced write position in c (block coordinates swapped).
	xIndex = blockIdx_y * blocknum + threadIdx.x;
	yIndex = blockIdx_x * blocknum + threadIdx.y;
	int index_out = xIndex + (yIndex)*m;
	tile[threadIdx.y][threadIdx.x] = a[index_in];
	__syncthreads();
	c[index_out] = tile[threadIdx.x][threadIdx.y];
	//printf("%.0f%1.0f %d %d %d \n",c[index_out],tile[threadIdx.x][threadIdx.y],threadIdx.x,threadIdx.y,index_out);
	if (index_out == 0) {
		for (int i = 0;i < blocknum;i++) {
			for (int j = 0;j < blocknum + 1;j++) {
				printf("%4.0f", tile[i][j]);
			}
			printf("\n");
		}
	}
}
// Transpose demo driver: fills an arraysizeM x arraysizeN matrix, runs the
// serial references and each GPU variant on the square arraysizeN view, and
// prints (then clears) the result after every run.
// Fixes: the mallocs sized elements with sizeof(DATATYPE*) instead of
// sizeof(DATATYPE); the unused b/c3/d_b/d_c3 buffers are removed; all host
// and device memory is now released before returning.
int main() {
	DATATYPE* a, * c, * d_a, * d_c;
	a = (DATATYPE*)malloc(sizeof(DATATYPE) * arraysizeM * arraysizeN);
	c = (DATATYPE*)malloc(sizeof(DATATYPE) * arraysizeM * arraysizeN);
	for (int i = 0; i < arraysizeM * arraysizeN; i++) {
		a[i] = i;
	}
	for (int i = 0; i < arraysizeM * arraysizeN; i++) {
		c[i] = 0;
	}
	// Serial transpose, read-ordered.
	matrix_transposition_serial_1(a, c, arraysizeN, arraysizeN);
	printf(":\n");
	for (int i = 0;i < arraysizeN;i++) {
		for (int j = 0;j < arraysizeM;j++) {
			printf("%5.0f", c[i * arraysizeN + j]);
			c[i * arraysizeN + j] = 0;
		}
		printf("\n");
	}
	// Serial transpose, write-ordered.
	matrix_transposition_serial_2(a, c, arraysizeN, arraysizeN);
	printf("(:\n");
	for (int i = 0;i < arraysizeN;i++) {
		for (int j = 0;j < arraysizeM;j++) {
			printf("%5.0f", c[i * arraysizeN + j]);
			c[i * arraysizeN + j] = 0;
		}
		printf("\n");
	}
	hipMalloc((void**)&d_a, sizeof(DATATYPE) * arraysizeM * arraysizeN);
	hipMalloc((void**)&d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN);
	hipMemcpy(d_a, a, sizeof(DATATYPE) * arraysizeM * arraysizeN, hipMemcpyHostToDevice);
	// 1D kernel, read-coalesced.
	matrix_transposition_gpu_1d_1 << <512, 512 >> > (d_a, d_c, arraysizeN, arraysizeN);
	hipMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, hipMemcpyDeviceToHost);
	printf("1D:\n");
	for (int i = 0;i < arraysizeN;i++) {
		for (int j = 0;j < arraysizeM;j++) {
			printf("%5.0f", c[i * arraysizeN + j]);
			c[i * arraysizeN + j] = 0;
		}
		printf("\n");
	}
	// 1D kernel, write-coalesced.
	matrix_transposition_gpu_1d_2 << <512, 512 >> > (d_a, d_c, arraysizeN, arraysizeN);
	hipMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, hipMemcpyDeviceToHost);
	printf("1D:\n");
	for (int i = 0;i < arraysizeN;i++) {
		for (int j = 0;j < arraysizeM;j++) {
			printf("%5.0f", c[i * arraysizeN + j]);
			c[i * arraysizeN + j] = 0;
		}
		printf("\n");
	}
	// 2D kernels, one thread per element.
	dim3 threads(blocknum, blocknum, 1);
	dim3 blocks((arraysizeN + blocknum - 1) / blocknum, (arraysizeN + blocknum - 1) / blocknum, 1);
	matrix_transposition_gpu_2d_1 << <blocks, threads >> > (d_a, d_c, arraysizeN, arraysizeN);
	hipMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, hipMemcpyDeviceToHost);
	printf("2D:\n");
	for (int i = 0;i < arraysizeN;i++) {
		for (int j = 0;j < arraysizeM;j++) {
			printf("%5.0f", c[i * arraysizeN + j]);
			c[i * arraysizeN + j] = 0;
		}
		printf("\n");
	}
	matrix_transposition_gpu_2d_2 << <blocks, threads >> > (d_a, d_c, arraysizeN, arraysizeN);
	hipMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, hipMemcpyDeviceToHost);
	printf("2D:\n");
	for (int i = 0;i < arraysizeN;i++) {
		for (int j = 0;j < arraysizeM;j++) {
			printf("%5.0f", c[i * arraysizeN + j]);
			c[i * arraysizeN + j] = 0;
		}
		printf("\n");
	}
	/*matrix_transposition_gpu_diagonal << <blocks, threads >> > (d_a, d_c, arraysizeN, arraysizeN);
	hipMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, hipMemcpyDeviceToHost);
	printf("2Ddiagonal:\n");
	for (int i = 0;i < arraysizeN;i++) {
		for (int j = 0;j < arraysizeM;j++) {
			printf("%5.0f", c[i * arraysizeN + j]);
			c[i * arraysizeN + j] = 0;
		}
		printf("\n");
	}*/
	// Release device and host buffers (the original leaked all of them).
	hipFree(d_a);
	hipFree(d_c);
	free(a);
	free(c);
	return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Copies a and b to the device, runs addKernel with one thread per element,
// and copies the sums back into c. Returns the first error encountered; the
// device buffers are released on every path.
hipError_t addWithCuda(int* c, int* a, int* b, int size)
{
	int* dev_a = 0;
	int* dev_b = 0;
	int* dev_c = 0;
	hipError_t cudaStatus;
	do {
		// Choose which GPU to run on, change this on a multi-GPU system.
		cudaStatus = hipSetDevice(0);
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
			break;
		}
		// Allocate GPU buffers for three vectors (two input, one output).
		cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMalloc failed!");
			break;
		}
		cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMalloc failed!");
			break;
		}
		cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMalloc failed!");
			break;
		}
		// Copy input vectors from host memory to GPU buffers.
		cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMemcpy failed!");
			break;
		}
		cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMemcpy failed!");
			break;
		}
		// Launch a kernel on the GPU with one thread for each element.
		addKernel << <1, size >> > (dev_c, dev_a, dev_b);
		// A failed launch (e.g. a bad configuration) surfaces here.
		cudaStatus = hipGetLastError();
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
			break;
		}
		// Wait for the kernel and pick up any asynchronous execution error.
		cudaStatus = hipDeviceSynchronize();
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
			break;
		}
		// Copy output vector from GPU buffer to host memory.
		cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMemcpy failed!");
			break;
		}
	} while (0);
	hipFree(dev_c);
	hipFree(dev_a);
	hipFree(dev_b);
	return cudaStatus;
}
| 8291326660a29d75f75e0d4f26ebaab4253a87a9.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cublas_v2.h>
#include <iostream>
#include <stdio.h>
#define DATATYPE float
#define arraySize 5
#define threadnum 16
#define blocknum 16
#define arrayNsize 10
#define arrayMsize 15
#define arraysize 5
#define single 1
//第八章 矩阵乘法
#define arraysizeM 10
#define arraysizeL 10
#define arraysizeN 10
#define threadnx 2
cudaError_t addWithCuda(int* c, int* a, int* b, int size);
//示例程序
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// Indexes by threadIdx.x only, so the caller (addWithCuda) launches it with
// a single block covering all elements.
__global__ void addKernel(int* c, int* a, int* b)
{
	int idx = threadIdx.x;
	c[idx] = a[idx] + b[idx];
}
//单block单thread向量加法
// Single-block / single-thread vector add: one thread walks the whole array
// serially on the device. Baseline variant only.
__global__ void vector_add_gpu_1(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
	int idx = 0;
	while (idx < n) {
		c[idx] = a[idx] + b[idx];
		++idx;
	}
}
//单block多thread向量加法
// Single-block / multi-thread vector add: thread t handles elements
// t, t + blockDim.x, t + 2*blockDim.x, ...
__global__ void vector_add_gpu_2(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
	const int stride = blockDim.x;  // total threads in the block
	for (int idx = threadIdx.x; idx < n; idx += stride) {
		c[idx] = a[idx] + b[idx];
	}
}
//多block多thread向量加法
// Multi-block vector add with a grid-stride loop: global thread id
// blockIdx.x*blockDim.x + threadIdx.x steps through the array by the total
// thread count gridDim.x*blockDim.x.
__global__ void vector_add_gpu_3(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
	const int stride = gridDim.x * blockDim.x;
	for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
		c[idx] = a[idx] + b[idx];
	}
}
//m×n矩阵串行计算
// Host reference m x n matrix add: c = a + b, element by element.
void vector_add_mn(DATATYPE** a, DATATYPE** b, DATATYPE** c, int m, int n) {
	for (int row = 0; row < m; ++row) {
		for (int col = 0; col < n; ++col) {
			c[row][col] = a[row][col] + b[row][col];
		}
	}
}
//维度为m × n的矩阵加法并行计算
// m x n matrix add: thread (x, y) of a 2D launch handles element [x][y]
// (rows indexed by x up to m, columns by y up to n = arrayNsize).
// Fixes two defects in the original: the `while` never advanced its indices,
// so any in-range thread looped forever, and the column bound compared
// against m instead of n.
__global__ void vector_add_gpu_4(DATATYPE(*a)[arrayNsize], DATATYPE(*b)[arrayNsize], DATATYPE(*c)[arrayNsize], int m, int n) {
	unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;   // row index
	unsigned int tidy = threadIdx.y + blockDim.y * blockIdx.y;  // column index
	if (tid < m && tidy < n) {
		c[tid][tidy] = a[tid][tidy] + b[tid][tidy];
	}
}
//CPU串行向量内积运算
// Host reference dot product: *c = sum_i a[i]*b[i], accumulated in double.
void vector_dot_product_serial(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
	double acc = 0.0;
	for (int idx = 0; idx < n; ++idx) {
		acc += a[idx] * b[idx];
	}
	*c = acc;
}
//GPU分散归约向量内积
// Single-block dot product with a "scattered" (interleaved-address) shared
// memory reduction: at step k, threads with tidx % 2^k == 0 accumulate their
// neighbour's partial sum. Assumes blockDim.x == threadnum.
// Fix: in the original, the shared-memory store and the whole reduction
// (including __syncthreads) sat inside the accumulation loop, so threads left
// the loop after different trip counts and hit the barrier divergently, and
// threads whose first index was already >= n never wrote tmp at all. The
// accumulation and the reduction are now separate phases.
__global__ void vector_dot_product_gpu_1(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
	__shared__ DATATYPE tmp[threadnum];
	const int tidx = threadIdx.x;
	const int t_n = blockDim.x;
	double temp = 0.0;
	for (int tid = tidx; tid < n; tid += t_n) {
		temp += a[tid] * b[tid];
	}
	tmp[tidx] = temp;  // every thread contributes, even with an empty loop
	__syncthreads();
	// Pairwise reduction with a growing stride (the textbook "scattered"
	// scheme this kernel demonstrates; diverges within warps by design).
	int i = 2, j = 1;
	while (i <= threadnum) {
		if ((tidx % i) == 0) {
			tmp[tidx] += tmp[tidx + j];
		}
		__syncthreads();
		i *= 2;
		j *= 2;
	}
	if (tidx == 0) {
		c[0] = tmp[0];
	}
}
//单block低线程归约向量内积
// Single-block dot product with a sequential-addressing ("low thread")
// shared-memory reduction: active threads stay packed at the low indices,
// halving each step. The reduction spans threadnum entries, so it expects
// blockDim.x == threadnum with threadnum a power of two.
__global__ void vector_dot_product_gpu_2(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
	__shared__ DATATYPE tmp[threadnum];
	const int tidx = threadIdx.x;
	double partial = 0.0;
	for (int idx = tidx; idx < n; idx += blockDim.x) {
		partial += a[idx] * b[idx];
	}
	tmp[tidx] = partial;
	__syncthreads();
	for (int half = threadnum / 2; half > 0; half /= 2) {
		if (tidx < half) {
			tmp[tidx] += tmp[tidx + half];
		}
		__syncthreads();
	}
	if (tidx == 0) {
		c[0] = tmp[0];
	}
}
//多block向量内积(CPU二次归约)
// Multi-block dot product, stage 1: each block reduces its grid-stride
// partial sums in shared memory and writes one value per block to
// c_tmp[blockIdx.x]. A second pass (on the host, or
// vector_dot_product_gpu_4) folds c_tmp into the final result.
__global__ void vector_dot_product_gpu_3(DATATYPE* a, DATATYPE* b, DATATYPE* c_tmp, int n) {
	__shared__ DATATYPE tmp[threadnum];
	const int tidx = threadIdx.x;
	const int stride = blockDim.x * gridDim.x;
	double partial = 0.0;
	for (int idx = blockIdx.x * blockDim.x + tidx; idx < n; idx += stride) {
		partial += a[idx] * b[idx];
	}
	tmp[tidx] = partial;
	__syncthreads();
	for (int half = threadnum / 2; half > 0; half /= 2) {
		if (tidx < half) {
			tmp[tidx] += tmp[tidx + half];
		}
		__syncthreads();
	}
	if (tidx == 0) {
		c_tmp[blockIdx.x] = tmp[0];
	}
}
//GPU归约
// Multi-block dot product, stage 2: a single block of blocknum threads folds
// the per-block partial sums produced by stage 1 into result[0].
__global__ void vector_dot_product_gpu_4(float* result_tmp, float* result) {
	__shared__ float temp[blocknum];
	const int tidx = threadIdx.x;
	temp[tidx] = result_tmp[tidx];  // one partial sum per thread
	__syncthreads();
	for (int half = blocknum / 2; half > 0; half /= 2) {
		if (tidx < half) {
			temp[tidx] += temp[tidx + half];
		}
		__syncthreads();
	}
	if (tidx == 0) {
		result[0] = temp[0];
	}
}
//原子操作多block向量内积(两次归约替换一次原子操作)
// Multi-block dot product where every thread folds its grid-stride partial
// sum straight into c[0] with atomicAdd (no shared-memory reduction).
// NOTE(review): the in-kernel c[0] = 0.0 races with other blocks' atomicAdd
// -- if block 0 is scheduled late it can wipe out sums already accumulated.
// Zeroing c on the host before launch would be safe; confirm with callers.
__global__ void vector_dot_product_gpu_5_0(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
	if ((threadIdx.x == 0) && (blockIdx.x == 0)) {
		c[0] = 0.0;
	}
	const int tidx = threadIdx.x;
	const int bidx = blockIdx.x;
	const int t_n = blockDim.x * gridDim.x;
	int tid = bidx * blockDim.x + tidx;
	double temp = 0.0;
	while (tid < n) {
		temp += a[tid] * b[tid];
		tid += t_n;
	}
	// temp narrows from double to DATATYPE for the atomic accumulation.
	atomicAdd(c, temp);
}
//原子操作多block向量内积(block内归约block间原子操作)
// Multi-block dot product: each block reduces its grid-stride partials in
// shared memory, then thread 0 of each block adds the block total into c[0]
// with a single atomicAdd (one atomic per block instead of one per thread).
// NOTE(review): the in-kernel c[0] = 0.0 races with other blocks' atomicAdd
// -- if block 0 is scheduled late it can wipe out sums already accumulated.
// Zeroing c on the host before launch would be safe; confirm with callers.
__global__ void vector_dot_product_gpu_5(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
	if ((threadIdx.x == 0) && (blockIdx.x == 0)) {
		c[0] = 0.0;
	}
	__shared__ DATATYPE tmp[threadnum];
	const int tidx = threadIdx.x;
	const int bidx = blockIdx.x;
	const int t_n = blockDim.x * gridDim.x;
	int tid = bidx * blockDim.x + tidx;
	double temp = 0.0;
	while (tid < n) {
		temp += a[tid] * b[tid];
		tid += t_n;
	}
	tmp[tidx] = temp;
	__syncthreads();
	// Sequential-addressing reduction over the block's partial sums.
	int i = blockDim.x / 2;
	while (i != 0) {
		if (tidx < i) {
			tmp[tidx] += tmp[tidx + i];
		}
		__syncthreads();
		i /= 2;
	}
	if (tidx == 0) {
		atomicAdd(c, tmp[0]);
	}
}
//计数法实现多block向量内积
// Block-level shared-memory reduction helper: folds tmp[0..blockDim.x) into
// tmp[0] and has thread 0 store the total to out[0]. tmp is volatile so
// values re-read across the barrier are not cached in registers.
__device__ void vector_dot(DATATYPE* out, volatile DATATYPE* tmp) {
	const int tidx = threadIdx.x;
	for (int half = blockDim.x / 2; half > 0; half /= 2) {
		if (tidx < half) {
			tmp[tidx] += tmp[tidx + half];
		}
		__syncthreads();
	}
	if (tidx == 0) {
		out[0] = tmp[0];
	}
}
// Block-completion counter for the single-launch two-stage reduction below.
__device__ unsigned int lockcount = 0;
// Multi-block dot product in one launch ("counting" scheme): every block
// reduces its grid-stride partials into c_tmp[blockIdx.x]; the last block to
// finish (detected via the atomic counter) then reduces c_tmp into c[0].
// Assumes gridDim.x <= blockDim.x == threadnum so the final pass can load one
// partial per thread (matches the blocknum/threadnum launch in this file).
// Fix: the original tested the plain read `lockcount == gridDim.x`, which
// races with other blocks' increments; the value returned by atomicAdd is the
// only reliable way to detect "this block finished last". The counter reset
// is also done by a single thread now.
__global__ void vector_dot_product_gpu_6(DATATYPE* a, DATATYPE* b, DATATYPE* c_tmp, DATATYPE* c, int n) {
	__shared__ DATATYPE tmp[threadnum];
	const int tidx = threadIdx.x;
	const int bidx = blockIdx.x;
	const int t_n = blockDim.x * gridDim.x;
	int tid = bidx * blockDim.x + tidx;
	double temp = 0.0;
	while (tid < n) {
		temp += a[tid] * b[tid];
		tid += t_n;
	}
	tmp[tidx] = temp;
	__syncthreads();
	vector_dot(&c_tmp[blockIdx.x], tmp);
	__shared__ bool lock;
	__threadfence();  // publish this block's c_tmp entry grid-wide
	if (tidx == 0) {
		// atomicAdd returns the pre-increment value: the block that sees
		// gridDim.x - 1 is the last one to finish stage 1.
		unsigned int finished = atomicAdd(&lockcount, 1);
		lock = (finished == (gridDim.x - 1));
	}
	__syncthreads();
	if (lock) {
		tmp[tidx] = c_tmp[tidx];
		__syncthreads();
		vector_dot(c, tmp);
		if (tidx == 0) {
			lockcount = 0;  // reset for the next launch
		}
	}
}
//串行向量加法
// Host reference vector add: c[i] = a[i] + b[i].
void vector_add_serial(DATATYPE* a, DATATYPE* b, DATATYPE* c, int n) {
	for (int idx = 0; idx < n; ++idx) {
		c[idx] = a[idx] + b[idx];
	}
}
/*
int main()
{
//dim3 blocknum(1);
//dim3 threadnum(arrayMsize,arrayNsize);
float a[arraySize] = { 1, 2, 3, 4, 5 };
float b[arraySize] = { 10, 20, 30, 40, 50 };
float c[arraySize] = { 0 };
float c1[arraySize] = { 0 };
float **aa;
float **bb;
float **cc;
aa = (float**)malloc(sizeof(float*) * arrayMsize);
bb = (float**)malloc(sizeof(float*) * arrayMsize);
cc = (float**)malloc(sizeof(float*) * arrayMsize);
for (int i = 0; i < arrayMsize; ++i) {
aa[i] = (float*)malloc(sizeof(float*) * arrayNsize);
bb[i] = (float*)malloc(sizeof(float*) * arrayNsize);
cc[i] = (float*)malloc(sizeof(float*) * arrayNsize);
}
for (int i = 0; i < arrayMsize; ++i) {
for (int j = 0; j < arrayNsize; ++j) {
aa[i][j] = j;
bb[i][j] = j * 10;
cc[i][j] = 0;
}
}
//串行测试
vector_add_serial(a, b, c, arraySize);
//printf("serial :{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",c[0], c[1], c[2], c[3], c[4]);
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//单block单thread加法测试
//GPU内存分配
DATATYPE* d_a, * d_b, * d_c,* d_c_tmp;
cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//复制数据到GPU
cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
//计算
vector_add_gpu_1<<<single,single>>>(d_a, d_b, d_c, arraySize);
//复制结果到CPU
cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, cudaMemcpyDeviceToHost);
//释放空间
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//printf("single block single thread :{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",c[0], c[1], c[2], c[3], c[4]);
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//单block多thread加法
cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//复制数据到GPU
cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
//计算
vector_add_gpu_2 <<<1, threadnum >>> (d_a, d_b, d_c, arraySize);
//复制结果到CPU
cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, cudaMemcpyDeviceToHost);
//释放空间
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//printf("single block multiple thread :{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",c[0], c[1], c[2], c[3], c[4]);
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//多block多thread加法
cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//复制数据到GPU
cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
//计算
vector_add_gpu_3 <<<blocknum, threadnum >>> (d_a, d_b, d_c, arraySize);
//复制结果到CPU
cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, cudaMemcpyDeviceToHost);
//释放空间
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//printf("multiple block multiple thread :{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",c[0], c[1], c[2], c[3], c[4]);
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//cublas库向量加法
DATATYPE* d_aa, * d_bb;
cublasHandle_t handle;
cublasCreate(&handle);
cudaMalloc((void**)&d_aa, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_bb, sizeof(DATATYPE) * arraySize);
float alpha = 1.0;
cublasSetVector(arraySize, sizeof(DATATYPE), a, 1, d_aa, 1);
cublasSetVector(arraySize, sizeof(DATATYPE), b, 1, d_bb, 1);
cublasSaxpy_v2(handle, arraySize, &alpha, d_aa, 1, d_bb, 1);
cublasGetVector(arraySize, sizeof(DATATYPE), d_bb, 1, c1, 1);
cudaFree(d_aa);
cudaFree(d_bb);
cublasDestroy(handle);
//printf("cublas :{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",c1[0], c1[1], c1[2], c1[3], c1[4]);
for (int i = 0; i < arraySize; ++i) {
c1[i] = 0;
}
//m×n矩阵并行加法
//DATATYPE(*d_aaa)[arrayNsize], (*d_bbb)[arrayNsize], (*d_ccc)[arrayNsize];
//cudaMalloc((void**)&d_aaa, sizeof(DATATYPE) * arrayMsize * arrayNsize);
//cudaMalloc((void**)&d_bbb, sizeof(DATATYPE) * arrayMsize * arrayNsize);
//cudaMalloc((void**)&d_ccc, sizeof(DATATYPE) * arrayMsize * arrayNsize);
//cudaMemcpy(d_aaa, aa, sizeof(DATATYPE) * arrayNsize * arrayMsize, cudaMemcpyHostToDevice);
//cudaMemcpy(d_bbb, bb, sizeof(DATATYPE)* arrayNsize* arrayMsize, cudaMemcpyHostToDevice);
//cudaMemcpy(d_ccc, cc, sizeof(DATATYPE)* arrayNsize* arrayMsize, cudaMemcpyHostToDevice);
//vector_add_gpu_4 << <blocknum, threadnum >> > (d_aaa, d_bbb, d_ccc, arrayMsize, arrayNsize);
//cudaMemcpy(cc, d_ccc, sizeof(DATATYPE)* arrayNsize*arrayMsize, cudaMemcpyDeviceToHost);
//cudaFree(d_aaa);
//cudaFree(d_bbb);
//cudaFree(d_ccc);
//std::cout << "m×n matrix add\n";
//for (int i = 0; i < arrayMsize; ++i) {
// for (int j = 0; j < arrayNsize; ++j) {
// std::cout << cc[i][j] << " ";
// cc[i][j] = 0;
// }
// std::cout << "\n";
//}
//m×n加法结果验证
vector_add_mn(aa, bb, cc, arrayMsize, arrayNsize);
//std::cout << "m×n matrix valid\n";
//for (int i = 0; i < arrayMsize; ++i) {
// for (int j = 0; j < arrayNsize; ++j) {
// std::cout << cc[i][j] << " ";
// cc[i][j] = 0;
// }
// std::cout << "\n";
//}
//单block分散归约向量内积
DATATYPE* d_cccc,*d_ca;
DATATYPE ccccd,*cccc;
cccc = &ccccd;
cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_cccc, sizeof(DATATYPE));
//复制数据到GPU
cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
//计算
vector_dot_product_gpu_1 << <single, threadnum >> > (d_a, d_b, d_c, arraySize);
//复制结果到CPU
cudaMemcpy(cccc, d_c, sizeof(DATATYPE), cudaMemcpyDeviceToHost);
//释放空间
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//printf("single block:{1,2,3,4,5} · {10,20,30,40,50} = {%f}\n",ccccd);
cccc = 0;
//单block低线程归约向量内积
cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//复制数据到GPU
cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
//计算
vector_dot_product_gpu_2 << <single, threadnum >> > (d_a, d_b, d_c, arraySize);
//复制结果到CPU
cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, cudaMemcpyDeviceToHost);
//释放空间
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//printf("single block:{1,2,3,4,5} · {10,20,30,40,50} = {%f}\n",c[0]);
c[0] = 0;
//多block向量内积(CPU二次归约)
cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//复制数据到GPU
cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
//计算
vector_dot_product_gpu_3 << <blocknum, threadnum >> > (d_a, d_b, d_c, arraySize);
//复制结果到CPU
cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, cudaMemcpyDeviceToHost);
//释放空间
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
double temp=0;
for (int i = 0; i < blocknum&&i<arraySize; i++) {
if(c[i]!=NULL)
temp += c[i];
}
c[0] = temp;
//printf("multiple block(CPU):{1,2,3,4,5} · {10,20,30,40,50} = {%f}\n", c[0]);
c[0] = 0;
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//多block向量内积(GPU二次归约)
cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_ca, sizeof(DATATYPE) * arraySize);
//复制数据到GPU
cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
//计算
vector_dot_product_gpu_3 << <blocknum, threadnum >> > (d_a, d_b, d_c, arraySize);
vector_dot_product_gpu_4 <<< 1, blocknum >> > (d_c, d_ca);
//复制结果到CPU
cudaMemcpy(c, d_ca, sizeof(DATATYPE) * arraySize, cudaMemcpyDeviceToHost);
//释放空间
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_ca);
//printf("multiple block(GPU):{1,2,3,4,5} · {10,20,30,40,50} = {%f}\n", c[0]);
c[0] = 0;
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//原子操作
cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//复制数据到GPU
cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
//计算
vector_dot_product_gpu_5_0 << <blocknum, threadnum >> > (d_a, d_b, d_c, arraySize);
//复制结果到CPU
cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, cudaMemcpyDeviceToHost);
//释放空间
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//printf("atomic_0 multiple block(GPU):{1,2,3,4,5} · {10,20,30,40,50} = {%f}\n", c[0]);
c[0] = 0;
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
//复制数据到GPU
cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
//计算
vector_dot_product_gpu_5 << <blocknum, threadnum >> > (d_a, d_b, d_c, arraySize);
//复制结果到CPU
cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, cudaMemcpyDeviceToHost);
//释放空间
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//printf("atomic_1 multiple block(GPU):{1,2,3,4,5} · {10,20,30,40,50} = {%f}\n", c[0]);
c[0] = 0;
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//计数法实现多block向量内积
cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_c_tmp, sizeof(DATATYPE) * arraySize);
//复制数据到GPU
cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraySize, cudaMemcpyHostToDevice);
//计算
vector_dot_product_gpu_6 << <blocknum, threadnum >> > (d_a, d_b,d_c_tmp, d_c, arraySize);
//复制结果到CPU
cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraySize, cudaMemcpyDeviceToHost);
//释放空间
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_c_tmp);
//printf("counting method multiple block(GPU):{1,2,3,4,5} · {10,20,30,40,50} = {%f}\n", c[0]);
c[0] = 0;
for (int i = 0; i < arraySize; ++i) {
c[i] = 0;
}
//cublas库向量内积
cublasCreate(&handle);
cudaMalloc((void**)&d_aa, sizeof(DATATYPE) * arraySize);
cudaMalloc((void**)&d_bb, sizeof(DATATYPE) * arraySize);
cublasSetVector(arraySize, sizeof(DATATYPE), a, 1, d_aa, 1);
cublasSetVector(arraySize, sizeof(DATATYPE), b, 1, d_bb, 1);
cublasSdot_v2(handle, arraySize, d_aa, 1, d_bb, 1,&c1[0]);
//cublasGetVector(arraySize, sizeof(DATATYPE), d_bb, 1, c1, 1);
cudaFree(d_aa);
cudaFree(d_bb);
cublasDestroy(handle);
//printf("cublas :{1,2,3,4,5} · {10,20,30,40,50} = {%f}\n",c1[0]);
for (int i = 0; i < arraySize; ++i) {
c1[i] = 0;
}
// Add vectors in parallel.
//cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
//if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// return 1;
//}
//
//printf("{1,2,3,4,5} + {10,20,30,40,50} = {%f,%f,%f,%f,%f}\n",
// c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
//cudaStatus = cudaDeviceReset();
//if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceReset failed!");
// return 1;
//}
return 0;
}
*/
//第八章
// Serial matrix multiply, naive i-j-k order: c = a * b.
// a is m rows x l columns, b is l rows x n columns, c is m rows x n
// columns, all row-major.
void matrix_multiplication_serial_1(DATATYPE* a, DATATYPE* b, DATATYPE* c, int m, int n, int l) {
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < n; ++col) {
            double acc = 0.0;  // accumulate the dot product in double
            for (int k = 0; k < l; ++k) {
                acc += a[row * l + k] * b[k * n + col];
            }
            c[row * n + col] = acc;
        }
    }
}
// Loop-interchanged serial matrix multiply, i-k-j order: c += a * b.
// a is m x l, b is l x n, c is m x n (row-major). Putting j innermost
// walks b and c row-wise (cache-friendly).
// NOTE: accumulates with +=, so the caller must zero c beforehand.
void matrix_multiplication_serial_2(DATATYPE* a, DATATYPE* b, DATATYPE* c, int m, int n, int l) {
    for (int row = 0; row < m; ++row) {
        for (int k = 0; k < l; ++k) {
            double a_ik = a[row * l + k];  // invariant over the inner loop
            for (int col = 0; col < n; ++col) {
                c[row * n + col] += a_ik * b[k * n + col];
            }
        }
    }
}
// Transposed-operand serial matrix multiply: c = a * b.
// a is m x l, b is l x n, c is m x n (row-major). b is first transposed
// into a scratch buffer so the inner dot product reads both operands
// with unit stride.
//
// Fix: the original transpose wrote b1[i * l + j] = b[j * n + i] and the
// product read b1[j * n + k]; that is only correct when the matrices are
// square (n == l) and reads out of bounds for rectangular b. The
// indexing below is correct for any m, n, l.
void matrix_multiplication_serial_3(DATATYPE* a, DATATYPE* b, DATATYPE* c, int m, int n, int l) {
    DATATYPE* b1 = (DATATYPE*)malloc(sizeof(DATATYPE) * l * n);
    if (b1 == NULL) {
        return;  // allocation failed; leave c untouched
    }
    /* b1 is the n x l transpose of b: b1[j][i] = b[i][j] */
    for (int i = 0; i < l; i++) {
        for (int j = 0; j < n; j++) {
            b1[j * l + i] = b[i * n + j];
        }
    }
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            double temp = 0.0;
            for (int k = 0; k < l; k++) {
                temp += a[i * l + k] * b1[j * l + k];  // both unit-stride
            }
            c[i * n + j] = temp;
        }
    }
    free(b1);
}
// Grid-thread matrix multiply on the GPU: C = A * B.
// A is n rows x lda columns, B has leading dimension ldb, C is n rows x
// ldc columns (row-major); the caller passes ldb == ldc.
// One thread computes one element of C; the flat global thread index is
// decoded into (row, column) by division/modulo with n. The launch must
// supply at least n * n threads (extras fail the bounds check and idle).
__global__ void matrix_multiplication_gpu_1(const DATATYPE* a, size_t lda, const DATATYPE* b, size_t ldb, DATATYPE* c, size_t ldc, int n) {
const int tidx = threadIdx.x;
const int bidx = blockIdx.x;
// flat global thread id -> (row, column) of C
const int idx = bidx * blockDim.x + tidx;
const int row = idx / n;
const int column = idx % n;
// NOTE(review): column is always < n by construction; checking it
// against ldc presumably assumes n == ldc in intended use -- verify.
if (row < n && column < ldc) {
double tmp = 0.0; // accumulate the dot product in double
for (int i = 0; i < n; i++) {
tmp += a[row * lda + i] * b[i * ldb + column];
}
c[row * ldc + column] = tmp;
}
}
// Block-thread matrix multiply: one block per row of C, with the block's
// threads striding across that row's columns.
// A is n rows x lda columns, B is lda rows x ldb columns, C is n rows x
// ldc columns (row-major); the caller passes ldb == ldc.
// Assumes the launch supplies one block per row (gridDim.x >= n) unless
// the commented grid-stride loop below is re-enabled.
__global__ void matrix_multiplication_gpu_1_0(const DATATYPE* a, size_t lda, const DATATYPE* b, size_t ldb, DATATYPE* c, size_t ldc, int n) {
int tidx = threadIdx.x;
int bidx = blockIdx.x;
double tmp = 0.0;
int i;
//for (; bidx < n; bidx += gridDim.x) // uncomment if the row count exceeds the launch's block count
{
// each thread covers columns tidx, tidx + blockDim.x, ...
for (tidx = threadIdx.x; tidx < ldc; tidx += blockDim.x) {
tmp = 0.0;
for (i = 0; i < lda; i++) {
tmp += a[bidx * lda + i] * b[i * ldb + tidx];
}
c[bidx * ldc + tidx] = tmp;
}
}
}
// Row-shared-memory matrix multiply: each block caches one row of A in
// dynamically sized shared memory, then its threads compute that row of C.
// Launch with one block per row and a dynamic shared-memory size of at
// least n * sizeof(DATATYPE).
__global__ void matrix_multiplication_gpu_2(const DATATYPE* a, size_t lda, const DATATYPE* b, size_t ldb, DATATYPE* c, size_t ldc, int n) {
extern __shared__ DATATYPE data[]; // holds one row of A (length n)
const int tid = threadIdx.x;
const int row = blockIdx.x; // this block's row of A / C
int i, j;
// cooperative, strided load of row `row` of A into shared memory
for (i = tid; i < n; i += blockDim.x) {
data[i] = a[row * lda + i];
}
__syncthreads(); // the row must be fully staged before any thread reads it
double tmp = 0.0;
// each thread computes columns tid, tid + blockDim.x, ... of this row of C
for (j = tid; j < n; j += blockDim.x) {
tmp = 0.0;
for (i = 0; i < n; i++) {
tmp += data[i] * b[i * ldb + j];
//printf("%lf\n",tmp);
}
c[row * ldc + j] = tmp;
}
}
// Tiled ("checkerboard") matrix multiply using shared memory: C = A * B
// for n x n matrices with leading dimensions lda/ldb/ldc.
// Each block computes one threadnx x threadnx tile of C, marching the A
// and B tiles along the shared dimension. Out-of-range tile elements are
// zero-filled, so n need not be a multiple of threadnx.
// Precondition: launched with blockDim == (threadnx, threadnx).
__global__ void matrix_multiplication_gpu_3(const DATATYPE* a, size_t lda, const DATATYPE* b, size_t ldb, DATATYPE* c, size_t ldc, int n) {
__shared__ DATATYPE matA[threadnx][threadnx];
__shared__ DATATYPE matB[threadnx][threadnx];
const int tidc = threadIdx.x; // column within the tile
const int tidr = threadIdx.y; // row within the tile
const int bidc = blockIdx.x * threadnx; // tile's first column in C
const int bidr = blockIdx.y * threadnx; // tile's first row in C
int i, j;
double results = 0.0;
// j walks the shared dimension one tile at a time
for (j = 0;j < n;j += threadnx) {
// stage the A tile; zero-pad outside the matrix
if (tidr + bidr < n && tidc + j < n) {
matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
}
else {
matA[tidr][tidc] = 0;
}
// stage the B tile; zero-pad outside the matrix
if (tidr + j < n && tidc + bidc < n) {
matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];
}
else {
matB[tidr][tidc] = 0;
}
__syncthreads(); // both tiles fully staged before use
// partial dot product over this tile pair
for (i = 0;i < threadnx;i++) {
results += matA[tidr][i] * matB[i][tidc];
}
__syncthreads(); // all reads done before the tiles are overwritten
}
if (tidr + bidr < n && tidc + bidc < n) {
c[(tidr + bidr) * ldc + tidc + bidc] = results;
}
}
// Tiled matrix multiply with the per-element bounds checks removed from
// the tile loads (faster than matrix_multiplication_gpu_3).
// WARNING: the loads of a and b below are unguarded, so this variant is
// only safe when n is a multiple of threadnx; otherwise the staging
// reads run past the matrices. Only the final store is bounds-checked.
// Precondition: launched with blockDim == (threadnx, threadnx).
__global__ void matrix_multiplication_gpu_4(const DATATYPE* a, size_t lda, const DATATYPE* b, size_t ldb, DATATYPE* c, size_t ldc, int n) {
__shared__ DATATYPE matA[threadnx][threadnx];
__shared__ DATATYPE matB[threadnx][threadnx];
const int tidc = threadIdx.x; // column within the tile
const int tidr = threadIdx.y; // row within the tile
const int bidc = blockIdx.x * threadnx; // tile's first column in C
const int bidr = blockIdx.y * threadnx; // tile's first row in C
int i, j;
double results = 0.0;
for (j = 0;j < n;j += threadnx) {
// unguarded tile loads -- see precondition above
matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];
__syncthreads(); // both tiles fully staged before use
for (i = 0;i < threadnx;i++) {
results += matA[tidr][i] * matB[i][tidc];
}
__syncthreads(); // all reads done before the tiles are overwritten
}
if (tidr + bidr < n && tidc + bidc < n) {
c[(tidr + bidr) * ldc + tidc + bidc] = results;
}
}
//int main() {
// DATATYPE* a, * b, * c, * d_a, * d_b, * d_c, * d_c3, * c3;
// a = (DATATYPE*)malloc(sizeof(DATATYPE*) * arraysizeM * arraysizeL);
// b = (DATATYPE*)malloc(sizeof(DATATYPE*) * arraysizeL * arraysizeN);
// c = (DATATYPE*)malloc(sizeof(DATATYPE*) * arraysizeM * arraysizeN);
// c3 = (DATATYPE*)malloc(sizeof(DATATYPE*) * arraysizeM * arraysizeN);
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// a[i] = i;
// }
// for (int i = 0; i < arraysizeL * arraysizeN; i++) {
// b[i] = i;
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
// //串行
// matrix_multiplication_serial_1(a, b, c, arraysizeM, arraysizeN, arraysizeL);
// printf("serial a × b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
// //循环交换
// matrix_multiplication_serial_2(a, b, c, arraysizeM, arraysizeN, arraysizeL);
// printf("serial_mod a × b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
// //转置矩阵乘法
// matrix_multiplication_serial_3(a, b, c, arraysizeM, arraysizeN, arraysizeL);
// printf("serial_transpose a × b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
//
// //grid线程循环矩阵乘法
// cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraysizeM * arraysizeL);
// cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraysizeL * arraysizeN);
// cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN);
// cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraysizeM * arraysizeL, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraysizeL * arraysizeN, cudaMemcpyHostToDevice);
// int blocks = (arraysizeN + threadnum - 1) / threadnum;
// matrix_multiplication_gpu_1 << <blocks * arraysizeN, threadnum >> > (d_a,arraysizeN,d_b,arraysizeN,d_c,arraysizeN,arraysizeN);
// cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, cudaMemcpyDeviceToHost);
// printf("serial_transpose a × b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
//
// //block线程循环矩阵乘法
// cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraysizeM * arraysizeL);
// cudaMalloc((void**)&d_b, sizeof(DATATYPE) * arraysizeL * arraysizeN);
// cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN);
// cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraysizeM * arraysizeL, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, b, sizeof(DATATYPE) * arraysizeL * arraysizeN, cudaMemcpyHostToDevice);
// matrix_multiplication_gpu_1_0 << <arraysizeN, threadnum >> > (d_a, arraysizeN, d_b, arraysizeN, d_c, arraysizeN, arraysizeN);
// cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, cudaMemcpyDeviceToHost);
// printf("block thread a × b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c[i] = 0;
// }
//
// //对齐存储
// size_t pitch_a, pitch_b, pitch_c;
// cudaMallocPitch((void**)&d_a, &pitch_a, sizeof(DATATYPE) * arraysizeN, arraysizeN);
// cudaMallocPitch((void**)&d_b, &pitch_b, sizeof(DATATYPE) * arraysizeN, arraysizeN);
// cudaMallocPitch((void**)&d_c3, &pitch_c, sizeof(DATATYPE) * arraysizeN, arraysizeN);
// //printf("%d,%d,%d", pitch_a, pitch_b, pitch_c);
// cudaMemcpy2D(d_a, pitch_a, a, sizeof(DATATYPE) * arraysizeN, sizeof(DATATYPE) * arraysizeN, arraysizeN, cudaMemcpyHostToDevice);
// cudaMemcpy2D(d_b, pitch_b, b, sizeof(DATATYPE) * arraysizeN, sizeof(DATATYPE) * arraysizeN, arraysizeN, cudaMemcpyHostToDevice);
// matrix_multiplication_gpu_2 << <arraysizeN, threadnum, sizeof(DATATYPE)* arraysizeN >> > (d_a, pitch_a / sizeof(DATATYPE), d_b, pitch_b / sizeof(DATATYPE), d_c3, pitch_c / sizeof(DATATYPE), arraysizeN);
// cudaMemcpy2D(c3, sizeof(DATATYPE) * arraysizeN, d_c3, pitch_c, sizeof(DATATYPE) * arraysizeN, arraysizeN, cudaMemcpyDeviceToHost);
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c3);
// printf("aligned storage a × b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c3 [i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c3 [i] = 0;
// }
//
// //棋盘阵列矩阵乘法
// int bx = (arraysizeN + threadnx - 1) / threadnx;
// dim3 blockns(bx, bx);
// dim3 threadns(threadnx, threadnx);
// cudaMallocPitch((void**)&d_a, &pitch_a, sizeof(DATATYPE)* arraysizeN, arraysizeN);
// cudaMallocPitch((void**)&d_b, &pitch_b, sizeof(DATATYPE)* arraysizeN, arraysizeN);
// cudaMallocPitch((void**)&d_c3, &pitch_c, sizeof(DATATYPE)* arraysizeN, arraysizeN);
// cudaMemcpy2D(d_a, pitch_a, a, sizeof(DATATYPE)* arraysizeN, sizeof(DATATYPE)* arraysizeN, arraysizeN, cudaMemcpyHostToDevice);
// cudaMemcpy2D(d_b, pitch_b, b, sizeof(DATATYPE)* arraysizeN, sizeof(DATATYPE)* arraysizeN, arraysizeN, cudaMemcpyHostToDevice);
// matrix_multiplication_gpu_3 << <blockns, threadns >> > (d_a, pitch_a / sizeof(DATATYPE), d_b, pitch_b / sizeof(DATATYPE), d_c3, pitch_c / sizeof(DATATYPE), arraysizeN);
// cudaMemcpy2D(c3, sizeof(DATATYPE)* arraysizeN, d_c3, pitch_c, sizeof(DATATYPE)* arraysizeN, arraysizeN, cudaMemcpyDeviceToHost);
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c3);
// printf("checkboard array a × b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c3[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c3[i] = 0;
// }
//
// //优化的棋盘阵列矩阵乘法
// cudaMallocPitch((void**)&d_a, &pitch_a, sizeof(DATATYPE) * arraysizeN, arraysizeN);
// cudaMallocPitch((void**)&d_b, &pitch_b, sizeof(DATATYPE) * arraysizeN, arraysizeN);
// cudaMallocPitch((void**)&d_c3, &pitch_c, sizeof(DATATYPE)* arraysizeN, arraysizeN);
// cudaMemcpy2D(d_a, pitch_a, a, sizeof(DATATYPE)* arraysizeN, sizeof(DATATYPE)* arraysizeN, arraysizeN, cudaMemcpyHostToDevice);
// cudaMemcpy2D(d_b, pitch_b, b, sizeof(DATATYPE)* arraysizeN, sizeof(DATATYPE)* arraysizeN, arraysizeN, cudaMemcpyHostToDevice);
// matrix_multiplication_gpu_4 << <blockns, threadns >> > (d_a, pitch_a / sizeof(DATATYPE), d_b, pitch_b / sizeof(DATATYPE), d_c3, pitch_c / sizeof(DATATYPE), arraysizeN);
// cudaMemcpy2D(c3, sizeof(DATATYPE)* arraysizeN, d_c3, pitch_c, sizeof(DATATYPE)* arraysizeN, arraysizeN, cudaMemcpyDeviceToHost);
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c3);
// printf("improved checkboard a × b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c3[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c3[i] = 0;
// }
// //cublas矩阵乘法
// cublasHandle_t handle;
// cublasCreate(&handle);
// cudaMalloc((void**)&d_a, sizeof(DATATYPE)* arraysizeN* arraysizeN);
// cudaMalloc((void**)&d_b, sizeof(DATATYPE)* arraysizeN* arraysizeN);
// cudaMalloc((void**)&d_c3, sizeof(DATATYPE)* arraysizeN* arraysizeN);
// float alpha = 1.0;
// float beta = 0.0;
// cublasSetVector(arraysizeN* arraysizeN, sizeof(DATATYPE), a, 1, d_a, 1);
// cublasSetVector(arraysizeN* arraysizeN, sizeof(DATATYPE), b, 1, d_b, 1);
// cublasSetVector(arraysizeN * arraysizeN, sizeof(DATATYPE), c3, 1, d_c3, 1);
// cublasSgemm_v2(handle, CUBLAS_OP_N, CUBLAS_OP_N, arraysizeN, arraysizeN, arraysizeN, &alpha, d_b, arraysizeN, d_a, arraysizeN, &beta, d_c3, arraysizeN);
// cublasGetVector(arraysizeN * arraysizeN, sizeof(DATATYPE), d_c3, 1, c3, 1);
// cudaFree(d_a);
// cudaFree(d_b);
// cudaFree(d_c3);
// cublasDestroy(handle);
// printf("cublas a × b = { \n");
// for (int i = 0; i < arraysizeM; i++) {
// for (int j = 0; j < arraysizeN; j++) {
// printf("%7.0f ", c3[i * arraysizeM + j]);
// }
// printf("\n");
// }
// for (int i = 0; i < arraysizeM * arraysizeL; i++) {
// c3[i] = 0;
// }
//
// return 0;
//}
//第九章 矩阵转置
// Serial transpose with sequential reads: c = a^T.
// a is m rows x n columns, c is n rows x m columns (row-major).
// Reads a contiguously row by row; the writes to c are strided.
void matrix_transposition_serial_1(DATATYPE* a, DATATYPE* c, int m, int n) {
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < n; ++col) {
            c[col * m + row] = a[row * n + col];
        }
    }
}
// Serial transpose with sequential writes: c = a^T.
// a is m rows x n columns, c is n rows x m columns (row-major).
// Writes c contiguously row by row; the reads from a are strided.
//
// Fix: the original indexed c[i * n + j] and a[j * m + i], i.e. it used
// the wrong leading dimension for both matrices; that is only correct
// when m == n. The indexing below also handles rectangular matrices.
void matrix_transposition_serial_2(DATATYPE* a, DATATYPE* c, int m, int n) {
    for (int i = 0; i < n; i++) {      // row of c
        for (int j = 0; j < m; j++) {  // column of c
            c[i * m + j] = a[j * n + i];
        }
    }
}
// 1D-indexed GPU transpose: c = a^T, a is m x n, c is n x m (row-major).
// Each block grid-strides over the rows of a; within a row, each thread
// block-strides over the columns. Reads of a are coalesced; writes to c
// are strided.
//
// Fix: the original initialized tid once, outside the row loop, so after
// a block finished its first row tid stayed >= n and every further row
// assigned to that block was silently skipped (only visible when
// m > gridDim.x). tid is now re-initialized for every row.
__global__ void matrix_transposition_gpu_1d_1(DATATYPE* a, DATATYPE* c, int m, int n) {
    for (int bid = blockIdx.x; bid < m; bid += gridDim.x) {
        for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
            c[tid * m + bid] = a[bid * n + tid];
        }
    }
}
// Write-contiguous 1D-indexed GPU transpose.
// Writes c[bid * n + tid] contiguously within each block; reads
// a[tid * m + bid] are strided.
// NOTE(review): the read treats a as n x m (opposite of the other
// kernels' m x n convention); in this file's driver m == n so the two
// conventions coincide -- verify before using with rectangular input.
//
// Fix: the original initialized tid once, outside the row loop, so after
// a block finished its first row tid stayed >= n and every further row
// assigned to that block was silently skipped (only visible when
// m > gridDim.x). tid is now re-initialized for every row.
__global__ void matrix_transposition_gpu_1d_2(DATATYPE* a, DATATYPE* c, int m, int n) {
    for (int bid = blockIdx.x; bid < m; bid += gridDim.x) {
        for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
            c[bid * n + tid] = a[tid * m + bid];
        }
    }
}
// 2D-indexed GPU transpose: c = a^T, a is m x n, c is n x m (row-major).
// One thread per element: x indexes columns of a, y indexes rows of a.
__global__ void matrix_transposition_gpu_2d_1(DATATYPE* a, DATATYPE* c, int m, int n) {
    const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;  // column of a
    const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;  // row of a
    if (col >= n || row >= m) {
        return;  // guard the grid tail
    }
    c[col * m + row] = a[row * n + col];
}
// Shared-memory tiled 2D transpose: c = a^T, a is m x n, c is n x m.
// A blocknum x blocknum tile is staged in shared memory so that both the
// global load and the global store are row-contiguous (coalesced); the
// +1 column pad avoids shared-memory bank conflicts on the transposed
// read. Precondition: launched with blockDim.x == blockDim.y == blocknum.
//
// Fix: the input-side bounds check compared against the file-level
// constants arraysizeN/arraysizeM instead of the kernel's own n/m
// parameters, so the kernel only honored its arguments when they matched
// those constants. It now respects its parameters.
__global__ void matrix_transposition_gpu_2d_2(DATATYPE* a, DATATYPE* c, int m, int n) {
    __shared__ DATATYPE tmp[blocknum][blocknum + 1];
    // coordinates of this thread's element in a
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
    if ((xIndex < n) && (yIndex < m)) {
        tmp[threadIdx.y][threadIdx.x] = a[xIndex + n * yIndex];
    }
    __syncthreads();  // tile fully loaded before any transposed read
    // coordinates of this thread's element in c (block indices swapped)
    xIndex = blockIdx.y * blocknum + threadIdx.x;
    yIndex = blockIdx.x * blocknum + threadIdx.y;
    if ((xIndex < m) && (yIndex < n)) {
        c[yIndex * m + xIndex] = tmp[threadIdx.x][threadIdx.y];
    }
}
// Shared-memory 2D transpose with diagonal block reordering (original
// author's note: "results unstable? unresolved"). Blocks are renumbered
// along diagonals to spread partition-camping across memory partitions.
// NOTE(review): unlike matrix_transposition_gpu_2d_2, this kernel has NO
// bounds checks, so it presumably requires m and n to be exact multiples
// of blocknum -- verify. The tile is also hard-typed float regardless of
// DATATYPE, and a debug dump of the tile is printed from the thread that
// wrote c[0]; both look like leftovers from debugging.
__global__ void matrix_transposition_gpu_diagonal(DATATYPE* a, DATATYPE* c, int m, int n) {
__shared__ float tile[blocknum][blocknum+1]; // +1 pad against bank conflicts
int blockIdx_x, blockIdx_y;
// remap (blockIdx.x, blockIdx.y) onto a diagonal ordering
if (n == m) {
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
}
else {
int bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
// input element handled by this thread
int xIndex = blockIdx_x * blocknum + threadIdx.x;
int yIndex = blockIdx_y * blocknum + threadIdx.y;
int index_in = xIndex + (yIndex)*n;
// output element (block coordinates swapped)
xIndex = blockIdx_y * blocknum + threadIdx.x;
yIndex = blockIdx_x * blocknum + threadIdx.y;
int index_out = xIndex + (yIndex)*m;
tile[threadIdx.y][threadIdx.x] = a[index_in];
__syncthreads(); // tile fully loaded before the transposed read
c[index_out] = tile[threadIdx.x][threadIdx.y];
//printf("%.0f%1.0f %d %d %d \n",c[index_out],tile[threadIdx.x][threadIdx.y],threadIdx.x,threadIdx.y,index_out);
// debug: dump the shared tile from whichever thread wrote c[0]
if (index_out == 0) {
for (int i = 0;i < blocknum;i++) {
for (int j = 0;j < blocknum + 1;j++) {
printf("%4.0f", tile[i][j]);
}
printf("\n");
}
}
}
// Prints an arraysizeN x arraysizeM result matrix with the %5.0f format
// used throughout this demo, zeroing each element after printing so the
// buffer is ready for the next variant.
static void print_and_clear_result(DATATYPE* c) {
    for (int i = 0; i < arraysizeN; i++) {
        for (int j = 0; j < arraysizeM; j++) {
            printf("%5.0f", c[i * arraysizeN + j]);
            c[i * arraysizeN + j] = 0;
        }
        printf("\n");
    }
}

// Demo driver: exercises the serial and GPU transpose implementations on
// an arraysizeN x arraysizeN matrix initialized to a[i] = i and prints
// each result. Output is unchanged from the original.
//
// Fixes relative to the original:
//  - host buffers were sized with sizeof(DATATYPE*) (pointer size)
//    instead of sizeof(DATATYPE);
//  - a, c, d_a and d_c were leaked (no free/cudaFree before returning);
//  - unused locals (b, c3, d_b, d_c3) removed.
int main() {
    DATATYPE* a, * c, * d_a, * d_c;
    a = (DATATYPE*)malloc(sizeof(DATATYPE) * arraysizeM * arraysizeN);
    c = (DATATYPE*)malloc(sizeof(DATATYPE) * arraysizeM * arraysizeN);
    for (int i = 0; i < arraysizeM * arraysizeN; i++) {
        a[i] = i;
    }
    for (int i = 0; i < arraysizeM * arraysizeN; i++) {
        c[i] = 0;
    }
    // serial transpose, contiguous reads
    matrix_transposition_serial_1(a, c, arraysizeN, arraysizeN);
    printf("串行矩阵转置:\n");
    print_and_clear_result(c);
    // serial transpose, contiguous writes
    matrix_transposition_serial_2(a, c, arraysizeN, arraysizeN);
    printf("串行矩阵转置(连续写入):\n");
    print_and_clear_result(c);
    // device buffers shared by all GPU variants
    cudaMalloc((void**)&d_a, sizeof(DATATYPE) * arraysizeM * arraysizeN);
    cudaMalloc((void**)&d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN);
    cudaMemcpy(d_a, a, sizeof(DATATYPE) * arraysizeM * arraysizeN, cudaMemcpyHostToDevice);
    // 1D row/column-strided kernel
    matrix_transposition_gpu_1d_1 << <512, 512 >> > (d_a, d_c, arraysizeN, arraysizeN);
    cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, cudaMemcpyDeviceToHost);
    printf("1D矩阵转置:\n");
    print_and_clear_result(c);
    // write-contiguous 1D kernel
    matrix_transposition_gpu_1d_2 << <512, 512 >> > (d_a, d_c, arraysizeN, arraysizeN);
    cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, cudaMemcpyDeviceToHost);
    printf("连续1D矩阵转置:\n");
    print_and_clear_result(c);
    // 2D launches: one thread per element, blocknum x blocknum blocks
    dim3 threads(blocknum, blocknum, 1);
    dim3 blocks((arraysizeN + blocknum - 1) / blocknum, (arraysizeN + blocknum - 1) / blocknum, 1);
    matrix_transposition_gpu_2d_1 << <blocks, threads >> > (d_a, d_c, arraysizeN, arraysizeN);
    cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, cudaMemcpyDeviceToHost);
    printf("2D矩阵转置:\n");
    print_and_clear_result(c);
    matrix_transposition_gpu_2d_2 << <blocks, threads >> > (d_a, d_c, arraysizeN, arraysizeN);
    cudaMemcpy(c, d_c, sizeof(DATATYPE) * arraysizeM * arraysizeN, cudaMemcpyDeviceToHost);
    printf("共享存储2D矩阵转置:\n");
    print_and_clear_result(c);
    // matrix_transposition_gpu_diagonal produces unstable results and
    // stays disabled, as in the original.
    cudaFree(d_a);
    cudaFree(d_c);
    free(a);
    free(c);
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Computes c[i] = a[i] + b[i] for i in [0, size) on device 0, copying the
// inputs in and the result back out. Launches addKernel with a single
// block of `size` threads, so size is limited by the device's maximum
// threads per block; an oversized launch is caught by cudaGetLastError.
// Returns the first CUDA error encountered, or cudaSuccess. Device
// buffers are always released via the Error: label.
cudaError_t addWithCuda(int* c, int* a, int* b, int size)
{
int* dev_a = 0;
int* dev_b = 0;
int* dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel << <1, size >> > (dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
// common cleanup path -- cudaFree(0) is a harmless no-op
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
970bd4bd2287dfc2e4fdf4c84b079309c8cf6a20.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 196608
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
// Builds the pointer-chase chain used by the latency benchmark.
// All work is done by a single thread (tid == 0): for each warp's
// segment of GLOBAL_MEM_ELEMENTS / num_warps_per_block entries it makes
// ptr_array[i] point 48 elements ahead (wrapping within the segment),
// then copies the pointer values into my_array so that dereferencing an
// array element yields the address of the next element in the chain.
// NOTE(review): the stride and num_blocks_k parameters are currently
// unused (the per-block variant is commented out).
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id; // unused: left over from the commented per-block loop
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
// each warp's segment chains forward by 48, wrapping inside the segment
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
// mirror the pointer chain into the data array itself
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
// Memory-latency microbenchmark kernel over the pointer chain built by
// init_memory. Only the first `divergence` lanes of each warp run the
// measurement loop (controls intra-warp divergence). The loop body is
// deliberately hand-unrolled: per iteration it performs 180 dependent
// loads folded into f1 (groups of 4, then 19 groups of 9, then 5) with
// 20 pointer-chase advances interleaved, keeping every load dependent on
// the previous pointer value so the hardware cannot overlap them.
// The final write to duration[tid] depends on f1 and *tmp_ptr so the
// compiler cannot dead-code-eliminate the chase.
// NOTE(review): array_length, stride, num_blocks_k, sum_time, i, k, f2
// and f3 are unused here -- presumably shared with sibling variants of
// this benchmark; do not "clean up" without checking the build.
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
// each lane starts the chase at its own slot inside its warp's segment
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
// only the first `divergence` lanes of each warp do the measurement
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
// 4 dependent loads, then advance the chain pointer
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// 19 repetitions of: 9 dependent loads, then one chase step
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
// trailing 5 loads complete the iteration
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
// publish a value that depends on the chase so it cannot be optimized out
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_90_10_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
| 970bd4bd2287dfc2e4fdf4c84b079309c8cf6a20.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 196608
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l2_cache/fadd_l2d_90_10_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
fb507cfe2e16ca7db54d917e36b5412f0eee8e25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "stdio.h"
#define R 128
#define C 128
#define ITERS 1000000
#define g 64
void DisplayHeader()
{
const int kb = 1024;
const int mb = kb * kb;
std::cout << "NBody.GPU" <<"\n" << "=========" <<"\n" <<"\n";
std::cout << "CUDA version: v" << CUDART_VERSION <<"\n";
//std::cout << "Thrust version: v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION <<"\n" <<"\n";
int devCount;
hipGetDeviceCount(&devCount);
std::cout << "CUDA Devices: " <<"\n" <<"\n";
for(int i = 0; i < devCount; ++i)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, i);
std::cout << i << ": " << props.name << ": " << props.major << "." << props.minor <<"\n";
std::cout << " Global memory: " << props.totalGlobalMem / mb << "mb" <<"\n";
std::cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" <<"\n";
std::cout << " Constant memory: " << props.totalConstMem / kb << "kb" <<"\n";
std::cout << " Block registers: " << props.regsPerBlock <<"\n" <<"\n";
std::cout << " Warp size: " << props.warpSize <<"\n";
std::cout << " Threads per block: " << props.maxThreadsPerBlock <<"\n";
std::cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", "<< props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2]<<" ]" <<"\n";
std::cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" <<"\n";
std::cout <<"\n";
}
}
__global__ void add(int *a, int *b, int *c) {
int gtid = (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x;
if (gtid < R*C)
for(int i=0; i<ITERS; i++)
c[gtid] = sqrt( (float) (a[gtid] + b[gtid]));
}
__global__ void add_divergent(int *a, int *b, int *c) {
int gtid = (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x;
if (gtid < R*C)
if(gtid & g)
for(int i=0; i<ITERS; i++)
c[gtid] = sqrt( (float) (a[gtid] + b[gtid]));
else
for(int i=0; i<ITERS; i++)
c[gtid] = sqrt( (float) (a[gtid] - b[gtid]));
}
int main() {
//DisplayHeader();
int a[R][C] , b[R][C] , c[R][C];
int *dev_a, *dev_b, *dev_c;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMalloc((void **) &dev_a, R*C*sizeof(int));
hipMalloc((void **) &dev_b, R*C*sizeof(int));
hipMalloc((void **) &dev_c, R*C*sizeof(int));
// Fill Arrays
for (int i = 0; i < R; i++) {
for (int j = 0; j < C; j++) {
a[i][j] = C*i + j,
b[i][j] = R*C - a[i][j];
}
}
hipMemcpy(dev_a, a, R*C*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, R*C*sizeof(int), hipMemcpyHostToDevice);
// Kernel invocation
dim3 threadsPerBlock(8,8);
dim3 numBlocks( C/threadsPerBlock.x , R/threadsPerBlock.y);
std::cout<<"numBlocks.x="<<numBlocks.x<<" numBlocks.y="<<numBlocks.y<<"\n";
hipEventRecord(start);hipLaunchKernelGGL((
add), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dev_a,dev_b,dev_c);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipMemcpy(c, dev_c, R*C*sizeof(int), hipMemcpyDeviceToHost);
float elapsed_time = 0;
hipEventElapsedTime(&elapsed_time, start, stop);
for (int i = 0; i < R; i++) {
for (int j = 0; j < C; j++) {
// std::cout << c[i][j] <<" ";
}
//std::cout<<"\n";
}
std::cout<<"divergence/"<<g<<" : Elapsed time = "<<elapsed_time<<" ms\n";
return 0;
}
| fb507cfe2e16ca7db54d917e36b5412f0eee8e25.cu | #include <iostream>
#include "stdio.h"
#define R 128
#define C 128
#define ITERS 1000000
#define g 64
void DisplayHeader()
{
const int kb = 1024;
const int mb = kb * kb;
std::cout << "NBody.GPU" <<"\n" << "=========" <<"\n" <<"\n";
std::cout << "CUDA version: v" << CUDART_VERSION <<"\n";
//std::cout << "Thrust version: v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION <<"\n" <<"\n";
int devCount;
cudaGetDeviceCount(&devCount);
std::cout << "CUDA Devices: " <<"\n" <<"\n";
for(int i = 0; i < devCount; ++i)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, i);
std::cout << i << ": " << props.name << ": " << props.major << "." << props.minor <<"\n";
std::cout << " Global memory: " << props.totalGlobalMem / mb << "mb" <<"\n";
std::cout << " Shared memory: " << props.sharedMemPerBlock / kb << "kb" <<"\n";
std::cout << " Constant memory: " << props.totalConstMem / kb << "kb" <<"\n";
std::cout << " Block registers: " << props.regsPerBlock <<"\n" <<"\n";
std::cout << " Warp size: " << props.warpSize <<"\n";
std::cout << " Threads per block: " << props.maxThreadsPerBlock <<"\n";
std::cout << " Max block dimensions: [ " << props.maxThreadsDim[0] << ", "<< props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2]<<" ]" <<"\n";
std::cout << " Max grid dimensions: [ " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << " ]" <<"\n";
std::cout <<"\n";
}
}
__global__ void add(int *a, int *b, int *c) {
int gtid = (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x;
if (gtid < R*C)
for(int i=0; i<ITERS; i++)
c[gtid] = sqrt( (float) (a[gtid] + b[gtid]));
}
__global__ void add_divergent(int *a, int *b, int *c) {
int gtid = (gridDim.x*blockIdx.y + blockIdx.x)*blockDim.x*blockDim.y + blockDim.x*threadIdx.y + threadIdx.x;
if (gtid < R*C)
if(gtid & g)
for(int i=0; i<ITERS; i++)
c[gtid] = sqrt( (float) (a[gtid] + b[gtid]));
else
for(int i=0; i<ITERS; i++)
c[gtid] = sqrt( (float) (a[gtid] - b[gtid]));
}
int main() {
//DisplayHeader();
int a[R][C] , b[R][C] , c[R][C];
int *dev_a, *dev_b, *dev_c;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMalloc((void **) &dev_a, R*C*sizeof(int));
cudaMalloc((void **) &dev_b, R*C*sizeof(int));
cudaMalloc((void **) &dev_c, R*C*sizeof(int));
// Fill Arrays
for (int i = 0; i < R; i++) {
for (int j = 0; j < C; j++) {
a[i][j] = C*i + j,
b[i][j] = R*C - a[i][j];
}
}
cudaMemcpy(dev_a, a, R*C*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, R*C*sizeof(int), cudaMemcpyHostToDevice);
// Kernel invocation
dim3 threadsPerBlock(8,8);
dim3 numBlocks( C/threadsPerBlock.x , R/threadsPerBlock.y);
std::cout<<"numBlocks.x="<<numBlocks.x<<" numBlocks.y="<<numBlocks.y<<"\n";
cudaEventRecord(start);
add<<<numBlocks, threadsPerBlock>>>(dev_a,dev_b,dev_c);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaMemcpy(c, dev_c, R*C*sizeof(int), cudaMemcpyDeviceToHost);
float elapsed_time = 0;
cudaEventElapsedTime(&elapsed_time, start, stop);
for (int i = 0; i < R; i++) {
for (int j = 0; j < C; j++) {
// std::cout << c[i][j] <<" ";
}
//std::cout<<"\n";
}
std::cout<<"divergence/"<<g<<" : Elapsed time = "<<elapsed_time<<" ms\n";
return 0;
}
|
7c15fd84d21de7029afdf4803af3b68897c6b9a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define BLOCK_SIZE 16
/*
*********************************************************************
: gpu_matrix_mult
: 2 ( )
:
&a GPU A [m x n]
&b GPU B [n x k]
&c C[m x k]
:
:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
return: none
*********************************************************************
*/
__global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
if( col < k && row < m)
{
for(int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
}
/*
*********************************************************************
: gpu_square_matrix_mult
:
:
&a GPU A [m x n]
&b GPU B [n x k]
&c C[m x k]
:
:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
__global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n)
{
__shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n)
{
// n may not divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if(row < n && col < n)
{
d_result[row * n + col] = tmp;
}
}
/*
*********************************************************************
: gpu_matrix_transpose
: matrix transpose
:
&mat_in rows X cols GPU
&mat_out cols X rows GPU
:
:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
__global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx < cols && idy < rows)
{
unsigned int pos = idy * cols + idx;
unsigned int trans_pos = idx * rows + idy;
mat_out[trans_pos] = mat_in[pos];
}
}
/*
*********************************************************************
: cpu_matrix_mult
: CPU
:
&a CPU
&b B CPU
&c ( ) CPU
return: none
*********************************************************************
*/
void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) {
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < k; ++j)
{
int tmp = 0.0;
for (int h = 0; h < n; ++h)
{
tmp += h_a[i * n + h] * h_b[h * k + j];
}
h_result[i * k + j] = tmp;
}
}
}
/*
*********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
*********************************************************************
*/
// Entry point (HIP build): reads m, n, k from stdin, builds random
// A [m x n] and B [n x k], multiplies them on the GPU, times GPU vs a
// CPU reference, validates element-wise, and reports the speedup.
int main(int argc, char const *argv[])
{
int m, n, k;
/* Fixed seed for illustration */
srand(3333);
printf("please type in m n and k\n");
scanf("%d %d %d", &m, &n, &k);
// allocate memory in host RAM, h_cc is used to store CPU result
// (hipHostMalloc returns pinned host memory, which speeds up transfers)
int *h_a, *h_b, *h_c, *h_cc;
hipHostMalloc((void **) &h_a, sizeof(int)*m*n);
hipHostMalloc((void **) &h_b, sizeof(int)*n*k);
hipHostMalloc((void **) &h_c, sizeof(int)*m*k);
hipHostMalloc((void **) &h_cc, sizeof(int)*m*k);
// random initialize matrix A
for (int i = 0; i < m; ++i) {
for (int j = 0; j < n; ++j) {
h_a[i * n + j] = rand() % 1024;
}
}
// random initialize matrix B
for (int i = 0; i < n; ++i) {
for (int j = 0; j < k; ++j) {
h_b[i * k + j] = rand() % 1024;
}
}
float gpu_elapsed_time_ms, cpu_elapsed_time_ms;
// some events to count the execution time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// start to count execution time of GPU version
// NOTE(review): the timed region also includes device allocation and
// the host<->device copies, not just the kernel itself.
hipEventRecord(start, 0);
// Allocate memory space on the device
int *d_a, *d_b, *d_c;
hipMalloc((void **) &d_a, sizeof(int)*m*n);
hipMalloc((void **) &d_b, sizeof(int)*n*k);
hipMalloc((void **) &d_c, sizeof(int)*m*k);
// copy matrix A and B from host to device memory
hipMemcpy(d_a, h_a, sizeof(int)*m*n, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, sizeof(int)*n*k, hipMemcpyHostToDevice);
// ceil-divide the output extents into BLOCK_SIZE x BLOCK_SIZE tiles
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// Launch kernel: the shared-memory tiled kernel only handles square
// matrices, so fall back to the generic kernel otherwise.
if(m == n && n == k)
{
hipLaunchKernelGGL(( gpu_square_matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, n);
}
else
{
hipLaunchKernelGGL(( gpu_matrix_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, m, n, k);
}
// Transfer results from device to host (the blocking copy also waits
// for the kernel launched above to finish)
hipMemcpy(h_c, d_c, sizeof(int)*m*k, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// time counting terminate
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// compute time elapse on GPU computing
hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms);
// start the CPU version (timed with the same event pair, reused)
hipEventRecord(start, 0);
cpu_matrix_mult(h_a, h_b, h_cc, m, n, k);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms);
// validate results computed by GPU (exact integer comparison)
int all_ok = 1;
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < k; ++j)
{
if(h_cc[i*k + j] != h_c[i*k + j])
{
all_ok = 0;
}
}
}
// roughly compute speedup
if(all_ok)
{
printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms);
}
else
{
printf("incorrect results\n");
}
// free memory (device buffers, then pinned host buffers)
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c);
hipHostFree(h_cc);
return 0;
}
| 7c15fd84d21de7029afdf4803af3b68897c6b9a6.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define BLOCK_SIZE 16
/*
*********************************************************************
имя функции: gpu_matrix_mult
описание: поточечное вычисление перемножения 2 матриц(не только квадратных)
параметры:
&a указатель на GPU к матрице A [m x n]
&b указатель на GPU к матрице B [n x k]
&c указатель на результирующую матрицу C[m x k]
для сохранения результата
Заметка:
Сетка и блок должны быть сконфигурированы так:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
ускорение может быть достигнуто благодаря использованию разделяемой памяти
return: none
*********************************************************************
*/
// Generic matrix-multiply kernel: C = A * B with A [m x n], B [n x k],
// C [m x k], all row-major. One thread produces one element of C.
// Expected launch: a 2-D grid of 2-D blocks covering the k x m output.
__global__ void gpu_matrix_mult(int *a,int *b, int *c, int m, int n, int k)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Threads that fall outside the output matrix do nothing.
    if (row >= m || col >= k)
        return;

    int acc = 0;
    for (int t = 0; t < n; ++t)
        acc += a[row * n + t] * b[t * k + col];
    c[row * k + col] = acc;
}
/*
*********************************************************************
название функции: gpu_square_matrix_mult
описание: поточечное произведение квадратных матриц
параметры:
&a указатель на GPU к матрице A [m x n]
&b указатель на GPU к матрице B [n x k]
&c указатель на результирующую матрицу C[m x k]
для сохранения результата
Заметка:
Сетка и блок должны быть сконфигурированы так:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
// Tiled (shared-memory) square matrix multiply: d_result = d_a * d_b,
// all n x n, row-major. Each block computes one BLOCK_SIZE x BLOCK_SIZE
// output tile, streaming matching tiles of A and B through shared memory.
// Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE) and a grid covering n x n.
__global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n)
{
__shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
// march over the tiles along the shared dimension
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if(idx >= n*n)
{
// n may not divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if(idx >= n*n)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads(); // both tiles fully loaded before anyone reads them
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads(); // finish reading before the next iteration overwrites
}
// only threads inside the matrix write their accumulated element
if(row < n && col < n)
{
d_result[row * n + col] = tmp;
}
}
/*
*********************************************************************
название функции: gpu_matrix_transpose
описание: matrix transpose
параметры:
&mat_in Указатель на матрицу rows X cols размерностью на GPU
&mat_out Указатель на результат матрицу cols X rows на GPU
для сохранения результата
Заметка:
Сетка и блок сконфигурированы так:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
// Matrix transpose kernel: mat_out[c][r] = mat_in[r][c] for a rows x cols
// row-major input. One thread handles one input element.
// Expected launch: 2-D grid of 2-D blocks covering cols x rows.
__global__ void gpu_matrix_transpose(int* mat_in, int* mat_out, unsigned int rows, unsigned int cols)
{
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the grid tail: the launch grid may overshoot the matrix extents.
    if (col >= cols || row >= rows)
        return;

    mat_out[col * rows + row] = mat_in[row * cols + col];
}
/*
*********************************************************************
название функции: cpu_matrix_mult
описание: произведение матриц на CPU
параметры:
&a указатель на матрицу А на CPU
&b указатель на матрицу B на CPU
&c указатель на результат(матрицу С) на CPU
return: none
*********************************************************************
*/
/*
 * cpu_matrix_mult: reference host-side matrix multiplication,
 * C = A * B with A [m x n], B [n x k], C [m x k], all row-major.
 * Used to validate the GPU kernels; O(m*n*k).
 */
void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) {
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            // fix: was "int tmp = 0.0;" — a double literal silently
            // truncated into an int; use an integer literal.
            int tmp = 0;
            for (int h = 0; h < n; ++h)
            {
                tmp += h_a[i * n + h] * h_b[h * k + j];
            }
            h_result[i * k + j] = tmp;
        }
    }
}
/*
*********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
*********************************************************************
*/
/*
 * main: reads m, n, k from stdin, builds random A [m x n] and B [n x k],
 * multiplies them on the GPU (tiled kernel when square, generic kernel
 * otherwise), times GPU vs a CPU reference, validates element-wise and
 * reports the speedup.
 */
int main(int argc, char const *argv[])
{
    int m, n, k;
    /* Fixed seed for illustration */
    srand(3333);
    printf("please type in m n and k\n");
    scanf("%d %d %d", &m, &n, &k);

    // Allocate pinned host memory; h_cc holds the CPU reference result.
    int *h_a, *h_b, *h_c, *h_cc;
    cudaMallocHost((void **) &h_a, sizeof(int)*m*n);
    cudaMallocHost((void **) &h_b, sizeof(int)*n*k);
    cudaMallocHost((void **) &h_c, sizeof(int)*m*k);
    cudaMallocHost((void **) &h_cc, sizeof(int)*m*k);

    // Randomly initialize matrix A.
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            h_a[i * n + j] = rand() % 1024;
        }
    }
    // Randomly initialize matrix B.
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            h_b[i * k + j] = rand() % 1024;
        }
    }

    float gpu_elapsed_time_ms, cpu_elapsed_time_ms;
    // Events used to time both the GPU and the CPU version.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Start timing the GPU version. NOTE: the timed region also includes
    // device allocation and host<->device copies, not just the kernel.
    cudaEventRecord(start, 0);

    // Allocate device memory.
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(int)*m*n);
    cudaMalloc((void **) &d_b, sizeof(int)*n*k);
    cudaMalloc((void **) &d_c, sizeof(int)*m*k);

    // Copy matrix A and B from host to device memory.
    cudaMemcpy(d_a, h_a, sizeof(int)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int)*n*k, cudaMemcpyHostToDevice);

    // Ceil-divide the output into BLOCK_SIZE x BLOCK_SIZE tiles.
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);

    // The shared-memory tiled kernel only handles square matrices;
    // fall back to the generic kernel otherwise.
    if(m == n && n == k)
    {
        gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    }
    else
    {
        gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);
    }

    // Transfer results back (the blocking copy also waits for the kernel).
    cudaMemcpy(h_c, d_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost);
    // fix: cudaThreadSynchronize() is deprecated; use the equivalent
    // cudaDeviceSynchronize() instead.
    cudaDeviceSynchronize();

    // Stop GPU timing and report.
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms);

    // Time the CPU reference with the same event pair.
    cudaEventRecord(start, 0);
    cpu_matrix_mult(h_a, h_b, h_cc, m, n, k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms);

    // Validate GPU results element-wise against the CPU reference.
    int all_ok = 1;
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            if(h_cc[i*k + j] != h_c[i*k + j])
            {
                all_ok = 0;
            }
        }
    }

    // Roughly compute speedup.
    if(all_ok)
    {
        printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms);
    }
    else
    {
        printf("incorrect results\n");
    }

    // Free device memory, then the pinned host buffers.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    return 0;
}
|
e1458abe15a95f8c469e2b1dc4968fdbdb56351b.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/SpatialMaxPooling.hip"
#else
#include <THHUNN/common.h>
// Forward pass of (non-dilated) spatial max pooling: thin wrapper that
// delegates to the dilated implementation with dilation fixed at 1x1.
// kW/kH: pooling window, dW/dH: stride, padW/padH: padding,
// ceil_mode: use ceil instead of floor when computing the output size.
void THNN_(SpatialMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode)
{
THNN_(SpatialDilatedMaxPooling_updateOutput)(
state, input, output, indices,
kW, kH, dW, dH, padW, padH, 1, 1, ceil_mode);
}
// Backward pass of (non-dilated) spatial max pooling: routes gradOutput
// back through the max indices by delegating to the dilated
// implementation with dilation fixed at 1x1. Parameters mirror the
// forward pass; indices are the argmax positions recorded there.
void THNN_(SpatialMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode)
{
THNN_(SpatialDilatedMaxPooling_updateGradInput)(
state, input, gradOutput, gradInput, indices,
kW, kH, dW, dH, padW, padH, 1, 1, ceil_mode);
}
#endif
| e1458abe15a95f8c469e2b1dc4968fdbdb56351b.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/SpatialMaxPooling.cu"
#else
#include <THCUNN/common.h>
// Forward pass of (non-dilated) spatial max pooling: thin wrapper that
// delegates to the dilated implementation with dilation fixed at 1x1.
// kW/kH: pooling window, dW/dH: stride, padW/padH: padding,
// ceil_mode: use ceil instead of floor when computing the output size.
void THNN_(SpatialMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode)
{
THNN_(SpatialDilatedMaxPooling_updateOutput)(
state, input, output, indices,
kW, kH, dW, dH, padW, padH, 1, 1, ceil_mode);
}
// Backward pass of (non-dilated) spatial max pooling: routes gradOutput
// back through the max indices by delegating to the dilated
// implementation with dilation fixed at 1x1. Parameters mirror the
// forward pass; indices are the argmax positions recorded there.
void THNN_(SpatialMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode)
{
THNN_(SpatialDilatedMaxPooling_updateGradInput)(
state, input, gradOutput, gradInput, indices,
kW, kH, dW, dH, padW, padH, 1, 1, ceil_mode);
}
#endif
|
22d567ce1dfa0c15da4e41319a6f63d1132eae3f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <util/dimage.h>
#include <util/timer.h>
#include <util/dmath.h>
#define MAX_FRAME 200
texture<float2, 2, hipReadModeElementType> tex_vector,tex_qpath;
texture<float4, 2, hipReadModeElementType>tex_vector_3d;
texture<float4, 2, hipReadModeElementType> tex_ext0, tex_ext1;
texture<float2, hipTextureType2DLayered, hipReadModeElementType> tex_forw0,tex_forw1;
texture<float4, hipTextureType2DLayered, hipReadModeElementType> tex_video0,tex_video1;
// Morphs two images using a per-pixel halfway-domain motion field.
// One thread per output pixel; expected launch: 2-D grid covering
// width x height. geo_fa blends the geometry between the two inputs,
// color_fa cross-fades the colors, color_from selects the color source
// (0 = image0, 1 = blend, 2 = image1). ex is presumably the
// border-extension margin of tex_ext0/tex_ext1 — TODO confirm caller.
__global__ void kernel_render_halfway_image(uchar3* out, int rowstride, int width, int height, int ex,
float color_fa,float geo_fa,int color_from)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
// guard the grid tail
if(pos.x >= width|| pos.y >= height)
return;
float2 p,q,v,u;
p=q=make_float2(pos.x,pos.y);
// v: halfway motion vector, u: quadratic path term; +0.5f centers the
// bilinear texture lookup on the texel.
v=tex2D(tex_vector,p.x+0.5f, p.y+0.5f);
u=tex2D(tex_qpath, p.x+0.5f, p.y+0.5f);
float alpha=0.8;
// damped fixed-point iteration: find halfway-domain position p whose
// warped path lands on this output pixel q
for(int i=0;i<20;i++)
{
p=q-(2*geo_fa-1)*v-(4*geo_fa-4*geo_fa*geo_fa)*u;
//p=q-(2*t_geo-1)*v;
v=alpha*tex2D(tex_vector, p.x+0.5f, p.y+0.5f)+(1-alpha)*v;
u=alpha*tex2D(tex_qpath, p.x+0.5f, p.y+0.5f)+(1-alpha)*u;
}
float4 c0,c1;
// sample both (extended) inputs at the two path endpoints p -/+ v
c0 = tex2D(tex_ext0, p.x-v.x+ex+0.5f, p.y-v.y+ex+0.5f);
c1 = tex2D(tex_ext1, p.x+v.x+ex+0.5f, p.y+v.y+ex+0.5f);
out += pos.y*rowstride+pos.x;
switch (color_from)
{
case 0: // color taken from image 0 only (+0.5 rounds to nearest)
*out=make_uchar3(c0.x+0.5,c0.y+0.5,c0.z+0.5);
break;
case 1: // cross-fade the two colors by color_fa
*out=make_uchar3(c0.x*(1-color_fa)+c1.x*color_fa+0.5,c0.y*(1-color_fa)+c1.y*color_fa+0.5,c0.z*(1-color_fa)+c1.z*color_fa+0.5);
break;
case 2: // color taken from image 1 only
*out=make_uchar3(c1.x+0.5,c1.y+0.5,c1.z+0.5);
break;
}
}
// Host wrapper: configures the four texture references (unnormalized
// coordinates, bilinear filtering, clamp addressing), binds the input
// arrays, and launches kernel_render_halfway_image over the output.
// img0/img1: the two (border-extended) input images; vector/qpath: the
// per-pixel halfway motion field and quadratic path term.
void render_halfway_image(rod::dvector<uchar3> &out, int rowstride, int width, int height, int ex,
float color_fa, float geo_fa,int color_from,
const hipArray *img0,
const hipArray *img1,
const hipArray *vector,
const hipArray *qpath)
{
tex_ext0.normalized = false;
tex_ext0.filterMode = hipFilterModeLinear;
tex_ext0.addressMode[0] = tex_ext0.addressMode[1] = hipAddressModeClamp;
tex_ext1.normalized = false;
tex_ext1.filterMode = hipFilterModeLinear;
tex_ext1.addressMode[0] = tex_ext1.addressMode[1] = hipAddressModeClamp;
tex_vector.normalized = false;
tex_vector.filterMode = hipFilterModeLinear;
tex_vector.addressMode[0] = tex_vector.addressMode[1] = hipAddressModeClamp;
tex_qpath.normalized = false;
tex_qpath.filterMode = hipFilterModeLinear;
tex_qpath.addressMode[0] = tex_qpath.addressMode[1] = hipAddressModeClamp;
hipBindTextureToArray(tex_ext0, img0);
hipBindTextureToArray(tex_ext1, img1);
hipBindTextureToArray(tex_vector, vector);
hipBindTextureToArray(tex_qpath, qpath);
// 32x8 threads per block; grid ceil-covers the output image
dim3 bdim(32,8),
gdim((width+bdim.x-1)/bdim.x,
(height+bdim.y-1)/bdim.y);
hipLaunchKernelGGL(( kernel_render_halfway_image), dim3(gdim), dim3(bdim), 0, 0, out, rowstride,width,height, ex, color_fa,geo_fa,color_from);
}
// Temporal resampling from video 0 (layered texture tex_video0).
// One thread per output pixel. First a damped fixed-point iteration in
// x/y follows the 3-D motion field (z forced to 0 during the search),
// then the z component of the field shifts the frame index; the color is
// fetched from the clamped layer, or — for fractional layers — by
// following the forward flow tex_forw0 and linearly blending two layers.
// The weighted contribution (1-fa) is ADDED to out, so out must be
// zeroed by the caller before the first of the two resample kernels.
__global__ void kernel_render_resample_image0(uchar3* out, int rowstride, int width, int height, int depth,float fa,int frame)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
// guard the grid tail
if(pos.x >= width|| pos.y >= height)
return;
float4 c;
float4 p,q,v;
p=q=make_float4(pos.x,pos.y,frame,0);
v=tex2D(tex_vector_3d,p.x+0.5f, p.y+0.5f);
v.z=0;
float alpha=0.5;
// spatial fixed-point search, damped by alpha
for(int i=0;i<50;i++)
{
p=q+v;
v=alpha*tex2D(tex_vector_3d, p.x+0.5f, p.y+0.5f)+(1-alpha)*v;
v.z=0;
}
// temporal shift: move the frame index by the field's z component
v=tex2D(tex_vector_3d, p.x+0.5f, p.y+0.5f);
q.z-=v.z;
if(q.z<=0)
c=tex2DLayered(tex_video0,q.x+0.5,q.y+0.5,0);
else if (q.z>=depth-1)
c=tex2DLayered(tex_video0,q.x+0.5,q.y+0.5,depth-1);
else
{
// fractional layer: iterate the forward flow f back to the base layer,
// then blend the base layer and the flow-advected next layer by fa_z
p=q;
p.z=floor(q.z);
float fa_z=q.z-p.z;
float2 f=tex2DLayered(tex_forw0,p.x+0.5,p.y+0.5,(int)(p.z+0.5));
for(int i=0;i<50;i++)
{
p=q-make_float4(f.x,f.y,1,0)*fa_z;
f=alpha*tex2DLayered(tex_forw0,p.x+0.5,p.y+0.5,(int)(p.z+0.5))+(1-alpha)*f;
}
c=tex2DLayered(tex_video0,p.x+0.5,p.y+0.5,(int)(p.z+0.5))*(1-fa_z)+tex2DLayered(tex_video0,p.x+0.5+f.x,p.y+0.5+f.y, (int)(p.z+0.5+1))*fa_z;
}
// accumulate this video's contribution, weighted by (1-fa)
out += pos.y*rowstride+pos.x;
*out=make_uchar3((*out).x+(c.x+0.5)*(1-fa),(*out).y+(c.y+0.5)*(1-fa),(*out).z+(c.z+0.5)*(1-fa));
}
// Temporal resampling from video 1 — mirror image of
// kernel_render_resample_image0: the spatial search walks the motion
// field backwards (q - v), the frame index shifts forwards (q.z += v.z),
// and the contribution is weighted by fa instead of (1-fa).
// Adds into out; caller must zero out before the first resample kernel.
__global__ void kernel_render_resample_image1(uchar3* out, int rowstride, int width, int height, int depth,float fa,int frame)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
// guard the grid tail
if(pos.x >= width || pos.y >= height)
return;
float4 c;
float4 p,q,v;
p=q=make_float4(pos.x,pos.y,frame,0);
v=tex2D(tex_vector_3d,p.x+0.5f, p.y+0.5f);
v.z=0;
float alpha=0.5;
// spatial fixed-point search, damped by alpha (opposite direction)
for(int i=0;i<50;i++)
{
p=q-v;
v=alpha*tex2D(tex_vector_3d, p.x+0.5f, p.y+0.5f)+(1-alpha)*v;
v.z=0;
}
// temporal shift: move the frame index by the field's z component
v=tex2D(tex_vector_3d, p.x+0.5f, p.y+0.5f);
q.z+=v.z;
if(q.z<=0)
c=tex2DLayered(tex_video1,q.x+0.5,q.y+0.5,0);
else if (q.z>=depth-1)
c=tex2DLayered(tex_video1,q.x+0.5,q.y+0.5,depth-1);
else
{
// fractional layer: iterate the forward flow f back to the base layer,
// then blend the base layer and the flow-advected next layer by fa_z
p=q;
p.z=floor(q.z);
float fa_z=q.z-p.z;
float2 f=tex2DLayered(tex_forw1,p.x+0.5,p.y+0.5,(int)(p.z+0.5));
for(int i=0;i<50;i++)
{
p=q-make_float4(f.x,f.y,1,0)*fa_z;
f=alpha*tex2DLayered(tex_forw1,p.x+0.5,p.y+0.5,(int)(p.z+0.5))+(1-alpha)*f;
}
c=tex2DLayered(tex_video1,p.x+0.5,p.y+0.5,(int)(p.z+0.5))*(1-fa_z)+tex2DLayered(tex_video1,p.x+0.5+f.x,p.y+0.5+f.y,(int)(p.z+0.5+1))*fa_z;
}
// accumulate this video's contribution, weighted by fa
out += pos.y*rowstride+pos.x;
*out=make_uchar3((*out).x+(c.x+0.5)*fa,(*out).y+(c.y+0.5)*fa,(*out).z+(c.z+0.5)*fa);
}
/*
 * render_resample_image: host wrapper for the temporal-resampling pass.
 * Binds the 3-D motion field, the two layered videos and their forward
 * flow fields to texture references (unnormalized coords, bilinear
 * filtering, clamp addressing), zeroes the output, then accumulates the
 * resampled frame from video0 (weight 1-fa) and video1 (weight fa).
 * The body deliberately ends without the closing brace: the brace is on
 * the following source line, outside this edit span.
 */
void render_resample_image(rod::dvector<uchar3> &out, int rowstride, int width, int height, int depth,
float fa, int frame,
const hipArray* img0,
const hipArray* img1,
const hipArray *vector,
const hipArray* f0,
const hipArray* f1)
{
tex_vector_3d.normalized = false;
tex_vector_3d.filterMode = hipFilterModeLinear;
tex_vector_3d.addressMode[0] = tex_vector_3d.addressMode[1] = hipAddressModeClamp;
hipBindTextureToArray(tex_vector_3d, vector);
tex_video0.normalized = false;
tex_video0.filterMode = hipFilterModeLinear;
tex_video0.addressMode[0] = tex_video0.addressMode[1] = tex_video0.addressMode[2]= hipAddressModeClamp;
hipBindTextureToArray(tex_video0, img0);
tex_video1.normalized = false;
tex_video1.filterMode = hipFilterModeLinear;
// fix: this line previously re-set tex_video0.addressMode[2] instead of
// tex_video1.addressMode[2], leaving tex_video1's layer axis unset.
tex_video1.addressMode[0] = tex_video1.addressMode[1] = tex_video1.addressMode[2]=hipAddressModeClamp;
hipBindTextureToArray(tex_video1, img1);
tex_forw0.normalized = false;
tex_forw0.filterMode = hipFilterModeLinear;
tex_forw0.addressMode[0] = tex_forw0.addressMode[1] = tex_forw0.addressMode[2]=hipAddressModeClamp;
hipBindTextureToArray(tex_forw0, f0);
tex_forw1.normalized = false;
tex_forw1.filterMode = hipFilterModeLinear;
// fix: addressMode[1] was assigned twice and addressMode[0] never set;
// set [0], [1] and [2] like every other texture above.
tex_forw1.addressMode[0] = tex_forw1.addressMode[1] = tex_forw1.addressMode[2]=hipAddressModeClamp;
hipBindTextureToArray(tex_forw1, f1);
// the resample kernels ADD their weighted contribution, so clear first
out.fill(0);
dim3 bdim(32,8),
gdim((width+bdim.x-1)/bdim.x,
(height+bdim.y-1)/bdim.y);
if(fa<1)
hipLaunchKernelGGL(( kernel_render_resample_image0), dim3(gdim), dim3(bdim), 0, 0, out,rowstride, width, height, depth,fa,frame);
if(fa>0)
hipLaunchKernelGGL(( kernel_render_resample_image1), dim3(gdim), dim3(bdim), 0, 0, out,rowstride, width, height, depth,fa,frame);
} | 22d567ce1dfa0c15da4e41319a6f63d1132eae3f.cu | #include <cuda.h>
#include <util/dimage.h>
#include <util/timer.h>
#include <util/dmath.h>
#define MAX_FRAME 200
texture<float2, 2, cudaReadModeElementType> tex_vector,tex_qpath;
texture<float4, 2, cudaReadModeElementType>tex_vector_3d;
texture<float4, 2, cudaReadModeElementType> tex_ext0, tex_ext1;
texture<float2, cudaTextureType2DLayered, cudaReadModeElementType> tex_forw0,tex_forw1;
texture<float4, cudaTextureType2DLayered, cudaReadModeElementType> tex_video0,tex_video1;
// Morphs two images using a per-pixel halfway-domain motion field.
// One thread per output pixel; expected launch: 2-D grid covering
// width x height. geo_fa blends the geometry between the two inputs,
// color_fa cross-fades the colors, color_from selects the color source
// (0 = image0, 1 = blend, 2 = image1). ex is presumably the
// border-extension margin of tex_ext0/tex_ext1 — TODO confirm caller.
__global__ void kernel_render_halfway_image(uchar3* out, int rowstride, int width, int height, int ex,
float color_fa,float geo_fa,int color_from)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
// guard the grid tail
if(pos.x >= width|| pos.y >= height)
return;
float2 p,q,v,u;
p=q=make_float2(pos.x,pos.y);
// v: halfway motion vector, u: quadratic path term; +0.5f centers the
// bilinear texture lookup on the texel.
v=tex2D(tex_vector,p.x+0.5f, p.y+0.5f);
u=tex2D(tex_qpath, p.x+0.5f, p.y+0.5f);
float alpha=0.8;
// damped fixed-point iteration: find halfway-domain position p whose
// warped path lands on this output pixel q
for(int i=0;i<20;i++)
{
p=q-(2*geo_fa-1)*v-(4*geo_fa-4*geo_fa*geo_fa)*u;
//p=q-(2*t_geo-1)*v;
v=alpha*tex2D(tex_vector, p.x+0.5f, p.y+0.5f)+(1-alpha)*v;
u=alpha*tex2D(tex_qpath, p.x+0.5f, p.y+0.5f)+(1-alpha)*u;
}
float4 c0,c1;
// sample both (extended) inputs at the two path endpoints p -/+ v
c0 = tex2D(tex_ext0, p.x-v.x+ex+0.5f, p.y-v.y+ex+0.5f);
c1 = tex2D(tex_ext1, p.x+v.x+ex+0.5f, p.y+v.y+ex+0.5f);
out += pos.y*rowstride+pos.x;
switch (color_from)
{
case 0: // color taken from image 0 only (+0.5 rounds to nearest)
*out=make_uchar3(c0.x+0.5,c0.y+0.5,c0.z+0.5);
break;
case 1: // cross-fade the two colors by color_fa
*out=make_uchar3(c0.x*(1-color_fa)+c1.x*color_fa+0.5,c0.y*(1-color_fa)+c1.y*color_fa+0.5,c0.z*(1-color_fa)+c1.z*color_fa+0.5);
break;
case 2: // color taken from image 1 only
*out=make_uchar3(c1.x+0.5,c1.y+0.5,c1.z+0.5);
break;
}
}
// Host wrapper: configures the four texture references (unnormalized
// coordinates, bilinear filtering, clamp addressing), binds the input
// arrays, and launches kernel_render_halfway_image over the output.
// img0/img1: the two (border-extended) input images; vector/qpath: the
// per-pixel halfway motion field and quadratic path term.
void render_halfway_image(rod::dvector<uchar3> &out, int rowstride, int width, int height, int ex,
float color_fa, float geo_fa,int color_from,
const cudaArray *img0,
const cudaArray *img1,
const cudaArray *vector,
const cudaArray *qpath)
{
tex_ext0.normalized = false;
tex_ext0.filterMode = cudaFilterModeLinear;
tex_ext0.addressMode[0] = tex_ext0.addressMode[1] = cudaAddressModeClamp;
tex_ext1.normalized = false;
tex_ext1.filterMode = cudaFilterModeLinear;
tex_ext1.addressMode[0] = tex_ext1.addressMode[1] = cudaAddressModeClamp;
tex_vector.normalized = false;
tex_vector.filterMode = cudaFilterModeLinear;
tex_vector.addressMode[0] = tex_vector.addressMode[1] = cudaAddressModeClamp;
tex_qpath.normalized = false;
tex_qpath.filterMode = cudaFilterModeLinear;
tex_qpath.addressMode[0] = tex_qpath.addressMode[1] = cudaAddressModeClamp;
cudaBindTextureToArray(tex_ext0, img0);
cudaBindTextureToArray(tex_ext1, img1);
cudaBindTextureToArray(tex_vector, vector);
cudaBindTextureToArray(tex_qpath, qpath);
// 32x8 threads per block; grid ceil-covers the output image
dim3 bdim(32,8),
gdim((width+bdim.x-1)/bdim.x,
(height+bdim.y-1)/bdim.y);
kernel_render_halfway_image<<<gdim, bdim>>>(out, rowstride,width,height, ex, color_fa,geo_fa,color_from);
}
// Temporal resampling from video 0 (layered texture tex_video0).
// One thread per output pixel. First a damped fixed-point iteration in
// x/y follows the 3-D motion field (z forced to 0 during the search),
// then the z component of the field shifts the frame index; the color is
// fetched from the clamped layer, or — for fractional layers — by
// following the forward flow tex_forw0 and linearly blending two layers.
// The weighted contribution (1-fa) is ADDED to out, so out must be
// zeroed by the caller before the first of the two resample kernels.
__global__ void kernel_render_resample_image0(uchar3* out, int rowstride, int width, int height, int depth,float fa,int frame)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
// guard the grid tail
if(pos.x >= width|| pos.y >= height)
return;
float4 c;
float4 p,q,v;
p=q=make_float4(pos.x,pos.y,frame,0);
v=tex2D(tex_vector_3d,p.x+0.5f, p.y+0.5f);
v.z=0;
float alpha=0.5;
// spatial fixed-point search, damped by alpha
for(int i=0;i<50;i++)
{
p=q+v;
v=alpha*tex2D(tex_vector_3d, p.x+0.5f, p.y+0.5f)+(1-alpha)*v;
v.z=0;
}
// temporal shift: move the frame index by the field's z component
v=tex2D(tex_vector_3d, p.x+0.5f, p.y+0.5f);
q.z-=v.z;
if(q.z<=0)
c=tex2DLayered(tex_video0,q.x+0.5,q.y+0.5,0);
else if (q.z>=depth-1)
c=tex2DLayered(tex_video0,q.x+0.5,q.y+0.5,depth-1);
else
{
// fractional layer: iterate the forward flow f back to the base layer,
// then blend the base layer and the flow-advected next layer by fa_z
p=q;
p.z=floor(q.z);
float fa_z=q.z-p.z;
float2 f=tex2DLayered(tex_forw0,p.x+0.5,p.y+0.5,(int)(p.z+0.5));
for(int i=0;i<50;i++)
{
p=q-make_float4(f.x,f.y,1,0)*fa_z;
f=alpha*tex2DLayered(tex_forw0,p.x+0.5,p.y+0.5,(int)(p.z+0.5))+(1-alpha)*f;
}
c=tex2DLayered(tex_video0,p.x+0.5,p.y+0.5,(int)(p.z+0.5))*(1-fa_z)+tex2DLayered(tex_video0,p.x+0.5+f.x,p.y+0.5+f.y, (int)(p.z+0.5+1))*fa_z;
}
// accumulate this video's contribution, weighted by (1-fa)
out += pos.y*rowstride+pos.x;
*out=make_uchar3((*out).x+(c.x+0.5)*(1-fa),(*out).y+(c.y+0.5)*(1-fa),(*out).z+(c.z+0.5)*(1-fa));
}
// Temporal resampling from video 1 — mirror image of
// kernel_render_resample_image0: the spatial search walks the motion
// field backwards (q - v), the frame index shifts forwards (q.z += v.z),
// and the contribution is weighted by fa instead of (1-fa).
// Adds into out; caller must zero out before the first resample kernel.
__global__ void kernel_render_resample_image1(uchar3* out, int rowstride, int width, int height, int depth,float fa,int frame)
{
int tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y;
int2 pos = make_int2(bx*blockDim.x + tx, by*blockDim.y + ty);
// guard the grid tail
if(pos.x >= width || pos.y >= height)
return;
float4 c;
float4 p,q,v;
p=q=make_float4(pos.x,pos.y,frame,0);
v=tex2D(tex_vector_3d,p.x+0.5f, p.y+0.5f);
v.z=0;
float alpha=0.5;
// spatial fixed-point search, damped by alpha (opposite direction)
for(int i=0;i<50;i++)
{
p=q-v;
v=alpha*tex2D(tex_vector_3d, p.x+0.5f, p.y+0.5f)+(1-alpha)*v;
v.z=0;
}
// temporal shift: move the frame index by the field's z component
v=tex2D(tex_vector_3d, p.x+0.5f, p.y+0.5f);
q.z+=v.z;
if(q.z<=0)
c=tex2DLayered(tex_video1,q.x+0.5,q.y+0.5,0);
else if (q.z>=depth-1)
c=tex2DLayered(tex_video1,q.x+0.5,q.y+0.5,depth-1);
else
{
// fractional layer: iterate the forward flow f back to the base layer,
// then blend the base layer and the flow-advected next layer by fa_z
p=q;
p.z=floor(q.z);
float fa_z=q.z-p.z;
float2 f=tex2DLayered(tex_forw1,p.x+0.5,p.y+0.5,(int)(p.z+0.5));
for(int i=0;i<50;i++)
{
p=q-make_float4(f.x,f.y,1,0)*fa_z;
f=alpha*tex2DLayered(tex_forw1,p.x+0.5,p.y+0.5,(int)(p.z+0.5))+(1-alpha)*f;
}
c=tex2DLayered(tex_video1,p.x+0.5,p.y+0.5,(int)(p.z+0.5))*(1-fa_z)+tex2DLayered(tex_video1,p.x+0.5+f.x,p.y+0.5+f.y,(int)(p.z+0.5+1))*fa_z;
}
// accumulate this video's contribution, weighted by fa
out += pos.y*rowstride+pos.x;
*out=make_uchar3((*out).x+(c.x+0.5)*fa,(*out).y+(c.y+0.5)*fa,(*out).z+(c.z+0.5)*fa);
}
/*
 * render_resample_image: host wrapper for the temporal-resampling pass.
 * Binds the 3-D motion field, the two layered videos and their forward
 * flow fields to texture references (unnormalized coords, bilinear
 * filtering, clamp addressing), zeroes the output, then accumulates the
 * resampled frame from video0 (weight 1-fa) and video1 (weight fa).
 * The body deliberately ends without the closing brace: the brace is on
 * the following source line, outside this edit span.
 */
void render_resample_image(rod::dvector<uchar3> &out, int rowstride, int width, int height, int depth,
float fa, int frame,
const cudaArray* img0,
const cudaArray* img1,
const cudaArray *vector,
const cudaArray* f0,
const cudaArray* f1)
{
tex_vector_3d.normalized = false;
tex_vector_3d.filterMode = cudaFilterModeLinear;
tex_vector_3d.addressMode[0] = tex_vector_3d.addressMode[1] = cudaAddressModeClamp;
cudaBindTextureToArray(tex_vector_3d, vector);
tex_video0.normalized = false;
tex_video0.filterMode = cudaFilterModeLinear;
tex_video0.addressMode[0] = tex_video0.addressMode[1] = tex_video0.addressMode[2]= cudaAddressModeClamp;
cudaBindTextureToArray(tex_video0, img0);
tex_video1.normalized = false;
tex_video1.filterMode = cudaFilterModeLinear;
// fix: this line previously re-set tex_video0.addressMode[2] instead of
// tex_video1.addressMode[2], leaving tex_video1's layer axis unset.
tex_video1.addressMode[0] = tex_video1.addressMode[1] = tex_video1.addressMode[2]=cudaAddressModeClamp;
cudaBindTextureToArray(tex_video1, img1);
tex_forw0.normalized = false;
tex_forw0.filterMode = cudaFilterModeLinear;
tex_forw0.addressMode[0] = tex_forw0.addressMode[1] = tex_forw0.addressMode[2]=cudaAddressModeClamp;
cudaBindTextureToArray(tex_forw0, f0);
tex_forw1.normalized = false;
tex_forw1.filterMode = cudaFilterModeLinear;
// fix: addressMode[1] was assigned twice and addressMode[0] never set;
// set [0], [1] and [2] like every other texture above.
tex_forw1.addressMode[0] = tex_forw1.addressMode[1] = tex_forw1.addressMode[2]=cudaAddressModeClamp;
cudaBindTextureToArray(tex_forw1, f1);
// the resample kernels ADD their weighted contribution, so clear first
out.fill(0);
dim3 bdim(32,8),
gdim((width+bdim.x-1)/bdim.x,
(height+bdim.y-1)/bdim.y);
if(fa<1)
kernel_render_resample_image0<<<gdim, bdim>>>(out,rowstride, width, height, depth,fa,frame);
if(fa>0)
kernel_render_resample_image1<<<gdim, bdim>>>(out,rowstride, width, height, depth,fa,frame);
} |
baef455f4716303e2431f04510407dcc7c75c64f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
// Per-cell timestep/area computation (OP2 "user kernel"): x1..x4 are the
// four corner coordinates (x,y) of a quadrilateral cell, q the cell's
// flow variables (q[0] presumably density, q[1]/q[2] momenta, q[3]
// energy — TODO confirm against the solver). Sums |u*dy - v*dx| plus
// sound speed times edge length over the four edges and scales by 1/cfl.
// gam, gm1 and cfl are global constants defined elsewhere in this file.
__device__
inline void adt_calc_gpu(const double *x1, const double *x2, const double *x3, const double *x4, const double *q,double *adt){
double dx,dy, ri,u,v,c;
ri = 1.0f/q[0];
u = ri*q[1];
v = ri*q[2];
// local sound speed from the equation of state
c = sqrt(gam*gm1*(ri*q[3]-0.5f*(u*u+v*v)));
dx = x2[0] - x1[0];
dy = x2[1] - x1[1];
*adt = fabs(u*dy-v*dx) + c*sqrt(dx*dx+dy*dy);
dx = x3[0] - x2[0];
dy = x3[1] - x2[1];
*adt += fabs(u*dy-v*dx) + c*sqrt(dx*dx+dy*dy);
dx = x4[0] - x3[0];
dy = x4[1] - x3[1];
*adt += fabs(u*dy-v*dx) + c*sqrt(dx*dx+dy*dy);
dx = x1[0] - x4[0];
dy = x1[1] - x4[1];
*adt += fabs(u*dy-v*dx) + c*sqrt(dx*dx+dy*dy);
//*adt = (*adt) / cfl;
*adt = (*adt) * (1.0f / cfl);
}
// CUDA kernel function
// OP2-generated CUDA kernel wrapping adt_calc_gpu.
// Each thread block processes one plan block: blkmap maps the launch
// block (plus block_offset) to a plan block id, and offset/nelems give
// that block's element range in the set. The four opDat0Map entries per
// element index the cell's corner coordinates inside ind_arg0.
// ncolors/colors belong to the generated interface but are unused here.
__global__ void op_cuda_adt_calc(
  const double *__restrict ind_arg0,
  const int *__restrict opDat0Map,
  const double *__restrict arg4,
  double *arg5,
  int block_offset,
  int *blkmap,
  int *offset,
  int *nelems,
  int *ncolors,
  int *colors,
  int nblocks,
  int set_size) {

  __shared__ int nelem, offset_b;
  extern __shared__ char shared[]; // generated scratch declaration (unused here)

  // launch grid may be larger than the number of plan blocks
  if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
    return;
  }
  if (threadIdx.x==0) {
    //get sizes and shift pointers and direct-mapped data
    int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
    nelem = nelems[blockId];
    offset_b = offset[blockId];
  }
  __syncthreads(); // make sure all of above completed

  // fix: the generated loop increment read "n = n+=blockDim.x";
  // use a plain compound increment.
  for ( int n=threadIdx.x; n<nelem; n += blockDim.x ){
    int map0idx;
    int map1idx;
    int map2idx;
    int map3idx;
    map0idx = opDat0Map[n + offset_b + set_size * 0];
    map1idx = opDat0Map[n + offset_b + set_size * 1];
    map2idx = opDat0Map[n + offset_b + set_size * 2];
    map3idx = opDat0Map[n + offset_b + set_size * 3];

    //user-supplied kernel call
    adt_calc_gpu(ind_arg0+map0idx*2,
                 ind_arg0+map1idx*2,
                 ind_arg0+map2idx*2,
                 ind_arg0+map3idx*2,
                 arg4+(n+offset_b)*4,
                 arg5+(n+offset_b)*1);
  }
}
//GPU host stub function
// OP2-generated GPU host stub for the adt_calc parallel loop.
// Packs the op_args, starts MPI halo exchanges, obtains a coloured
// execution plan (colouring avoids write races through the indirection
// map), launches op_cuda_adt_calc once per colour, and records timing
// and transfer statistics in OP_kernels[1].
void op_par_loop_adt_calc_gpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(1);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
if (OP_kernels[1].count==1) op_register_strides();
// args 0-3 all reference indirect dataset 0 (the coordinates);
// args 4-5 are direct (-1)
int ninds = 1;
int inds[6] = {0,0,0,0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: adt_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_1
int part_size = OP_PART_SIZE_1;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
// core colours are race-free without halos; wait for MPI once the
// non-core colours begin
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
#endif
// split into a 2-D grid when the colour has >= 2^16 blocks
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
hipLaunchKernelGGL(( op_cuda_adt_calc), dim3(nblocks),dim3(nthread), 0, 0,
(double *)arg0.data_d,
arg0.map_data_d,
(double*)arg4.data_d,
(double*)arg5.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[1].transfer += Plan->transfer;
OP_kernels[1].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[1].time += wall_t2 - wall_t1;
}
void op_par_loop_adt_calc_cpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5);
//GPU host stub function
// Public entry point for the adt_calc loop. In hybrid builds the
// OP_hybrid_gpu runtime flag picks between the GPU and CPU stubs;
// otherwise the GPU stub is called unconditionally.
#if OP_HYBRID_GPU
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
if (OP_hybrid_gpu) {
op_par_loop_adt_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5);
}else{
op_par_loop_adt_calc_cpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5);
}
}
#else
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
op_par_loop_adt_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5);
}
#endif //OP_HYBRID_GPU
| baef455f4716303e2431f04510407dcc7c75c64f.cu | //
// auto-generated by op2.py
//
//user function
// Per-cell timestep/area computation (OP2 "user kernel"): x1..x4 are the
// four corner coordinates (x,y) of a quadrilateral cell, q the cell's
// flow variables (q[0] presumably density, q[1]/q[2] momenta, q[3]
// energy — TODO confirm against the solver). Sums |u*dy - v*dx| plus
// sound speed times edge length over the four edges and scales by 1/cfl.
// gam, gm1 and cfl are global constants defined elsewhere in this file.
__device__
inline void adt_calc_gpu(const double *x1, const double *x2, const double *x3, const double *x4, const double *q,double *adt){
double dx,dy, ri,u,v,c;
ri = 1.0f/q[0];
u = ri*q[1];
v = ri*q[2];
// local sound speed from the equation of state
c = sqrt(gam*gm1*(ri*q[3]-0.5f*(u*u+v*v)));
dx = x2[0] - x1[0];
dy = x2[1] - x1[1];
*adt = fabs(u*dy-v*dx) + c*sqrt(dx*dx+dy*dy);
dx = x3[0] - x2[0];
dy = x3[1] - x2[1];
*adt += fabs(u*dy-v*dx) + c*sqrt(dx*dx+dy*dy);
dx = x4[0] - x3[0];
dy = x4[1] - x3[1];
*adt += fabs(u*dy-v*dx) + c*sqrt(dx*dx+dy*dy);
dx = x1[0] - x4[0];
dy = x1[1] - x4[1];
*adt += fabs(u*dy-v*dx) + c*sqrt(dx*dx+dy*dy);
//*adt = (*adt) / cfl;
*adt = (*adt) * (1.0f / cfl);
}
// CUDA kernel function
// OP2-generated CUDA kernel wrapping adt_calc_gpu.
// Each thread block processes one plan block: blkmap maps the launch
// block (plus block_offset) to a plan block id, and offset/nelems give
// that block's element range in the set. The four opDat0Map entries per
// element index the cell's corner coordinates inside ind_arg0.
// ncolors/colors belong to the generated interface but are unused here.
__global__ void op_cuda_adt_calc(
  const double *__restrict ind_arg0,
  const int *__restrict opDat0Map,
  const double *__restrict arg4,
  double *arg5,
  int block_offset,
  int *blkmap,
  int *offset,
  int *nelems,
  int *ncolors,
  int *colors,
  int nblocks,
  int set_size) {

  __shared__ int nelem, offset_b;
  extern __shared__ char shared[]; // generated scratch declaration (unused here)

  // launch grid may be larger than the number of plan blocks
  if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
    return;
  }
  if (threadIdx.x==0) {
    //get sizes and shift pointers and direct-mapped data
    int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
    nelem = nelems[blockId];
    offset_b = offset[blockId];
  }
  __syncthreads(); // make sure all of above completed

  // fix: the generated loop increment read "n = n+=blockDim.x";
  // use a plain compound increment.
  for ( int n=threadIdx.x; n<nelem; n += blockDim.x ){
    int map0idx;
    int map1idx;
    int map2idx;
    int map3idx;
    map0idx = opDat0Map[n + offset_b + set_size * 0];
    map1idx = opDat0Map[n + offset_b + set_size * 1];
    map2idx = opDat0Map[n + offset_b + set_size * 2];
    map3idx = opDat0Map[n + offset_b + set_size * 3];

    //user-supplied kernel call
    adt_calc_gpu(ind_arg0+map0idx*2,
                 ind_arg0+map1idx*2,
                 ind_arg0+map2idx*2,
                 ind_arg0+map3idx*2,
                 arg4+(n+offset_b)*4,
                 arg5+(n+offset_b)*1);
  }
}
//GPU host stub function
// OP2-generated GPU host stub for the adt_calc parallel loop.
// Packs the op_args, starts MPI halo exchanges, obtains a coloured
// execution plan (colouring avoids write races through the indirection
// map), launches op_cuda_adt_calc once per colour, and records timing
// and transfer statistics in OP_kernels[1].
void op_par_loop_adt_calc_gpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
int nargs = 6;
op_arg args[6];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(1);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
if (OP_kernels[1].count==1) op_register_strides();
// args 0-3 all reference indirect dataset 0 (the coordinates);
// args 4-5 are direct (-1)
int ninds = 1;
int inds[6] = {0,0,0,0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: adt_calc\n");
}
//get plan
#ifdef OP_PART_SIZE_1
int part_size = OP_PART_SIZE_1;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
// core colours are race-free without halos; wait for MPI once the
// non-core colours begin
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
#endif
// split into a 2-D grid when the colour has >= 2^16 blocks
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
op_cuda_adt_calc<<<nblocks,nthread>>>(
(double *)arg0.data_d,
arg0.map_data_d,
(double*)arg4.data_d,
(double*)arg5.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[1].transfer += Plan->transfer;
OP_kernels[1].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[1].time += wall_t2 - wall_t1;
}
void op_par_loop_adt_calc_cpu(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5);
//GPU host stub function
// Public entry point. When built with OP_HYBRID_GPU the implementation is
// chosen at run time via the OP_hybrid_gpu flag (GPU plan execution vs the
// separately generated CPU fallback); otherwise it forwards to the GPU
// version unconditionally. Both variants share the same signature so
// callers are unaffected by the build configuration.
#if OP_HYBRID_GPU
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
if (OP_hybrid_gpu) {
op_par_loop_adt_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5);
}else{
op_par_loop_adt_calc_cpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5);
}
}
#else
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5){
op_par_loop_adt_calc_gpu(name, set,
arg0,
arg1,
arg2,
arg3,
arg4,
arg5);
}
#endif //OP_HYBRID_GPU
|
378ee67c17eb4fbda87ecdee2b1df6eaf6113fa5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <chrono>
#ifdef __HIPCC__
// Fix: hipify mangled these three definitions into syntactically invalid
// preprocessor lines (fragments of hipLaunchKernelGGL spliced into the macro
// heads) and dropped the '(' from KERNEL_ARGS2 in the #else branch.
// hipcc accepts the CUDA triple-chevron launch syntax, so the HIP branch can
// use the same expansion as the original CUDA macros.
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif
using namespace std;
// Some commonly referenced values
#define NUM_BITS ((unsigned int)(8 * sizeof(unsigned int)))
#define dataArrayLength ((data_length + NUM_BITS - 1) / NUM_BITS)
#define resultArrayLength ((data_length * NUM_FILTERS + NUM_BITS - 1) / NUM_BITS)
// Macro for automatically checking and reporting CUDA errors
#define CUDACHECK(cmd) do { \
hipError_t e = cmd; \
if( e != hipSuccess ) { \
printf("Failed: Cuda error %s:%d '%s'\n", \
__FILE__,__LINE__,hipGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
} while(0)
// Constants defined for encoding
unsigned int data_length = 256;
const unsigned int THREADS = 256;
const unsigned int FILTER_LENGTH = 3;
const unsigned int NUM_FILTERS = 2;
const unsigned long long FILTERS = (0b111 << 3) | (0b011);
// misc global data
const int test_iters = 1;
char nums[2] = { '0', '1' };
__global__
void convEncode(const unsigned int *data, unsigned int *output) {
unsigned int start = blockIdx.x * (THREADS / NUM_BITS);
unsigned int idx = threadIdx.x;
// Retrieve the relevant bits from the data and and shift them to LSB
unsigned int val = (((((idx / NUM_BITS) | start) == 0 ? 0 // Don't attempt to look up non-existant values
: data[idx / NUM_BITS + start - 1] << (1 + (idx % NUM_BITS)))) // Retrieve both the previous value in the array
| (data[idx / NUM_BITS + start] >> (NUM_BITS - 1 - (idx % NUM_BITS)))) // and the current, in case the filter lies on a word boundary
& ((1 << FILTER_LENGTH) - 1);
__shared__ char bits[THREADS];
bits[idx] = 0;
#pragma unroll
// Each thread evaluates all of the filters on its data
for (unsigned int i = 0; i < NUM_FILTERS; i++) {
unsigned int n = val & ((unsigned int)((FILTERS >> ((NUM_FILTERS - i - 1) * FILTER_LENGTH))
& ((1 << FILTER_LENGTH) - 1)));
n ^= n >> 1; // Parity of pairs of bits
n ^= n >> 2; // Parity of every 4 bits
n = (n & 0x11111111U) * 0x11111111U; // Multiply to combine all the 4 bit parities
n = (n >> 28) & 1; // Shift and mask the parity bit to the LSB
bits[idx] |= (n << (NUM_FILTERS - 1 - i));
}
__syncthreads();
// Write the result from the shared memory to device memory.
// Every element in the output array is handled by a separate thread
// to prevent errors from threads overwriting each other.
if (idx < NUM_FILTERS * THREADS / NUM_BITS) {
unsigned int o = 0;
for (int i = idx * NUM_BITS / NUM_FILTERS; i < (idx + 1) * NUM_BITS / NUM_FILTERS; i++) {
o <<= NUM_FILTERS;
o |= bits[i] & (~(1 << NUM_FILTERS));
}
output[NUM_FILTERS * start + idx] = o;
}
}
// Print `length` words of `data` as a continuous binary string (MSB first),
// followed by a newline.
void printBitString(const unsigned int *data, int length) {
  for (int i = 0; i < length; i++) {
    for (int j = 0; j < NUM_BITS; j++) {
      // Fix: the original shifted by (NUM_BITS - j), which is a shift by 32
      // when j == 0 -- undefined behaviour on a 32-bit operand -- and never
      // printed bit 0. Shift so bit (NUM_BITS-1-j) lands in the LSB instead.
      cout << nums[(data[i] >> (NUM_BITS - 1 - j)) & 1];
    }
  }
  cout << endl;
}
// Map a single hexadecimal digit to its numeric value (0-15).
// Accepts '0'-'9', 'A'-'F' and 'a'-'f'; any other character yields -1.
int hex2int(char ch) {
  // Decimal digits map directly onto 0..9.
  if ('0' <= ch && ch <= '9')
    return ch - '0';
  // Upper- and lower-case hex letters map onto 10..15.
  if ('A' <= ch && ch <= 'F')
    return 10 + (ch - 'A');
  if ('a' <= ch && ch <= 'f')
    return 10 + (ch - 'a');
  // Not a hexadecimal digit.
  return -1;
}
// Parse a hex string into an array of packed unsigned ints: each character
// contributes one byte (its nibble value, or 0xFF & -1 for invalid digits),
// packed big-endian, four characters per word. Caller owns (and must free())
// the returned buffer.
unsigned int* loadInpData(const char *inp) {
  int messageLen = strlen(inp);
  printf("Message length: %d", messageLen);
  // Fix: the original malloc'd the buffer and then read each word ("<<= 8")
  // before fully writing it, folding indeterminate bytes into the output.
  // calloc guarantees the words start at zero (and rounds the size up to a
  // whole number of words).
  unsigned int *data = (unsigned int*)calloc(
      (messageLen + sizeof(unsigned int) - 1) / sizeof(unsigned int),
      sizeof(unsigned int));
  for (int i = 0; i < messageLen; i++) {
    data[i / (sizeof(unsigned int) / sizeof(char))] <<= 8;
    data[i / (sizeof(unsigned int) / sizeof(char))] |= (0xFF & (hex2int(inp[i])));
  }
  return data;
}
// Time `numIters` launches of convEncode on an input of `length` bits whose
// words are byte-filled with `fill`. Returns elapsed wall time in
// microseconds (launch + synchronisation only).
// NOTE(review): the staging buffer C is sized by the *global* data_length
// (via dataArrayLength), not by `length`; intent unconfirmed, so only the
// out-of-bounds copy and the leak are fixed here.
long long runTest(unsigned long length, unsigned int fill, unsigned int numIters) {
  unsigned int *A, *B, *C;
  C = (unsigned int*)(malloc(sizeof(unsigned int) * dataArrayLength));
  memset(C, fill, sizeof(unsigned int) * dataArrayLength);
  CUDACHECK(hipMallocManaged(&A, ((length + NUM_BITS - 1) / NUM_BITS) * sizeof(unsigned int)));
  CUDACHECK(hipMallocManaged(&B, ((length * NUM_FILTERS + NUM_BITS - 1) / NUM_BITS) * sizeof(unsigned int)));
  // CUDACHECK(hipMemset(A, fill, ((length + NUM_BITS - 1) / NUM_BITS) * sizeof(unsigned int)));
  // Fix: the original always copied dataArrayLength words, overflowing A
  // whenever length < data_length. Copy no more than A can hold.
  size_t aWords = (length + NUM_BITS - 1) / NUM_BITS;
  size_t cWords = dataArrayLength;
  memcpy(A, C, sizeof(unsigned int) * (aWords < cWords ? aWords : cWords));
  CUDACHECK(hipMemset(B, 0, ((length * NUM_FILTERS + NUM_BITS - 1) / NUM_BITS) * sizeof(unsigned int)));
  auto start = chrono::high_resolution_clock::now();
  for (unsigned int j = 0; j < numIters; j++) {
    // This macro is used because the compiler throws a compile error when trying to parse
    // the correct CUDA kernel syntax
    convEncode KERNEL_ARGS2((length + THREADS - 1) / THREADS, THREADS) (A, B);
    CUDACHECK(hipDeviceSynchronize());
  }
  auto end = chrono::high_resolution_clock::now();
  long long elapsed_time = chrono::duration_cast<chrono::microseconds>(end - start).count();
  free(C); // fix: C was leaked on every call
  hipFree(A);
  hipFree(B);
  return elapsed_time;
}
/**
* When called from the command line, the following arguments can be passed:
* -t -- Flag to indicate the code should execute a performance test. Any arguments following this flag will be ignored
* -m -- Use user provided data. Data should immediately follow the flag and should be in hex format.
* -l -- Specifies a custom message length, in bits. If the -m flag is also used, this will have no effect.
* -f -- Specifies the 32 bit value to fill the input array with. All formats accepted, with appropriate prefix (0b, 0x, etc.)
* If -m flag is set, has no effect.
*/
int main(int argc, char *argv[]) {
unsigned int fillVal = 0xFFFFFFFF;
bool useProvidedData = false, runtest = false;
unsigned int *data;
// Parse command line arguments
// Usage:
if (argc > 1) {
for (int i = 1; i < argc; i++) {
if (argv[i][0] == '-') {
//printf("%c\t%d\t%d\n", argv[i][1], i, argv[i][1] == 'l');
switch (argv[i][1]) {
case 'L':
case 'l':
i++;
if (i < argc and !useProvidedData) { data_length = (NUM_BITS - 1 + (unsigned int)(strtol(argv[i], NULL, 0))) / NUM_BITS * NUM_BITS; }
break;
case 'F':
case 'f':
i++;
if (i < argc) { fillVal = (unsigned int)(strtol(argv[i], NULL, 0)); }
break;
case 'M':
case 'm':
i++;
if (i < argc) {
data = loadInpData(argv[i]);
useProvidedData = true;
}
break;
case 't':
case 'T':
i = argc;
useProvidedData = false, runtest = true;
break;
default:
printf("Invalid argument: %s", argv[i]);
exit(1);
}
}
}
}
unsigned int *A, *B;
CUDACHECK(hipMallocManaged(&A, dataArrayLength * sizeof(unsigned int)));
CUDACHECK(hipMallocManaged(&B, resultArrayLength * sizeof(unsigned int)));
if (!useProvidedData) {
memset(A, fillVal, dataArrayLength * sizeof(unsigned int));
}
else {
memcpy(A, data, sizeof(unsigned int) * dataArrayLength);
}
CUDACHECK(hipMemset(B, 0, resultArrayLength * sizeof(unsigned int)));
// Run the kernel once to reduce timing errors from initial run
convEncode KERNEL_ARGS2((data_length + THREADS - 1) / THREADS, THREADS) (A, B);
CUDACHECK(hipDeviceSynchronize());
if (runtest) {
long long times[31];
for (int j = 0; j < test_iters; j++) {
for (int i = 0; i < 31; i++) {
if (j == 0) { times[i] = 0; }
times[i] += runTest(1 << i, fillVal, 1);
}
}
for (int i = 0; i < 31; i++) {
printf("%d,%lld\n", 1 << i, times[i] / test_iters);
}
return 0;
}
auto start = chrono::high_resolution_clock::now();
// This macro is used because the compiler throws a compile error when trying to parse
// the correct CUDA kernel syntax
convEncode KERNEL_ARGS2((data_length + THREADS - 1) / THREADS, THREADS) (A, B);
CUDACHECK(hipDeviceSynchronize());
auto end = chrono::high_resolution_clock::now() - start;
long long elapsed_time = chrono::duration_cast<chrono::microseconds>(end).count();
printf("Elapsed time %lld \u03bcs\n", elapsed_time);
hipFree(A);
hipFree(B);
return 0;
}
| 378ee67c17eb4fbda87ecdee2b1df6eaf6113fa5.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
#include <chrono>
#ifdef __CUDACC__
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif
using namespace std;
// Some commonly referenced values
#define NUM_BITS ((unsigned int)(8 * sizeof(unsigned int)))
#define dataArrayLength ((data_length + NUM_BITS - 1) / NUM_BITS)
#define resultArrayLength ((data_length * NUM_FILTERS + NUM_BITS - 1) / NUM_BITS)
// Macro for automatically checking and reporting CUDA errors
#define CUDACHECK(cmd) do { \
cudaError_t e = cmd; \
if( e != cudaSuccess ) { \
printf("Failed: Cuda error %s:%d '%s'\n", \
__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
} while(0)
// Constants defined for encoding
unsigned int data_length = 256;
const unsigned int THREADS = 256;
const unsigned int FILTER_LENGTH = 3;
const unsigned int NUM_FILTERS = 2;
const unsigned long long FILTERS = (0b111 << 3) | (0b011);
// misc global data
const int test_iters = 1;
char nums[2] = { '0', '1' };
// Convolutional encoder kernel: each thread computes the NUM_FILTERS parity
// outputs for one input bit position, then the first
// NUM_FILTERS*THREADS/NUM_BITS threads of the block pack the per-bit results
// into whole output words. Assumes the kernel is launched with exactly
// THREADS threads per block (bits[] is indexed by threadIdx.x).
__global__
void convEncode(const unsigned int *data, unsigned int *output) {
// First input word handled by this block.
unsigned int start = blockIdx.x * (THREADS / NUM_BITS);
unsigned int idx = threadIdx.x;
// Retrieve the relevant bits from the data and shift them to LSB
unsigned int val = (((((idx / NUM_BITS) | start) == 0 ? 0 // Don't attempt to look up non-existent values
: data[idx / NUM_BITS + start - 1] << (1 + (idx % NUM_BITS)))) // Retrieve both the previous value in the array
| (data[idx / NUM_BITS + start] >> (NUM_BITS - 1 - (idx % NUM_BITS)))) // and the current, in case the filter lies on a word boundary
& ((1 << FILTER_LENGTH) - 1);
// One result byte per bit position; written below, read after the barrier.
__shared__ char bits[THREADS];
bits[idx] = 0;
#pragma unroll
// Each thread evaluates all of the filters on its data
for (unsigned int i = 0; i < NUM_FILTERS; i++) {
// Select filter i's taps from the packed FILTERS constant and AND with
// the window; the remaining lines compute the parity of the result.
unsigned int n = val & ((unsigned int)((FILTERS >> ((NUM_FILTERS - i - 1) * FILTER_LENGTH))
& ((1 << FILTER_LENGTH) - 1)));
n ^= n >> 1; // Parity of pairs of bits
n ^= n >> 2; // Parity of every 4 bits
n = (n & 0x11111111U) * 0x11111111U; // Multiply to combine all the 4 bit parities
n = (n >> 28) & 1; // Shift and mask the parity bit to the LSB
bits[idx] |= (n << (NUM_FILTERS - 1 - i));
}
__syncthreads();
// Write the result from the shared memory to device memory.
// Every element in the output array is handled by a separate thread
// to prevent errors from threads overwriting each other.
if (idx < NUM_FILTERS * THREADS / NUM_BITS) {
unsigned int o = 0;
for (int i = idx * NUM_BITS / NUM_FILTERS; i < (idx + 1) * NUM_BITS / NUM_FILTERS; i++) {
o <<= NUM_FILTERS;
// NOTE(review): ~(1 << NUM_FILTERS) clears only bit NUM_FILTERS; it
// behaves like the presumably intended low-bit mask ((1<<NUM_FILTERS)-1)
// only because bits[] never has higher bits set -- confirm.
o |= bits[i] & (~(1 << NUM_FILTERS));
}
output[NUM_FILTERS * start + idx] = o;
}
}
// Print `length` words of `data` as a continuous binary string (MSB first),
// followed by a newline.
void printBitString(const unsigned int *data, int length) {
  for (int i = 0; i < length; i++) {
    for (int j = 0; j < NUM_BITS; j++) {
      // Fix: the original shifted by (NUM_BITS - j), which is a shift by 32
      // when j == 0 -- undefined behaviour on a 32-bit operand -- and never
      // printed bit 0. Shift so bit (NUM_BITS-1-j) lands in the LSB instead.
      cout << nums[(data[i] >> (NUM_BITS - 1 - j)) & 1];
    }
  }
  cout << endl;
}
// Map a single hexadecimal digit to its numeric value (0-15).
// Accepts '0'-'9', 'A'-'F' and 'a'-'f'; any other character yields -1.
int hex2int(char ch) {
  // Decimal digits map directly onto 0..9.
  if ('0' <= ch && ch <= '9')
    return ch - '0';
  // Upper- and lower-case hex letters map onto 10..15.
  if ('A' <= ch && ch <= 'F')
    return 10 + (ch - 'A');
  if ('a' <= ch && ch <= 'f')
    return 10 + (ch - 'a');
  // Not a hexadecimal digit.
  return -1;
}
// Parse a hex string into an array of packed unsigned ints: each character
// contributes one byte (its nibble value, or 0xFF & -1 for invalid digits),
// packed big-endian, four characters per word. Caller owns (and must free())
// the returned buffer.
unsigned int* loadInpData(const char *inp) {
  int messageLen = strlen(inp);
  printf("Message length: %d", messageLen);
  // Fix: the original malloc'd the buffer and then read each word ("<<= 8")
  // before fully writing it, folding indeterminate bytes into the output.
  // calloc guarantees the words start at zero (and rounds the size up to a
  // whole number of words).
  unsigned int *data = (unsigned int*)calloc(
      (messageLen + sizeof(unsigned int) - 1) / sizeof(unsigned int),
      sizeof(unsigned int));
  for (int i = 0; i < messageLen; i++) {
    data[i / (sizeof(unsigned int) / sizeof(char))] <<= 8;
    data[i / (sizeof(unsigned int) / sizeof(char))] |= (0xFF & (hex2int(inp[i])));
  }
  return data;
}
// Time `numIters` launches of convEncode on an input of `length` bits whose
// words are byte-filled with `fill`. Returns elapsed wall time in
// microseconds (launch + synchronisation only).
// NOTE(review): the staging buffer C is sized by the *global* data_length
// (via dataArrayLength), not by `length`; intent unconfirmed, so only the
// out-of-bounds copy and the leak are fixed here.
long long runTest(unsigned long length, unsigned int fill, unsigned int numIters) {
  unsigned int *A, *B, *C;
  C = (unsigned int*)(malloc(sizeof(unsigned int) * dataArrayLength));
  memset(C, fill, sizeof(unsigned int) * dataArrayLength);
  CUDACHECK(cudaMallocManaged(&A, ((length + NUM_BITS - 1) / NUM_BITS) * sizeof(unsigned int)));
  CUDACHECK(cudaMallocManaged(&B, ((length * NUM_FILTERS + NUM_BITS - 1) / NUM_BITS) * sizeof(unsigned int)));
  // CUDACHECK(cudaMemset(A, fill, ((length + NUM_BITS - 1) / NUM_BITS) * sizeof(unsigned int)));
  // Fix: the original always copied dataArrayLength words, overflowing A
  // whenever length < data_length. Copy no more than A can hold.
  size_t aWords = (length + NUM_BITS - 1) / NUM_BITS;
  size_t cWords = dataArrayLength;
  memcpy(A, C, sizeof(unsigned int) * (aWords < cWords ? aWords : cWords));
  CUDACHECK(cudaMemset(B, 0, ((length * NUM_FILTERS + NUM_BITS - 1) / NUM_BITS) * sizeof(unsigned int)));
  auto start = chrono::high_resolution_clock::now();
  for (unsigned int j = 0; j < numIters; j++) {
    // This macro is used because the compiler throws a compile error when trying to parse
    // the correct CUDA kernel syntax
    convEncode KERNEL_ARGS2((length + THREADS - 1) / THREADS, THREADS) (A, B);
    CUDACHECK(cudaDeviceSynchronize());
  }
  auto end = chrono::high_resolution_clock::now();
  long long elapsed_time = chrono::duration_cast<chrono::microseconds>(end - start).count();
  free(C); // fix: C was leaked on every call
  cudaFree(A);
  cudaFree(B);
  return elapsed_time;
}
/**
* When called from the command line, the following arguments can be passed:
* -t -- Flag to indicate the code should execute a performance test. Any arguments following this flag will be ignored
* -m -- Use user provided data. Data should immediately follow the flag and should be in hex format.
* -l -- Specifies a custom message length, in bits. If the -m flag is also used, this will have no effect.
* -f -- Specifies the 32 bit value to fill the input array with. All formats accepted, with appropriate prefix (0b, 0x, etc.)
* If -m flag is set, has no effect.
*/
// Entry point: parses the command line (see the comment block above),
// prepares managed input/output buffers, and either runs the timing sweep
// (-t) or a single timed encode of data_length bits.
int main(int argc, char *argv[]) {
unsigned int fillVal = 0xFFFFFFFF;
bool useProvidedData = false, runtest = false;
unsigned int *data;
// Parse command line arguments
// Usage:
if (argc > 1) {
for (int i = 1; i < argc; i++) {
if (argv[i][0] == '-') {
//printf("%c\t%d\t%d\n", argv[i][1], i, argv[i][1] == 'l');
switch (argv[i][1]) {
case 'L':
case 'l':
i++;
// Round the requested bit length up to a whole word; -m wins over -l.
if (i < argc and !useProvidedData) { data_length = (NUM_BITS - 1 + (unsigned int)(strtol(argv[i], NULL, 0))) / NUM_BITS * NUM_BITS; }
break;
case 'F':
case 'f':
i++;
if (i < argc) { fillVal = (unsigned int)(strtol(argv[i], NULL, 0)); }
break;
case 'M':
case 'm':
i++;
if (i < argc) {
data = loadInpData(argv[i]);
useProvidedData = true;
}
break;
case 't':
case 'T':
// -t consumes the rest of the command line.
i = argc;
useProvidedData = false, runtest = true;
break;
default:
printf("Invalid argument: %s", argv[i]);
exit(1);
}
}
}
}
unsigned int *A, *B;
CUDACHECK(cudaMallocManaged(&A, dataArrayLength * sizeof(unsigned int)));
CUDACHECK(cudaMallocManaged(&B, resultArrayLength * sizeof(unsigned int)));
if (!useProvidedData) {
// NOTE(review): memset fills byte-wise, so only the low byte of fillVal is
// replicated across A -- confirm whether a full 32-bit fill was intended.
memset(A, fillVal, dataArrayLength * sizeof(unsigned int));
}
else {
memcpy(A, data, sizeof(unsigned int) * dataArrayLength);
}
CUDACHECK(cudaMemset(B, 0, resultArrayLength * sizeof(unsigned int)));
// Run the kernel once to reduce timing errors from initial run
convEncode KERNEL_ARGS2((data_length + THREADS - 1) / THREADS, THREADS) (A, B);
CUDACHECK(cudaDeviceSynchronize());
if (runtest) {
// Sweep message sizes 2^0 .. 2^30, averaging over test_iters repetitions,
// and print CSV rows of "size,microseconds".
long long times[31];
for (int j = 0; j < test_iters; j++) {
for (int i = 0; i < 31; i++) {
if (j == 0) { times[i] = 0; }
times[i] += runTest(1 << i, fillVal, 1);
}
}
for (int i = 0; i < 31; i++) {
printf("%d,%lld\n", 1 << i, times[i] / test_iters);
}
return 0;
}
auto start = chrono::high_resolution_clock::now();
// This macro is used because the compiler throws a compile error when trying to parse
// the correct CUDA kernel syntax
convEncode KERNEL_ARGS2((data_length + THREADS - 1) / THREADS, THREADS) (A, B);
CUDACHECK(cudaDeviceSynchronize());
auto end = chrono::high_resolution_clock::now() - start;
long long elapsed_time = chrono::duration_cast<chrono::microseconds>(end).count();
printf("Elapsed time %lld \u03bcs\n", elapsed_time);
cudaFree(A);
cudaFree(B);
return 0;
}
|
5814e28f2eda4c565f49359d484fa3c41c53af44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/function/kernel/batch_normalization.cuh>
namespace nbla {
template <typename T>
void forward_batch(const int size0, const int size1, const int size2,
const float decay_rate, const float eps, const T *x,
const T *gamma, const T *beta, T *m, T *v, T *rm, T *rv,
T *y) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(forward_batch_mean_variance_kernel,
/* Input */
size1, size2, size0 * size2, size1 * size2,
decay_rate, eps, x, gamma, beta,
/* Output */
m, v, rm, rv);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(forward_batch_gamma_beta_kernel,
/* Input */
size1 * size0 * size2, size0, size2,
size0 * size2, size1 * size2, decay_rate, eps,
x, m, v, rm, rv, gamma, beta,
/* Output */
y);
}
template <typename T>
void backward_batch_data(const int size0, const int size1, const int size2,
const float decay_rate, const float eps, const T *dy,
const T *m, const T *v, const T *x, const T *g,
const T *dm, const T *dv, T *dx, T *dmean, T *dvar) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(backward_batch_data_mean_variance_kernel,
/* Input */
size1, size2, size0 * size2, size1 * size2,
decay_rate, eps, dy, m, v, x, g, dm, dv,
/* Output */
dmean, dvar);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(backward_batch_data_dx_kernel,
/* Input */
size1 * size0 * size2, size0, size1, size2,
size0 * size2, size1 * size2, decay_rate, eps,
dy, m, v, x, g, dm, dv, dmean, dvar,
/* Output */
dx);
}
//#define TEST_FEATURE_MEAN_VARIANCE_AXIS_REDUCTION_KERNEL
//#define TEST_FEATURE_MEAN_VARIANCE_KERNEL
template <typename T>
void forward_batch_parallel_reduction(
const int size0, const int size1, const int size2, const int ndim,
const int *axes, const int *x_strides, const int *x_shape,
const int *y_strides, const int *y_shape, const float decay_rate,
const float eps, const T *x, const T *gamma, const T *beta, T *x_trans,
T *m, T *v, T *rm, T *rv, T *y, T *tmp_mean_buffer_per_block,
T *tmp_variance_buffer_per_block, T *inv_sqrt_variance) {
int N = size0 * size2;
reduction_blocks(blocks, N);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(transpose_kernel, size1 * N, ndim, axes,
x_strides, y_strides, y_shape, x, x_trans);
#ifdef TEST_FEATURE_MEAN_VARIANCE_AXIS_REDUCTION_KERNEL
printf("TEST_FEATURE_MEAN_VARIANCE_AXIS_REDUCTION_KERNEL\n");
hipLaunchKernelGGL(( mean_variance_with_axis_kernel), dim3(blocks), dim3(NBLA_CUDA_NUM_THREADS), 0, 0,
x_trans, tmp_mean_buffer_per_block, tmp_variance_buffer_per_block, m, v,
N, blocks, size1);
#elif defined TEST_FEATURE_MEAN_VARIANCE_KERNEL
printf("TEST_FEATURE_MEAN_VARIANCE_KERNEL\n");
for (int i = 0; i < size1; ++i) {
hipLaunchKernelGGL(( mean_variance_kernel), dim3(blocks), dim3(NBLA_CUDA_NUM_THREADS), 0, 0,
x_trans + i * N, tmp_mean_buffer_per_block,
tmp_variance_buffer_per_block, m + i, v + i, N, blocks);
}
#else
blocks = min((N + NBLA_CUDA_NUM_THREADS - 1) / NBLA_CUDA_NUM_THREADS, 1024);
for (int i = 0; i < size1; ++i) {
hipLaunchKernelGGL(( forward_batch_kernel_mean_variance_preprocess), dim3(blocks),
dim3(NBLA_CUDA_NUM_THREADS), 0, 0,
/* Input */
x_trans + i * N, N,
/* Output */
tmp_mean_buffer_per_block, tmp_variance_buffer_per_block);
hipLaunchKernelGGL(( forward_batch_kernel_mean_variance_postprocess), dim3(1), dim3(1024), 0, 0,
/* Input */
tmp_mean_buffer_per_block, tmp_variance_buffer_per_block, blocks,
decay_rate, 1. / N, (float)N / (N - 1),
/* Output */
m + i, v + i, rm + i, rv + i);
}
#endif
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(forward_batch_kernel_gamma_beta_trans,
/* Input */
size1 * N, N, x_trans, gamma, beta, m, v,
decay_rate, eps, ndim, axes, y_strides,
x_strides, x_shape,
/* Output */
y, inv_sqrt_variance);
}
template <typename T>
void backward_batch_data_parallel_reduction(
const int size0, const int size1, const int size2, const int ndim,
const int *axes, const int *x_strides, const int *x_shape,
const int *y_strides, const int *y_shape, const float decay_rate,
const float eps, const T *dy, const T *m, const T *v, const T *x,
const T *g, const T *dm, const T *dv, T *dx, T *tmp_mean_buffer_per_block,
T *tmp_variance_buffer_per_block, T *tmp_t_buffer_per_block, T *dmean,
T *dvar, T *t, T *inv_sqrt_variance, T *x_trans, T *dy_trans) {
int N = size0 * size2;
int shape_size = size1 * N;
int blocks =
min((N + NBLA_CUDA_NUM_THREADS - 1) / NBLA_CUDA_NUM_THREADS, 1024);
for (int i = 0; i < size1; i++) {
hipLaunchKernelGGL(( backward_batch_data_kernel_mean_variance_preprocess),
dim3(blocks), dim3(NBLA_CUDA_NUM_THREADS), 0, 0,
/* Input */
N, dy_trans + i * N, x_trans + i * N, g + i, m + i,
/* Output */
tmp_mean_buffer_per_block, tmp_variance_buffer_per_block,
tmp_t_buffer_per_block);
hipLaunchKernelGGL(( backward_batch_data_kernel_mean_variance_postprocess), dim3(1), dim3(1024), 0, 0,
/* Input */
tmp_mean_buffer_per_block, tmp_variance_buffer_per_block,
tmp_t_buffer_per_block, blocks, 1. / N, v + i, dm, dv, eps, N,
inv_sqrt_variance + i, i,
/* Output */
dmean + i, dvar + i, t + i);
}
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(backward_batch_data_kernel_gamma_beta_trans,
/* Input */
shape_size, 1. / N, dy_trans, x_trans, g, v, m,
dmean, dvar, ndim, axes, y_strides, x_strides,
x_shape, inv_sqrt_variance,
/* Output */
dx);
}
template <typename T>
void backward_batch_gamma_beta_parallel_reduction(
const int size0, const int size1, const int size2, const T *dy_trans,
const T *m, const T *v, const T *x_trans, float eps, T *db, T *dg,
T *gamma_reduction_space, T *beta_reduction_space, T *inv_sqrt_variance) {
int N = size0 * size2;
int blocks =
min((N + NBLA_CUDA_NUM_THREADS - 1) / NBLA_CUDA_NUM_THREADS, 1024);
for (int i = 0; i < size1; i++) {
hipLaunchKernelGGL(( backward_batch_kernel_gamma_beta_preprocess), dim3(blocks),
dim3(NBLA_CUDA_NUM_THREADS), 0, 0,
/* Input */
N, dy_trans + i * N, x_trans + i * N, m + i,
/* Output */
gamma_reduction_space, beta_reduction_space, inv_sqrt_variance + i);
hipLaunchKernelGGL(( backward_batch_kernel_gamma_beta_postprocess), dim3(1), dim3(1024), 0, 0,
/* Input */
gamma_reduction_space, beta_reduction_space, blocks,
/* Output */
dg + i, db + i);
}
}
}
| 5814e28f2eda4c565f49359d484fa3c41c53af44.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/function/kernel/batch_normalization.cuh>
namespace nbla {
// Batch-normalization forward pass (training): the first kernel computes the
// per-channel batch mean m and variance v and folds them into the running
// statistics rm/rv using decay_rate; the second normalizes x with gamma/beta
// (and eps) into y. The index arithmetic passed to the kernels implies
// size1 is the normalized (channel) axis with size0 outer / size2 inner
// elements per channel -- confirm against batch_normalization.cuh.
template <typename T>
void forward_batch(const int size0, const int size1, const int size2,
const float decay_rate, const float eps, const T *x,
const T *gamma, const T *beta, T *m, T *v, T *rm, T *rv,
T *y) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(forward_batch_mean_variance_kernel,
/* Input */
size1, size2, size0 * size2, size1 * size2,
decay_rate, eps, x, gamma, beta,
/* Output */
m, v, rm, rv);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(forward_batch_gamma_beta_kernel,
/* Input */
size1 * size0 * size2, size0, size2,
size0 * size2, size1 * size2, decay_rate, eps,
x, m, v, rm, rv, gamma, beta,
/* Output */
y);
}
// Batch-normalization backward pass w.r.t. the input: the first kernel
// reduces the gradients of the batch mean and variance (dmean, dvar) from
// the upstream gradient dy (plus optional direct gradients dm/dv); the
// second kernel combines them into the input gradient dx. Parameters mirror
// forward_batch; g is presumably gamma -- confirm against the kernel header.
template <typename T>
void backward_batch_data(const int size0, const int size1, const int size2,
const float decay_rate, const float eps, const T *dy,
const T *m, const T *v, const T *x, const T *g,
const T *dm, const T *dv, T *dx, T *dmean, T *dvar) {
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(backward_batch_data_mean_variance_kernel,
/* Input */
size1, size2, size0 * size2, size1 * size2,
decay_rate, eps, dy, m, v, x, g, dm, dv,
/* Output */
dmean, dvar);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(backward_batch_data_dx_kernel,
/* Input */
size1 * size0 * size2, size0, size1, size2,
size0 * size2, size1 * size2, decay_rate, eps,
dy, m, v, x, g, dm, dv, dmean, dvar,
/* Output */
dx);
}
//#define TEST_FEATURE_MEAN_VARIANCE_AXIS_REDUCTION_KERNEL
//#define TEST_FEATURE_MEAN_VARIANCE_KERNEL
// Forward pass variant that first transposes x into channel-major layout
// (x_trans) and then performs a two-stage parallel reduction per channel:
// a grid of blocks produces per-block partial sums in the tmp_* buffers and
// a single-block kernel finishes the mean/variance and the running-stat
// update. inv_sqrt_variance is cached for reuse in the backward pass. The
// two TEST_FEATURE_* branches are alternative reduction implementations
// kept for experimentation.
template <typename T>
void forward_batch_parallel_reduction(
const int size0, const int size1, const int size2, const int ndim,
const int *axes, const int *x_strides, const int *x_shape,
const int *y_strides, const int *y_shape, const float decay_rate,
const float eps, const T *x, const T *gamma, const T *beta, T *x_trans,
T *m, T *v, T *rm, T *rv, T *y, T *tmp_mean_buffer_per_block,
T *tmp_variance_buffer_per_block, T *inv_sqrt_variance) {
// N = samples per channel; `blocks` is declared by the reduction_blocks macro.
int N = size0 * size2;
reduction_blocks(blocks, N);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(transpose_kernel, size1 * N, ndim, axes,
x_strides, y_strides, y_shape, x, x_trans);
#ifdef TEST_FEATURE_MEAN_VARIANCE_AXIS_REDUCTION_KERNEL
printf("TEST_FEATURE_MEAN_VARIANCE_AXIS_REDUCTION_KERNEL\n");
mean_variance_with_axis_kernel<<<blocks, NBLA_CUDA_NUM_THREADS>>>(
x_trans, tmp_mean_buffer_per_block, tmp_variance_buffer_per_block, m, v,
N, blocks, size1);
#elif defined TEST_FEATURE_MEAN_VARIANCE_KERNEL
printf("TEST_FEATURE_MEAN_VARIANCE_KERNEL\n");
for (int i = 0; i < size1; ++i) {
mean_variance_kernel<<<blocks, NBLA_CUDA_NUM_THREADS>>>(
x_trans + i * N, tmp_mean_buffer_per_block,
tmp_variance_buffer_per_block, m + i, v + i, N, blocks);
}
#else
// Default path: per-channel two-stage reduction (partials, then finish).
blocks = min((N + NBLA_CUDA_NUM_THREADS - 1) / NBLA_CUDA_NUM_THREADS, 1024);
for (int i = 0; i < size1; ++i) {
forward_batch_kernel_mean_variance_preprocess<<<blocks,
NBLA_CUDA_NUM_THREADS>>>(
/* Input */
x_trans + i * N, N,
/* Output */
tmp_mean_buffer_per_block, tmp_variance_buffer_per_block);
// (float)N / (N - 1) applies Bessel's correction to the running variance.
forward_batch_kernel_mean_variance_postprocess<<<1, 1024>>>(
/* Input */
tmp_mean_buffer_per_block, tmp_variance_buffer_per_block, blocks,
decay_rate, 1. / N, (float)N / (N - 1),
/* Output */
m + i, v + i, rm + i, rv + i);
}
#endif
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(forward_batch_kernel_gamma_beta_trans,
/* Input */
size1 * N, N, x_trans, gamma, beta, m, v,
decay_rate, eps, ndim, axes, y_strides,
x_strides, x_shape,
/* Output */
y, inv_sqrt_variance);
}
// Backward pass counterpart of forward_batch_parallel_reduction: consumes
// the already-transposed x_trans/dy_trans and the cached inv_sqrt_variance,
// reduces dmean/dvar (and an intermediate t) per channel in two stages, then
// assembles dx while transposing back to the original layout.
template <typename T>
void backward_batch_data_parallel_reduction(
const int size0, const int size1, const int size2, const int ndim,
const int *axes, const int *x_strides, const int *x_shape,
const int *y_strides, const int *y_shape, const float decay_rate,
const float eps, const T *dy, const T *m, const T *v, const T *x,
const T *g, const T *dm, const T *dv, T *dx, T *tmp_mean_buffer_per_block,
T *tmp_variance_buffer_per_block, T *tmp_t_buffer_per_block, T *dmean,
T *dvar, T *t, T *inv_sqrt_variance, T *x_trans, T *dy_trans) {
// N = samples per channel; one two-stage reduction per channel i.
int N = size0 * size2;
int shape_size = size1 * N;
int blocks =
min((N + NBLA_CUDA_NUM_THREADS - 1) / NBLA_CUDA_NUM_THREADS, 1024);
for (int i = 0; i < size1; i++) {
backward_batch_data_kernel_mean_variance_preprocess<<<
blocks, NBLA_CUDA_NUM_THREADS>>>(
/* Input */
N, dy_trans + i * N, x_trans + i * N, g + i, m + i,
/* Output */
tmp_mean_buffer_per_block, tmp_variance_buffer_per_block,
tmp_t_buffer_per_block);
backward_batch_data_kernel_mean_variance_postprocess<<<1, 1024>>>(
/* Input */
tmp_mean_buffer_per_block, tmp_variance_buffer_per_block,
tmp_t_buffer_per_block, blocks, 1. / N, v + i, dm, dv, eps, N,
inv_sqrt_variance + i, i,
/* Output */
dmean + i, dvar + i, t + i);
}
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(backward_batch_data_kernel_gamma_beta_trans,
/* Input */
shape_size, 1. / N, dy_trans, x_trans, g, v, m,
dmean, dvar, ndim, axes, y_strides, x_strides,
x_shape, inv_sqrt_variance,
/* Output */
dx);
}
// Backward pass w.r.t. the affine parameters: for each channel i, a grid of
// blocks accumulates partial sums for dgamma/dbeta from dy_trans and the
// normalized input, and a single-block kernel reduces the partials into
// dg[i] and db[i]. eps is accepted for interface symmetry; the visible code
// does not pass it to the kernels.
template <typename T>
void backward_batch_gamma_beta_parallel_reduction(
const int size0, const int size1, const int size2, const T *dy_trans,
const T *m, const T *v, const T *x_trans, float eps, T *db, T *dg,
T *gamma_reduction_space, T *beta_reduction_space, T *inv_sqrt_variance) {
// N = samples per channel.
int N = size0 * size2;
int blocks =
min((N + NBLA_CUDA_NUM_THREADS - 1) / NBLA_CUDA_NUM_THREADS, 1024);
for (int i = 0; i < size1; i++) {
backward_batch_kernel_gamma_beta_preprocess<<<blocks,
NBLA_CUDA_NUM_THREADS>>>(
/* Input */
N, dy_trans + i * N, x_trans + i * N, m + i,
/* Output */
gamma_reduction_space, beta_reduction_space, inv_sqrt_variance + i);
backward_batch_kernel_gamma_beta_postprocess<<<1, 1024>>>(
/* Input */
gamma_reduction_space, beta_reduction_space, blocks,
/* Output */
dg + i, db + i);
}
}
}
|
300f4723e7ccf9adf559ae1fd6d4a04a20c6daad.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <vector>
#include <mpi.h> //activate mpi
#include "netcdf_par.h"
#include "file/nc_utilities.h"
#include "toeflR.cuh"
#include "dg/algorithm.h"
#include "dg/backend/xspacelib.cuh"
#include "parameters.h"
#include "dg/backend/timer.cuh"
/*
- reads parameters from input.txt or any other given file,
- integrates the ToeflR - functor and
- writes outputs to a given outputfile using hdf5.
density fields are the real densities in XSPACE ( not logarithmic values)
*/
/**
 * MPI-parallel driver for the 2d ToeflR turbulence model (HIP/ROCm build).
 *
 * Usage: program [inputfile] [outputfile]
 *   - the process-grid extents np[0] x np[1] are read from stdin on rank 0,
 *   - [inputfile] is a json parameter file parsed into Parameters,
 *   - [outputfile] is a parallel netCDF-4 file receiving the fields
 *     "electrons", "ions", "potential", "vorticity" plus energy diagnostics.
 *
 * Returns 0 on success, -1 on setup failure (wrong MPI threading level,
 * no GPU found, or wrong argument count).
 */
int main( int argc, char* argv[])
{
    ////////////////////////////////setup MPI///////////////////////////////
    int provided;
    MPI_Init_thread( &argc, &argv, MPI_THREAD_FUNNELED, &provided);
    if( provided != MPI_THREAD_FUNNELED)
    {
        std::cerr << "wrong mpi-thread environment provided!\n";
        return -1;
    }
    int periods[2] = {false, true}; //non-, periodic
    int rank, size;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank);
    MPI_Comm_size( MPI_COMM_WORLD, &size);
#if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA
    // Round-robin GPU assignment: rank r uses device r % (devices per node).
    int num_devices=0;
    hipGetDeviceCount(&num_devices);
    if(num_devices==0){std::cerr << "No CUDA capable devices found"<<std::endl; return -1;}
    int device = rank % num_devices; //assume # of gpus/node is fixed
    hipSetDevice( device);
#endif//cuda
    // Process-grid extents are read from stdin on rank 0 and broadcast.
    int np[2];
    if(rank==0)
    {
        std::cin>> np[0] >> np[1];
        std::cout << "Computing with "<<np[0]<<" x "<<np[1]<<" = "<<size<<std::endl;
        assert( size == np[0]*np[1]);
    }
    MPI_Bcast( np, 2, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm comm;
    MPI_Cart_create( MPI_COMM_WORLD, 2, np, periods, true, &comm);
    ////////////////////////Parameter initialisation//////////////////////////
    Json::Reader reader;
    Json::Value js;
    if( argc != 3)
    {
        if(rank==0)std::cerr << "ERROR: Wrong number of arguments!\nUsage: "<< argv[0]<<" [inputfile] [outputfile]\n";
        return -1;
    }
    else
    {
        std::ifstream is(argv[1]);
        reader.parse( is, js, false); //read input without comments
    }
    std::string input = js.toStyledString(); //save input without comments, which is important if netcdf file is later read by another parser
    const Parameters p( js);
    if(rank==0)p.display( std::cout);
    ////////////////////////////////set up computations///////////////////////////
    // Computational grid and (possibly coarser) output grid share topology.
    dg::MPIGrid2d grid( 0, p.lx, 0, p.ly, p.n, p.Nx, p.Ny, p.bc_x, p.bc_y, comm);
    dg::MPIGrid2d grid_out( 0., p.lx, 0.,p.ly, p.n_out, p.Nx_out, p.Ny_out, p.bc_x, p.bc_y, comm);
    //create RHS
    dg::ToeflR< dg::CartesianMPIGrid2d, dg::MDMatrix, dg::MDVec > test( grid, p);
    dg::Diffusion<dg::CartesianMPIGrid2d, dg::MDMatrix, dg::MDVec> diffusion( grid, p.nu);
    //////////////////create initial vector///////////////////////////////////////
    dg::Gaussian g( p.posX*p.lx, p.posY*p.ly, p.sigma, p.sigma, p.amp);
    std::vector<dg::MDVec> y0(2, dg::evaluate( g, grid)), y1(y0); // n_e' = gaussian
    dg::blas2::symv( test.gamma(), y0[0], y0[1]); // n_e = \Gamma_i n_i -> n_i = ( 1+alphaDelta) n_e' + 1
    {
        dg::MDVec v2d = dg::create::inv_weights(grid);
        dg::blas2::symv( v2d, y0[1], y0[1]);
    }
    // Gravity/drift variants start with a zero second field.
    if( p.equations == "gravity_local" || p.equations == "gravity_global" || p.equations == "drift_global" ){
        y0[1] = dg::evaluate( dg::zero, grid);
    }
    //////////////////////////////////////////////////////////////////////
    //////////////////initialisation of timestepper and first step///////////////////
    double time = 0;
    //dg::AB< k, std::vector<dg::MDVec> > ab( y0);
    dg::Karniadakis< std::vector<dg::MDVec> > ab( y0, y0[0].size(), 1e-9);
    ab.init( test, diffusion, y0, p.dt);
    y0.swap( y1); //y1 now contains value at zero time
    /////////////////////////////set up netcdf/////////////////////////////////////
    // All ranks write collectively into one parallel netCDF-4 file.
    file::NC_Error_Handle err;
    int ncid;
    MPI_Info info = MPI_INFO_NULL;
    err = nc_create_par( argv[2],NC_NETCDF4|NC_MPIIO|NC_CLOBBER,comm,info, &ncid);
    err = nc_put_att_text( ncid, NC_GLOBAL, "inputfile", input.size(), input.data());
    const int version[3] = {FELTOR_MAJOR_VERSION, FELTOR_MINOR_VERSION, FELTOR_SUBMINOR_VERSION}; //write maybe to json file!?
    err = nc_put_att_int( ncid, NC_GLOBAL, "feltor_major_version", NC_INT, 1, &version[0]);
    err = nc_put_att_int( ncid, NC_GLOBAL, "feltor_minor_version", NC_INT, 1, &version[1]);
    err = nc_put_att_int( ncid, NC_GLOBAL, "feltor_subminor_version", NC_INT, 1, &version[2]);
    int dim_ids[3], tvarID;
    err = file::define_dimensions( ncid, dim_ids, &tvarID, grid_out.global());
    //field IDs
    std::string names[4] = {"electrons", "ions", "potential", "vorticity"};
    int dataIDs[4];
    for( unsigned i=0; i<4; i++){
        err = nc_def_var( ncid, names[i].data(), NC_DOUBLE, 3, dim_ids, &dataIDs[i]);}
    //energy IDs
    int EtimeID, EtimevarID;
    err = file::define_time( ncid, "energy_time", &EtimeID, &EtimevarID);
    int energyID, massID, dissID, dEdtID;
    err = nc_def_var( ncid, "energy",      NC_DOUBLE, 1, &EtimeID, &energyID);
    err = nc_def_var( ncid, "mass",        NC_DOUBLE, 1, &EtimeID, &massID);
    err = nc_def_var( ncid, "dissipation", NC_DOUBLE, 1, &EtimeID, &dissID);
    err = nc_def_var( ncid, "dEdt",        NC_DOUBLE, 1, &EtimeID, &dEdtID);
    // Collective access mode is required for parallel writes below.
    for(unsigned i=0; i<4; i++)
        err = nc_var_par_access( ncid, dataIDs[i], NC_COLLECTIVE);
    err = nc_var_par_access( ncid, tvarID, NC_COLLECTIVE);
    err = nc_var_par_access( ncid, EtimevarID, NC_COLLECTIVE);
    err = nc_var_par_access( ncid, energyID, NC_COLLECTIVE);
    err = nc_var_par_access( ncid, massID, NC_COLLECTIVE);
    err = nc_var_par_access( ncid, dissID, NC_COLLECTIVE);
    err = nc_var_par_access( ncid, dEdtID, NC_COLLECTIVE);
    err = nc_enddef(ncid);
    ///////////////////////////////////first output/////////////////////////
    // Each rank writes its own hyperslab, located by its Cartesian coords.
    int dims[2],  coords[2];
    MPI_Cart_get( comm, 2, dims, periods, coords);
    size_t count[3] = {1, grid_out.n()*grid_out.Ny(), grid_out.n()*grid_out.Nx()};
    size_t start[3] = {0, coords[1]*count[1],          coords[0]*count[2]};
    size_t Estart[] = {0};
    size_t Ecount[] = {1};
    dg::MDVec transfer( dg::evaluate(dg::zero, grid));
    dg::DVec transferD( dg::evaluate(dg::zero, grid_out.local()));
    dg::HVec transferH( dg::evaluate(dg::zero, grid_out.local()));
    dg::IDMatrix interpolate = dg::create::interpolation( grid_out.local(), grid.local()); //create local interpolation matrix
    for( unsigned i=0; i<2; i++)
    {
        dg::blas2::gemv( interpolate, y0[i].data(), transferD);
        dg::blas1::transfer( transferD, transferH);
        err = nc_put_vara_double( ncid, dataIDs[i], start, count, transferH.data() );
    }
    //pot
    transfer = test.potential()[0];
    dg::blas2::gemv( interpolate, transfer.data(), transferD);
    dg::blas1::transfer( transferD, transferH);
    err = nc_put_vara_double( ncid, dataIDs[2], start, count, transferH.data() );
    //Vor
    // Vorticity = Laplacian of the potential.
    transfer = test.potential()[0];
    dg::blas2::gemv( diffusion.laplacianM(), transfer, y1[1]);
    dg::blas2::gemv( interpolate,y1[1].data(), transferD);
    dg::blas1::transfer( transferD, transferH);
    err = nc_put_vara_double( ncid, dataIDs[3], start, count, transferH.data() );
    err = nc_put_vara_double( ncid, tvarID, start, count, &time);
    //err = nc_close(ncid);
    ///////////////////////////////////////Timeloop/////////////////////////////////
    const double mass0 = test.mass(), mass_blob0 = mass0 - grid.lx()*grid.ly();
    double E0 = test.energy(), energy0 = E0, E1 = 0, diff = 0;
    dg::Timer t;
    t.tic();
    try
    {
#ifdef DG_BENCHMARK
    unsigned step = 0;
#endif //DG_BENCHMARK
    for( unsigned i=1; i<=p.maxout; i++)
    {
#ifdef DG_BENCHMARK
        dg::Timer ti;
        ti.tic();
#endif//DG_BENCHMARK
        // Inner loop: p.itstp time steps between two field outputs.
        for( unsigned j=0; j<p.itstp; j++)
        {
            ab( test, diffusion, y0);
            y0.swap( y1); //attention on -O3 ?
            //store accuracy details
            {
                if(rank==0)std::cout << "(m_tot-m_0)/m_0: "<< (test.mass()-mass0)/mass_blob0<<"\t";
                E0 = E1;
                E1 = test.energy();
                diff = (E1 - E0)/p.dt;
                double diss = test.energy_diffusion( );
                if(rank==0)std::cout << "(E_tot-E_0)/E_0: "<< (E1-energy0)/energy0<<"\t";
                if(rank==0)std::cout << "Accuracy: "<< 2.*(diff-diss)/(diff+diss)<<"\n";
            }
            time+=p.dt;
            Estart[0] += 1;
            {
                //err = nc_open(argv[2], NC_WRITE, &ncid);
                // Energy diagnostics are written every step, fields only
                // every p.itstp steps.
                double ener=test.energy(), mass=test.mass(), diff=test.mass_diffusion(), dEdt=test.energy_diffusion();
                err = nc_put_vara_double( ncid, EtimevarID, Estart, Ecount, &time);
                err = nc_put_vara_double( ncid, energyID,   Estart, Ecount, &ener);
                err = nc_put_vara_double( ncid, massID,     Estart, Ecount, &mass);
                err = nc_put_vara_double( ncid, dissID,     Estart, Ecount, &diff);
                err = nc_put_vara_double( ncid, dEdtID,     Estart, Ecount, &dEdt);
                //err = nc_close(ncid);
            }
        }
        //////////////////////////write fields////////////////////////
        start[0] = i;
        for( unsigned j=0; j<2; j++)
        {
            dg::blas2::gemv( interpolate, y0[j].data(), transferD);
            dg::blas1::transfer( transferD, transferH);
            err = nc_put_vara_double( ncid, dataIDs[j], start, count, transferH.data());
        }
        transfer = test.potential()[0];
        dg::blas2::gemv( interpolate, transfer.data(), transferD);
        dg::blas1::transfer( transferD, transferH);
        err = nc_put_vara_double( ncid, dataIDs[2], start, count, transferH.data() );
        transfer = test.potential()[0];
        dg::blas2::gemv( diffusion.laplacianM(), transfer, y1[1]);        //correct?
        dg::blas2::gemv( interpolate,y1[1].data(), transferD);
        dg::blas1::transfer( transferD, transferH);
        err = nc_put_vara_double( ncid, dataIDs[3], start, count, transferH.data() );
        err = nc_put_vara_double( ncid, tvarID, start, count, &time);
#ifdef DG_BENCHMARK
        ti.toc();
        step+=p.itstp;
        if(rank==0)std::cout << "\n\t Step "<<step <<" of "<<p.itstp*p.maxout <<" at time "<<time;
        if(rank==0)std::cout << "\n\t Average time for one step: "<<ti.diff()/(double)p.itstp<<"s\n\n"<<std::flush;
#endif//DG_BENCHMARK
    }
    }
    catch( dg::Fail& fail) {
        if(rank==0)std::cerr << "CG failed to converge to "<<fail.epsilon()<<"\n";
        if(rank==0)std::cerr << "Does Simulation respect CFL condition?\n";
    }
    t.toc();
    // Final wall-clock report in h:mm:ss plus per-step average.
    unsigned hour = (unsigned)floor(t.diff()/3600);
    unsigned minute = (unsigned)floor( (t.diff() - hour*3600)/60);
    double second = t.diff() - hour*3600 - minute*60;
    if(rank==0)std::cout << std::fixed << std::setprecision(2) <<std::setfill('0');
    if(rank==0)std::cout <<"Computation Time \t"<<hour<<":"<<std::setw(2)<<minute<<":"<<second<<"\n";
    if(rank==0)std::cout <<"which is         \t"<<t.diff()/p.itstp/p.maxout<<"s/step\n";
    nc_close(ncid);
    MPI_Finalize();
    return 0;
}
| 300f4723e7ccf9adf559ae1fd6d4a04a20c6daad.cu | #include <iostream>
#include <iomanip>
#include <vector>
#include <mpi.h> //activate mpi
#include "netcdf_par.h"
#include "file/nc_utilities.h"
#include "toeflR.cuh"
#include "dg/algorithm.h"
#include "dg/backend/xspacelib.cuh"
#include "parameters.h"
#include "dg/backend/timer.cuh"
/*
- reads parameters from input.txt or any other given file,
- integrates the ToeflR - functor and
- writes outputs to a given outputfile using hdf5.
density fields are the real densities in XSPACE ( not logarithmic values)
*/
/**
 * MPI-parallel driver for the 2d ToeflR turbulence model (CUDA build).
 *
 * Usage: program [inputfile] [outputfile]
 *   - the process-grid extents np[0] x np[1] are read from stdin on rank 0,
 *   - [inputfile] is a json parameter file parsed into Parameters,
 *   - [outputfile] is a parallel netCDF-4 file receiving the fields
 *     "electrons", "ions", "potential", "vorticity" plus energy diagnostics.
 *
 * Returns 0 on success, -1 on setup failure (wrong MPI threading level,
 * no GPU found, or wrong argument count).
 */
int main( int argc, char* argv[])
{
    ////////////////////////////////setup MPI///////////////////////////////
    int provided;
    MPI_Init_thread( &argc, &argv, MPI_THREAD_FUNNELED, &provided);
    if( provided != MPI_THREAD_FUNNELED)
    {
        std::cerr << "wrong mpi-thread environment provided!\n";
        return -1;
    }
    int periods[2] = {false, true}; //non-, periodic
    int rank, size;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank);
    MPI_Comm_size( MPI_COMM_WORLD, &size);
#if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA
    // Round-robin GPU assignment: rank r uses device r % (devices per node).
    int num_devices=0;
    cudaGetDeviceCount(&num_devices);
    if(num_devices==0){std::cerr << "No CUDA capable devices found"<<std::endl; return -1;}
    int device = rank % num_devices; //assume # of gpus/node is fixed
    cudaSetDevice( device);
#endif//cuda
    // Process-grid extents are read from stdin on rank 0 and broadcast.
    int np[2];
    if(rank==0)
    {
        std::cin>> np[0] >> np[1];
        std::cout << "Computing with "<<np[0]<<" x "<<np[1]<<" = "<<size<<std::endl;
        assert( size == np[0]*np[1]);
    }
    MPI_Bcast( np, 2, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm comm;
    MPI_Cart_create( MPI_COMM_WORLD, 2, np, periods, true, &comm);
    ////////////////////////Parameter initialisation//////////////////////////
    Json::Reader reader;
    Json::Value js;
    if( argc != 3)
    {
        if(rank==0)std::cerr << "ERROR: Wrong number of arguments!\nUsage: "<< argv[0]<<" [inputfile] [outputfile]\n";
        return -1;
    }
    else
    {
        std::ifstream is(argv[1]);
        reader.parse( is, js, false); //read input without comments
    }
    std::string input = js.toStyledString(); //save input without comments, which is important if netcdf file is later read by another parser
    const Parameters p( js);
    if(rank==0)p.display( std::cout);
    ////////////////////////////////set up computations///////////////////////////
    // Computational grid and (possibly coarser) output grid share topology.
    dg::MPIGrid2d grid( 0, p.lx, 0, p.ly, p.n, p.Nx, p.Ny, p.bc_x, p.bc_y, comm);
    dg::MPIGrid2d grid_out( 0., p.lx, 0.,p.ly, p.n_out, p.Nx_out, p.Ny_out, p.bc_x, p.bc_y, comm);
    //create RHS
    dg::ToeflR< dg::CartesianMPIGrid2d, dg::MDMatrix, dg::MDVec > test( grid, p);
    dg::Diffusion<dg::CartesianMPIGrid2d, dg::MDMatrix, dg::MDVec> diffusion( grid, p.nu);
    //////////////////create initial vector///////////////////////////////////////
    dg::Gaussian g( p.posX*p.lx, p.posY*p.ly, p.sigma, p.sigma, p.amp);
    std::vector<dg::MDVec> y0(2, dg::evaluate( g, grid)), y1(y0); // n_e' = gaussian
    dg::blas2::symv( test.gamma(), y0[0], y0[1]); // n_e = \Gamma_i n_i -> n_i = ( 1+alphaDelta) n_e' + 1
    {
        dg::MDVec v2d = dg::create::inv_weights(grid);
        dg::blas2::symv( v2d, y0[1], y0[1]);
    }
    // Gravity/drift variants start with a zero second field.
    if( p.equations == "gravity_local" || p.equations == "gravity_global" || p.equations == "drift_global" ){
        y0[1] = dg::evaluate( dg::zero, grid);
    }
    //////////////////////////////////////////////////////////////////////
    //////////////////initialisation of timestepper and first step///////////////////
    double time = 0;
    //dg::AB< k, std::vector<dg::MDVec> > ab( y0);
    dg::Karniadakis< std::vector<dg::MDVec> > ab( y0, y0[0].size(), 1e-9);
    ab.init( test, diffusion, y0, p.dt);
    y0.swap( y1); //y1 now contains value at zero time
    /////////////////////////////set up netcdf/////////////////////////////////////
    // All ranks write collectively into one parallel netCDF-4 file.
    file::NC_Error_Handle err;
    int ncid;
    MPI_Info info = MPI_INFO_NULL;
    err = nc_create_par( argv[2],NC_NETCDF4|NC_MPIIO|NC_CLOBBER,comm,info, &ncid);
    err = nc_put_att_text( ncid, NC_GLOBAL, "inputfile", input.size(), input.data());
    const int version[3] = {FELTOR_MAJOR_VERSION, FELTOR_MINOR_VERSION, FELTOR_SUBMINOR_VERSION}; //write maybe to json file!?
    err = nc_put_att_int( ncid, NC_GLOBAL, "feltor_major_version", NC_INT, 1, &version[0]);
    err = nc_put_att_int( ncid, NC_GLOBAL, "feltor_minor_version", NC_INT, 1, &version[1]);
    err = nc_put_att_int( ncid, NC_GLOBAL, "feltor_subminor_version", NC_INT, 1, &version[2]);
    int dim_ids[3], tvarID;
    err = file::define_dimensions( ncid, dim_ids, &tvarID, grid_out.global());
    //field IDs
    std::string names[4] = {"electrons", "ions", "potential", "vorticity"};
    int dataIDs[4];
    for( unsigned i=0; i<4; i++){
        err = nc_def_var( ncid, names[i].data(), NC_DOUBLE, 3, dim_ids, &dataIDs[i]);}
    //energy IDs
    int EtimeID, EtimevarID;
    err = file::define_time( ncid, "energy_time", &EtimeID, &EtimevarID);
    int energyID, massID, dissID, dEdtID;
    err = nc_def_var( ncid, "energy",      NC_DOUBLE, 1, &EtimeID, &energyID);
    err = nc_def_var( ncid, "mass",        NC_DOUBLE, 1, &EtimeID, &massID);
    err = nc_def_var( ncid, "dissipation", NC_DOUBLE, 1, &EtimeID, &dissID);
    err = nc_def_var( ncid, "dEdt",        NC_DOUBLE, 1, &EtimeID, &dEdtID);
    // Collective access mode is required for parallel writes below.
    for(unsigned i=0; i<4; i++)
        err = nc_var_par_access( ncid, dataIDs[i], NC_COLLECTIVE);
    err = nc_var_par_access( ncid, tvarID, NC_COLLECTIVE);
    err = nc_var_par_access( ncid, EtimevarID, NC_COLLECTIVE);
    err = nc_var_par_access( ncid, energyID, NC_COLLECTIVE);
    err = nc_var_par_access( ncid, massID, NC_COLLECTIVE);
    err = nc_var_par_access( ncid, dissID, NC_COLLECTIVE);
    err = nc_var_par_access( ncid, dEdtID, NC_COLLECTIVE);
    err = nc_enddef(ncid);
    ///////////////////////////////////first output/////////////////////////
    // Each rank writes its own hyperslab, located by its Cartesian coords.
    int dims[2],  coords[2];
    MPI_Cart_get( comm, 2, dims, periods, coords);
    size_t count[3] = {1, grid_out.n()*grid_out.Ny(), grid_out.n()*grid_out.Nx()};
    size_t start[3] = {0, coords[1]*count[1],          coords[0]*count[2]};
    size_t Estart[] = {0};
    size_t Ecount[] = {1};
    dg::MDVec transfer( dg::evaluate(dg::zero, grid));
    dg::DVec transferD( dg::evaluate(dg::zero, grid_out.local()));
    dg::HVec transferH( dg::evaluate(dg::zero, grid_out.local()));
    dg::IDMatrix interpolate = dg::create::interpolation( grid_out.local(), grid.local()); //create local interpolation matrix
    for( unsigned i=0; i<2; i++)
    {
        dg::blas2::gemv( interpolate, y0[i].data(), transferD);
        dg::blas1::transfer( transferD, transferH);
        err = nc_put_vara_double( ncid, dataIDs[i], start, count, transferH.data() );
    }
    //pot
    transfer = test.potential()[0];
    dg::blas2::gemv( interpolate, transfer.data(), transferD);
    dg::blas1::transfer( transferD, transferH);
    err = nc_put_vara_double( ncid, dataIDs[2], start, count, transferH.data() );
    //Vor
    // Vorticity = Laplacian of the potential.
    transfer = test.potential()[0];
    dg::blas2::gemv( diffusion.laplacianM(), transfer, y1[1]);
    dg::blas2::gemv( interpolate,y1[1].data(), transferD);
    dg::blas1::transfer( transferD, transferH);
    err = nc_put_vara_double( ncid, dataIDs[3], start, count, transferH.data() );
    err = nc_put_vara_double( ncid, tvarID, start, count, &time);
    //err = nc_close(ncid);
    ///////////////////////////////////////Timeloop/////////////////////////////////
    const double mass0 = test.mass(), mass_blob0 = mass0 - grid.lx()*grid.ly();
    double E0 = test.energy(), energy0 = E0, E1 = 0, diff = 0;
    dg::Timer t;
    t.tic();
    try
    {
#ifdef DG_BENCHMARK
    unsigned step = 0;
#endif //DG_BENCHMARK
    for( unsigned i=1; i<=p.maxout; i++)
    {
#ifdef DG_BENCHMARK
        dg::Timer ti;
        ti.tic();
#endif//DG_BENCHMARK
        // Inner loop: p.itstp time steps between two field outputs.
        for( unsigned j=0; j<p.itstp; j++)
        {
            ab( test, diffusion, y0);
            y0.swap( y1); //attention on -O3 ?
            //store accuracy details
            {
                if(rank==0)std::cout << "(m_tot-m_0)/m_0: "<< (test.mass()-mass0)/mass_blob0<<"\t";
                E0 = E1;
                E1 = test.energy();
                diff = (E1 - E0)/p.dt;
                double diss = test.energy_diffusion( );
                if(rank==0)std::cout << "(E_tot-E_0)/E_0: "<< (E1-energy0)/energy0<<"\t";
                if(rank==0)std::cout << "Accuracy: "<< 2.*(diff-diss)/(diff+diss)<<"\n";
            }
            time+=p.dt;
            Estart[0] += 1;
            {
                //err = nc_open(argv[2], NC_WRITE, &ncid);
                // Energy diagnostics are written every step, fields only
                // every p.itstp steps.
                double ener=test.energy(), mass=test.mass(), diff=test.mass_diffusion(), dEdt=test.energy_diffusion();
                err = nc_put_vara_double( ncid, EtimevarID, Estart, Ecount, &time);
                err = nc_put_vara_double( ncid, energyID,   Estart, Ecount, &ener);
                err = nc_put_vara_double( ncid, massID,     Estart, Ecount, &mass);
                err = nc_put_vara_double( ncid, dissID,     Estart, Ecount, &diff);
                err = nc_put_vara_double( ncid, dEdtID,     Estart, Ecount, &dEdt);
                //err = nc_close(ncid);
            }
        }
        //////////////////////////write fields////////////////////////
        start[0] = i;
        for( unsigned j=0; j<2; j++)
        {
            dg::blas2::gemv( interpolate, y0[j].data(), transferD);
            dg::blas1::transfer( transferD, transferH);
            err = nc_put_vara_double( ncid, dataIDs[j], start, count, transferH.data());
        }
        transfer = test.potential()[0];
        dg::blas2::gemv( interpolate, transfer.data(), transferD);
        dg::blas1::transfer( transferD, transferH);
        err = nc_put_vara_double( ncid, dataIDs[2], start, count, transferH.data() );
        transfer = test.potential()[0];
        dg::blas2::gemv( diffusion.laplacianM(), transfer, y1[1]);        //correct?
        dg::blas2::gemv( interpolate,y1[1].data(), transferD);
        dg::blas1::transfer( transferD, transferH);
        err = nc_put_vara_double( ncid, dataIDs[3], start, count, transferH.data() );
        err = nc_put_vara_double( ncid, tvarID, start, count, &time);
#ifdef DG_BENCHMARK
        ti.toc();
        step+=p.itstp;
        if(rank==0)std::cout << "\n\t Step "<<step <<" of "<<p.itstp*p.maxout <<" at time "<<time;
        if(rank==0)std::cout << "\n\t Average time for one step: "<<ti.diff()/(double)p.itstp<<"s\n\n"<<std::flush;
#endif//DG_BENCHMARK
    }
    }
    catch( dg::Fail& fail) {
        if(rank==0)std::cerr << "CG failed to converge to "<<fail.epsilon()<<"\n";
        if(rank==0)std::cerr << "Does Simulation respect CFL condition?\n";
    }
    t.toc();
    // Final wall-clock report in h:mm:ss plus per-step average.
    unsigned hour = (unsigned)floor(t.diff()/3600);
    unsigned minute = (unsigned)floor( (t.diff() - hour*3600)/60);
    double second = t.diff() - hour*3600 - minute*60;
    if(rank==0)std::cout << std::fixed << std::setprecision(2) <<std::setfill('0');
    if(rank==0)std::cout <<"Computation Time \t"<<hour<<":"<<std::setw(2)<<minute<<":"<<second<<"\n";
    if(rank==0)std::cout <<"which is         \t"<<t.diff()/p.itstp/p.maxout<<"s/step\n";
    nc_close(ncid);
    MPI_Finalize();
    return 0;
}
|
8e10ed3c68b53191737f4eee262412fb437298e7.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdio>
#include <ctime>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// Exclusive prefix sum over one array, computed in a single thread block via
// shared memory: on return, array[idx] holds the sum of array[0..idx-1]
// (so array[0] becomes 0.0f).
//
// Launch contract: exactly one block of exactly 512 threads, matching both
// the fixed __shared__ buffer size and the host-side array length.  There is
// no bounds check, so launching with more than 512 threads reads and writes
// out of range.  Each thread sums its own prefix serially, i.e. O(n^2) work
// in total -- fine for this demo; a scan algorithm would be used in practice.
__global__ void sum_shared_mem(float *array)
{
    int idx = threadIdx.x;
    float sum=0.0f;
    __shared__ float sh_array[512];
    // Stage the input in shared memory so the prefix loop below reads fast
    // on-chip storage instead of hitting global memory repeatedly.
    sh_array[idx] = array[idx];
    __syncthreads(); // every load must finish before any thread reads sh_array
    for (int i=0; i<idx; i++){
        sum+= sh_array[i];
    }
    __syncthreads();
    array[idx] = sum;
}
// Exclusive prefix sum computed directly from global memory (the
// shared-memory-free counterpart of sum_shared_mem): on return, array[idx]
// holds the sum of array[0..idx-1].  Assumes a single block whose thread
// count matches the array length.
__global__ void sum_global_mem(float *array)
{
    const int idx = threadIdx.x;
    float acc = 0.0f;
    for (int j = 0; j < idx; ++j) {
        acc += array[j];
    }
    __syncthreads(); // keep every read ahead of any overwrite of array[]
    array[idx] = acc;
}
// Host driver: builds a 512-element ramp (0..511), runs the shared-memory
// prefix-sum kernel on it, prints a wall-clock timing of the host-side
// sequence and the first ten results.
// Fix: the original never released d_array (device memory leak).
int main(void)
{
    std::clock_t start_time;
    double duration;
    const int ARR_BYTES = 512*sizeof(float);
    // Clock start (note: the timing covers allocation and both transfers,
    // not just the kernel)
    start_time = std::clock();
    // Declare and alloc array on host
    float h_array[512];
    // initialize input array
    for (int i=0; i<512; i++){
        h_array[i] = float(i);
    }
    // Declare and alloc array on device
    float *d_array;
    hipMalloc((void **) &d_array, ARR_BYTES);
    // Transfer to device
    hipMemcpy(d_array, h_array, ARR_BYTES, hipMemcpyHostToDevice);
    // Call kernel function: one block of 512 threads, as the kernel requires
    hipLaunchKernelGGL(( sum_shared_mem), dim3(1), dim3(512), 0, 0, d_array);
    // Transfer results to host (the blocking copy synchronizes with the kernel)
    hipMemcpy(h_array, d_array, ARR_BYTES, hipMemcpyDeviceToHost);
    // Clock stop
    duration = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC;
    std::cout<<"Computing time: "<< duration << "s" << std::endl;
    // Output results
    for(int ii=0; ii<10; ii++){
        std::cout<< h_array[ii]<< ", ";
    }
    std::cout<< std::endl;
    // Release the device buffer (the original leaked it).
    hipFree(d_array);
    return 0;
}
| 8e10ed3c68b53191737f4eee262412fb437298e7.cu | #include <cmath>
#include <cstdio>
#include <ctime>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
// Exclusive prefix sum over one array, computed in a single thread block via
// shared memory: on return, array[idx] holds the sum of array[0..idx-1]
// (so array[0] becomes 0.0f).
//
// Launch contract: exactly one block of exactly 512 threads, matching both
// the fixed __shared__ buffer size and the host-side array length.  There is
// no bounds check, so launching with more than 512 threads reads and writes
// out of range.  Each thread sums its own prefix serially, i.e. O(n^2) work
// in total -- fine for this demo; a scan algorithm would be used in practice.
__global__ void sum_shared_mem(float *array)
{
    int idx = threadIdx.x;
    float sum=0.0f;
    __shared__ float sh_array[512];
    // Stage the input in shared memory so the prefix loop below reads fast
    // on-chip storage instead of hitting global memory repeatedly.
    sh_array[idx] = array[idx];
    __syncthreads(); // every load must finish before any thread reads sh_array
    for (int i=0; i<idx; i++){
        sum+= sh_array[i];
    }
    __syncthreads();
    array[idx] = sum;
}
// Exclusive prefix sum computed directly from global memory (the
// shared-memory-free counterpart of sum_shared_mem): on return, array[idx]
// holds the sum of array[0..idx-1].  Assumes a single block whose thread
// count matches the array length.
__global__ void sum_global_mem(float *array)
{
    const int idx = threadIdx.x;
    float acc = 0.0f;
    for (int j = 0; j < idx; ++j) {
        acc += array[j];
    }
    __syncthreads(); // keep every read ahead of any overwrite of array[]
    array[idx] = acc;
}
// Host driver: builds a 512-element ramp (0..511), runs the shared-memory
// prefix-sum kernel on it, prints a wall-clock timing of the host-side
// sequence and the first ten results.
// Fix: the original never released d_array (device memory leak).
int main(void)
{
    std::clock_t start_time;
    double duration;
    const int ARR_BYTES = 512*sizeof(float);
    // Clock start (note: the timing covers allocation and both transfers,
    // not just the kernel)
    start_time = std::clock();
    // Declare and alloc array on host
    float h_array[512];
    // initialize input array
    for (int i=0; i<512; i++){
        h_array[i] = float(i);
    }
    // Declare and alloc array on device
    float *d_array;
    cudaMalloc((void **) &d_array, ARR_BYTES);
    // Transfer to device
    cudaMemcpy(d_array, h_array, ARR_BYTES, cudaMemcpyHostToDevice);
    // Call kernel function: one block of 512 threads, as the kernel requires
    sum_shared_mem<<<1, 512>>>(d_array);
    // Transfer results to host (the blocking copy synchronizes with the kernel)
    cudaMemcpy(h_array, d_array, ARR_BYTES, cudaMemcpyDeviceToHost);
    // Clock stop
    duration = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC;
    std::cout<<"Computing time: "<< duration << "s" << std::endl;
    // Output results
    for(int ii=0; ii<10; ii++){
        std::cout<< h_array[ii]<< ", ";
    }
    std::cout<< std::endl;
    // Release the device buffer (the original leaked it).
    cudaFree(d_array);
    return 0;
}
|
16ec394d27eff7c61d587127c217e889b11f3753.hip | // !!! This is a file automatically generated by hipify!!!
//Artifical Neural Network with Cuda and Cublas matrix version
//Ron Patrick - Capstone GVSU - Winter 2017
#include <signal.h>
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
#include <iostream>
#include <string>
#include <algorithm>
#include <bitset>
#include <unistd.h>
#include <vector>
#include <unordered_map>
#include <chrono>
#include <thrust/detail/config.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/detail/vector_base.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/extrema.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
//#include <mpi.h>
#include "helper_cuda.h"
#include "helper_string.h"
#include <cmath>
#include <numeric>
#include <limits.h>
#include <float.h>
#include <random>
//#include "imebra/imebra.h"
#include <cublasXt.h>
#include <rocblas.h>
#include <hiprand/hiprand.h>
#include <pthread.h>
#include "cudnn.h"
using namespace std;
using namespace thrust;
using namespace chrono;
using nanoSec = std::chrono::nanoseconds;
#define ULLI unsigned long long int
#define UNCHAR unsigned char
#define INPUT 0
#define OUTPUT 1
#define HIDDEN 2
#ifndef doMNISTprob
#define doMNISTprob true
#endif
#ifndef doBinaryProb
#define doBinaryProb false
#endif
#ifndef BITS
#define BITS 5
#endif
int memoryTracker=0;
bool showCorrectNumTrain=false;
int showInterval=0;
pthread_mutex_t crmutex = PTHREAD_MUTEX_INITIALIZER;
bool threadExit=false;
int waitTime;
static pthread_barrier_t barrier;
static pthread_barrier_t barrier2;
void ReadMNIST_double(string filename, int NumberOfImages, int DataOfAnImage, vector<vector<double>> &arr);
void ReadMNIST_float(string filename, int NumberOfImages, int DataOfAnImage, vector<vector<float>> &arr);
void printTime(high_resolution_clock::time_point start, high_resolution_clock::time_point end);
void print_matrix(device_vector<double> &A, int nr_rows_A, int nr_cols_A);
typedef thrust::tuple<ULLI, ULLI> uTuple;
typedef thrust::tuple<double, double> dTuple;
typedef thrust::tuple<ULLI, double, double> tTuple;
typedef thrust::device_vector<double>::iterator doubleIterator;
typedef thrust::tuple<doubleIterator, doubleIterator> iterTuple;
typedef thrust::zip_iterator<iterTuple> zipIterator;
// Signal handler (installed for Ctrl-C): requests a graceful shutdown by
// raising the global threadExit flag, which the worker loops poll.
// Fix: printf() is not async-signal-safe; POSIX only guarantees a small set
// of functions inside signal handlers, so the message is emitted with
// write() instead (same bytes as before).
// NOTE(review): threadExit is a plain bool; `volatile sig_atomic_t` would be
// the strictly conforming type for a flag set from a signal handler --
// confirm whether other threads rely on its current type.
void ctrlchandler(int sig) {
	(void)sig; // unused; required by the handler signature
	const char msg[] = "\nTrying to exit...\n";
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	threadExit=true;
}
// Accumulate `in` bytes into the global device-memory usage counter and,
// when `printIt` is set, report the running total in bytes / Kb / Mb.
void memTracker(int in, bool printIt) {
	memoryTracker+=in;
	if(!printIt) {
		return;
	}
	cout << "Cuda memory tracker: Using(bytes): " << memoryTracker << " "
	     << "(Kb): " << (memoryTracker/1024) << " "
	     << "(Mb): " << ((memoryTracker/1024)/1024) << endl;
}
// Thrust unary functor: widen a float to double (used when copying float
// device vectors into double-precision buffers).
struct floatToDoubleFunctor : public thrust::unary_function<float,double> {
	__device__ double operator()(float value) {
		return static_cast<double>(value);
	}
};
// Map a uniform random sample from [0,1] onto [-1,1] (double precision),
// used to center freshly generated weights around zero.
struct fix_random_numbers : public thrust::unary_function<double, double> {
	__device__ double operator()(double u) {
		return u * 2.0 - 1.0;
	}
};
// Map a uniform random sample from [0,1] onto [-1,1] (single precision),
// used to center freshly generated weights around zero.
struct fix_random_numbers_f : public thrust::unary_function<float, float> {
	__device__ float operator()(float u) {
		return u * 2.0f - 1.0f;
	}
};
// Fill the device buffer A (rowsA x colsA floats) with uniform samples,
// seeding a fresh hiprand generator from the host clock.
// Fix: the original created a new generator on every call and never
// destroyed it, leaking generator state each invocation.
// NOTE(review): rowsA * colsA is computed in int -- could overflow for very
// large layers; confirm expected sizes before widening.
void random_floats(float *A, int rowsA, int colsA) {
	hiprandGenerator_t cg;
	hiprandCreateGenerator(&cg, HIPRAND_RNG_PSEUDO_DEFAULT);
	hiprandSetPseudoRandomGeneratorSeed(cg, (unsigned long long) clock());
	hiprandGenerateUniform(cg, A, rowsA * colsA);
	hiprandDestroyGenerator(cg); // release generator resources (was leaked)
}
// Fill the device buffer A (rowsA x colsA doubles) with uniform samples,
// seeding a fresh hiprand generator from the host clock.
// Fix: the original created a new generator on every call and never
// destroyed it, leaking generator state each invocation.
// NOTE(review): rowsA * colsA is computed in int -- could overflow for very
// large layers; confirm expected sizes before widening.
void random_doubles(double *A, int rowsA, int colsA) {
	hiprandGenerator_t cg;
	hiprandCreateGenerator(&cg, HIPRAND_RNG_PSEUDO_DEFAULT);
	hiprandSetPseudoRandomGeneratorSeed(cg, (unsigned long long) clock());
	hiprandGenerateUniformDouble(cg, A, rowsA * colsA);
	hiprandDestroyGenerator(cg); // release generator resources (was leaked)
}
// Thrust functor: in-place SGD weight update over a flat index range,
// weights[t] -= lRate * newW[t].
struct update_w : public thrust::unary_function<int, void> {
	double *weights;
	double *newW;
	double lRate;
	update_w(double *w, double *_newW, double lr) : weights(w), newW(_newW), lRate(lr){}
	__device__ void operator()(int t) {
		weights[t] = weights[t] - lRate * newW[t];
	}
};
// Thrust functor: in-place SGD bias update over a flat index range,
// biases[t] -= lRate * newB[t].
struct update_b : public thrust::unary_function<int, void> {
	double *biases;
	double *newB;
	double lRate;
	update_b(double *b, double *_newB, double lr) : biases(b), newB(_newB), lRate(lr){}
	__device__ void operator()(int t) {
		biases[t] = biases[t] - lRate * newB[t];
	}
};
// Thrust functor (float variant): in-place SGD weight update over a flat
// index range, weights[t] -= lRate * newW[t].
struct update_wf : public thrust::unary_function<int, void> {
	float *weights;
	float *newW;
	float lRate;
	update_wf(float *w, float *_newW, float lr) : weights(w), newW(_newW), lRate(lr){}
	__device__ void operator()(int t) {
		weights[t] = weights[t] - lRate * newW[t];
	}
};
// Thrust functor (float variant): in-place SGD bias update over a flat
// index range, biases[t] -= lRate * newB[t].
struct update_bf : public thrust::unary_function<int, void> {
	float *biases;
	float *newB;
	float lRate;
	update_bf(float *b, float *_newB, float lr) : biases(b), newB(_newB), lRate(lr){}
	__device__ void operator()(int t) {
		biases[t] = biases[t] - lRate * newB[t];
	}
};
// Generic squaring functor (e.g. for sum-of-squares style reductions).
template<typename T>
struct square {
	__device__ T operator()(const T& v) const {
		return v * v;
	}
};
// Derivative of the logistic sigmoid evaluated at pre-activation t:
// s(t) * (1 - s(t)).  (The struct name keeps the original misspelling of
// "derivative" because other code refers to it by this name.)
struct sigmoid_devrivative : public thrust::unary_function<double, double> {
	__device__ double operator()(double t) {
		const double s = 1.0/(1.0+exp(-t));
		return s*(1.0-s);
	}
};
// Logistic sigmoid activation: 1 / (1 + e^-z).
struct sigmoid : public thrust::unary_function<double, double> {
	sigmoid(){}
	__device__ double operator()(double z) {
		return 1.0 / (1.0 + exp(-z));
	}
};
// Elementwise exponential (double), e.g. for softmax-style normalization.
struct exp_double : public thrust::unary_function<double, double> {
	__device__ double operator()(double v) {
		return exp(v);
	}
};
// Forward-pass helper (double): for flat neuron index t, fold the bias into
// the stored pre-activation (inputs[t] += biases[t]) and return the sigmoid
// activation of the result.
struct forwardFeed_helper : public thrust::unary_function<int, double> {
	double *inputs;
	double *biases;
	forwardFeed_helper(){}
	forwardFeed_helper(double *_inputs, double* _biases) : inputs(_inputs), biases(_biases){}
	__device__ double operator()(int t) {
		const double z = inputs[t] + biases[t];
		inputs[t] = z; // keep the pre-activation for the backward pass
		return 1.0/(1.0+exp(-z));
	}
};
// Backprop helper (double), for layers holding pre-activations:
// delta[t] = innerDelta[t] * sigmoid'(inputs[t]).
struct backProp_helper : public thrust::unary_function<int, double> {
	double *innerDelta;
	double *inputs;
	backProp_helper(){}
	backProp_helper(double* _innerDelta, double *_inputs) : innerDelta(_innerDelta), inputs(_inputs){}
	__device__ double operator()(int t) {
		const double s = 1.0/(1.0+exp(-inputs[t]));
		return innerDelta[t] * (s*(1.0-s));
	}
};
// Backprop helper (double), for layers that stored activations rather than
// pre-activations: delta[t] = innerDelta[t] * a * (1 - a) with a = outputs[t]
// (sigmoid derivative expressed through the activation itself).
struct backProp_helper2 : public thrust::unary_function<double, double> {
	double *outputs;
	double *innerDelta;
	backProp_helper2(){}
	backProp_helper2(double *_outputs, double* _innerDelta) : innerDelta(_innerDelta), outputs(_outputs){}
	__device__ double operator()(int t) {
		const double a = outputs[t];
		return innerDelta[t] * (a*(1.0-a));
	}
};
// Output-layer delta (double): (activation - label) scaled by the sigmoid
// derivative of the stored pre-activation.  The innerDelta member is carried
// for constructor parity with the other helpers but is not read here.
struct output_helper : public thrust::unary_function<int, double> {
	double *inputs;
	double *outputs;
	double *labels;
	double *innerDelta;
	output_helper(double *_outputs, double *_inputs, double* _innerDelta, double* _labels) : outputs(_outputs), inputs(_inputs), innerDelta(_innerDelta), labels(_labels){}
	__device__ double operator()(int t) {
		const double err = outputs[t] - labels[t];
		const double s = 1.0/(1.0+exp(-inputs[t]));
		return (s*(1.0-s)) * err;
	}
};
// Forward-pass helper (float): for flat neuron index t, fold the bias into
// the stored pre-activation (inputs[t] += biases[t]) and return the sigmoid
// activation of the result.
// Fix: the original computed the sigmoid with double literals and exp(),
// silently promoting the expression to double precision; float literals and
// expf() keep the single-precision path in single precision.
struct forwardFeed_helperf : public thrust::unary_function<int, float> {
	float *inputs;
	float *biases;
	forwardFeed_helperf(){}
	forwardFeed_helperf(float *_inputs, float* _biases) : inputs(_inputs), biases(_biases){}
	__device__ float operator()(int t) {
		float z = inputs[t] + biases[t];
		inputs[t] = z; // keep the pre-activation for the backward pass
		return 1.0f/(1.0f+expf(-z));
	}
};
// Backprop helper (float), for layers holding pre-activations:
// delta[t] = innerDelta[t] * sigmoid'(inputs[t]).
// Fix: the original used double literals and exp(), silently promoting the
// float path to double precision; float literals and expf() keep it single
// precision.
struct backProp_helperf : public thrust::unary_function<int, float> {
	float *innerDelta;
	float *inputs;
	backProp_helperf(){}
	backProp_helperf(float* _innerDelta, float *_inputs) : innerDelta(_innerDelta), inputs(_inputs){}
	__device__ float operator()(int t) {
		float s = 1.0f/(1.0f+expf(-inputs[t]));
		s = s*(1.0f-s);
		return innerDelta[t]*s;
	}
};
// Backprop helper (float), for layers that stored activations rather than
// pre-activations: delta[t] = innerDelta[t] * a * (1 - a) with a = outputs[t].
// Fix: the original used the double literal 1.0 in the derivative, promoting
// the float computation to double; 1.0f keeps it single precision.
struct backProp_helper2f : public thrust::unary_function<float, float> {
	float *outputs;
	float *innerDelta;
	backProp_helper2f(){}
	backProp_helper2f(float *_outputs, float* _innerDelta) : innerDelta(_innerDelta), outputs(_outputs){}
	__device__ float operator()(int t) {
		float a = outputs[t];
		a = a*(1.0f-a);
		return innerDelta[t]*a;
	}
};
// Output-layer delta (float): (activation - label) scaled by the sigmoid
// derivative of the stored pre-activation.  The innerDelta member is carried
// for constructor parity with the other helpers but is not read here.
// Fix: the original computed the sigmoid with double literals and exp(),
// silently promoting the float path to double precision; float literals and
// expf() keep it single precision.
struct output_helperf : public thrust::unary_function<int, float> {
	float *inputs;
	float *outputs;
	float *labels;
	float *innerDelta;
	output_helperf(float *_outputs, float *_inputs, float* _innerDelta, float* _labels) : outputs(_outputs), inputs(_inputs), innerDelta(_innerDelta), labels(_labels){}
	__device__ float operator()(int t) {
		float err = outputs[t]-labels[t];
		float s = 1.0f/(1.0f+expf(-inputs[t]));
		s = s*(1.0f-s);
		return s*err;
	}
};
// One fully-connected layer, float precision (mirror of NN_layer below).
// type is INPUT, HIDDEN or OUTPUT; the layer holds thisSize neurons feeding
// nextSize neurons in the following layer, batched batchSize items wide.
class NN_layerf {
public:
    device_vector<float> atNeuronOutputs;   // activations, thisSize*batchSize
    device_vector<float> atNeuronInputs;    // pre-activations (not kept for INPUT layers)
    device_vector<float> weightsMatrix;     // thisSize*nextSize weights (not kept for OUTPUT layers)
    device_vector<float> biases;            // per-neuron biases, replicated per batch item
    device_vector<float> outerDeltaB;       // accumulated bias gradients
    device_vector<float> outerDeltaW;       // accumulated weight gradients
    device_vector<float> innerDeltaB;       // per-batch bias gradients
    device_vector<float> innerDeltaW;       // per-batch weight gradients
    NN_layerf(){}
    // NOTE(review): this overload receives no batch size; the original
    // computed allN from an uninitialized batchSize member (undefined value).
    // batchSize now defaults to 0, so allN is deterministically 0 until
    // setupLayer() runs.
    NN_layerf(int sizeThis, int sizeNext, int pType) :
        type(pType), thisSize(sizeThis), nextSize(sizeNext), batchSize(0) {
        allW=thisSize*nextSize;
        allN=thisSize*batchSize;
    }
    NN_layerf(int sizeThis, int sizeNext, int pBatchSize, int pType) :
        type(pType), thisSize(sizeThis), nextSize(sizeNext), batchSize(pBatchSize) {
        setupLayer(true);
    }
    // Allocate (newLayer==true) or re-allocate the buffers this layer type
    // needs. memTracker() only records byte counts for diagnostics.
    void setupLayer(bool newLayer) {
        atNeuronOutputs=device_vector<float>(batchSize*thisSize,0.0f);
        allW=thisSize*nextSize;
        allN=thisSize*batchSize;
        memTracker(allN*8,false);
        // counterN/counterW are index sequences 0..allN-1 / 0..allW-1,
        // consumed by the thrust::transform calls elsewhere in the file.
        // (The original zero-filled then added a counting iterator; a direct
        // copy of the counting iterator produces the same contents.)
        counterN=device_vector<int>(allN);
        counterW=device_vector<int>(allW);
        thrust::copy(thrust::make_counting_iterator(0),thrust::make_counting_iterator(allN),counterN.begin());
        thrust::copy(thrust::make_counting_iterator(0),thrust::make_counting_iterator(allW),counterW.begin());
        memTracker(allN*sizeof(int),false);
        memTracker(allW*sizeof(int),false);
        if(newLayer) {
            if(type!=INPUT) {
                atNeuronInputs=device_vector<float>(batchSize*thisSize,0.0f);
                memTracker(allN*8,false);
                biases=device_vector<float>(thisSize*batchSize,0.0f);
                memTracker(allN*8*3,false);
                outerDeltaB=device_vector<float>(allN,0.0f);
                innerDeltaB=device_vector<float>(allN,0.0f);
            } else {
                // The original called hipFree(&vec) on host-side
                // thrust::device_vector objects — not device allocations, so
                // the calls errored out and freed nothing. Swapping with an
                // empty vector is the correct way to release the storage.
                device_vector<float>().swap(atNeuronInputs);
                device_vector<float>().swap(biases);
                device_vector<float>().swap(outerDeltaB);
                device_vector<float>().swap(innerDeltaB);
            }
            if(type!=OUTPUT) {
                weightsMatrix=device_vector<float>(thisSize*nextSize);
                memTracker(allW*8*3,false);
                // Allocate the deltas directly on the device (the original
                // staged them through host std::vectors).
                outerDeltaW=device_vector<float>(allW,0.0f);
                innerDeltaW=device_vector<float>(allW,0.0f);
                random_floats(thrust::raw_pointer_cast(&weightsMatrix[0]),thisSize,nextSize);
                thrust::transform(weightsMatrix.begin(),weightsMatrix.end(),weightsMatrix.begin(),fix_random_numbers_f());
                cout << "thisSize: " << thisSize << " nextSize: " << nextSize << " thisSize*nextSize: " << (thisSize*nextSize) << endl;
            } else {
                device_vector<float>().swap(weightsMatrix);
                device_vector<float>().swap(outerDeltaW);
                device_vector<float>().swap(innerDeltaW);
            }
        } else {
            if(type!=INPUT) {
                atNeuronInputs=device_vector<float>(batchSize*thisSize,0.0f);
            } else {
                device_vector<float>().swap(atNeuronInputs);
                device_vector<float>().swap(biases);
            }
            if(type==OUTPUT) {
                device_vector<float>().swap(weightsMatrix);
            }
        }
    }
    int type, thisSize, nextSize, batchSize, allW, allN;
    device_vector<int> counterN;    // index sequence 0..allN-1
    device_vector<int> counterW;    // index sequence 0..allW-1
};
// One fully-connected layer, double precision (mirror of NN_layerf above).
// type is INPUT, HIDDEN or OUTPUT; the layer holds thisSize neurons feeding
// nextSize neurons in the following layer, batched batchSize items wide.
class NN_layer {
public:
    device_vector<double> atNeuronOutputs;   // activations, thisSize*batchSize
    device_vector<double> atNeuronInputs;    // pre-activations (not kept for INPUT layers)
    device_vector<double> weightsMatrix;     // thisSize*nextSize weights (not kept for OUTPUT layers)
    device_vector<double> biases;            // per-neuron biases, replicated per batch item
    device_vector<double> outerDeltaB;       // accumulated bias gradients
    device_vector<double> outerDeltaW;       // accumulated weight gradients
    device_vector<double> innerDeltaB;       // per-batch bias gradients
    device_vector<double> innerDeltaW;       // per-batch weight gradients
    NN_layer(){}
    // NOTE(review): this overload receives no batch size; the original
    // computed allN from an uninitialized batchSize member (undefined value).
    // batchSize now defaults to 0, so allN is deterministically 0 until
    // setupLayer() runs.
    NN_layer(int sizeThis, int sizeNext, int pType) :
        type(pType), thisSize(sizeThis), nextSize(sizeNext), batchSize(0) {
        allW=thisSize*nextSize;
        allN=thisSize*batchSize;
    }
    NN_layer(int sizeThis, int sizeNext, int pBatchSize, int pType) :
        type(pType), thisSize(sizeThis), nextSize(sizeNext), batchSize(pBatchSize) {
        setupLayer(true);
    }
    // Allocate (newLayer==true) or re-allocate the buffers this layer type
    // needs. memTracker() only records byte counts for diagnostics.
    void setupLayer(bool newLayer) {
        atNeuronOutputs=device_vector<double>(batchSize*thisSize,0.0);
        allW=thisSize*nextSize;
        allN=thisSize*batchSize;
        memTracker(allN*8,false);
        // counterN/counterW are index sequences 0..allN-1 / 0..allW-1,
        // consumed by the thrust::transform calls elsewhere in the file.
        // (The original zero-filled then added a counting iterator; a direct
        // copy of the counting iterator produces the same contents.)
        counterN=device_vector<int>(allN);
        counterW=device_vector<int>(allW);
        thrust::copy(thrust::make_counting_iterator(0),thrust::make_counting_iterator(allN),counterN.begin());
        thrust::copy(thrust::make_counting_iterator(0),thrust::make_counting_iterator(allW),counterW.begin());
        memTracker(allN*sizeof(int),false);
        memTracker(allW*sizeof(int),false);
        if(newLayer) {
            if(type!=INPUT) {
                atNeuronInputs=device_vector<double>(batchSize*thisSize,0.0);
                memTracker(allN*8,false);
                biases=device_vector<double>(thisSize*batchSize,0.0);
                memTracker(allN*8*3,false);
                outerDeltaB=device_vector<double>(allN,0.0);
                innerDeltaB=device_vector<double>(allN,0.0);
            } else {
                // The original called hipFree(&vec) on host-side
                // thrust::device_vector objects — not device allocations, so
                // the calls errored out and freed nothing. Swapping with an
                // empty vector is the correct way to release the storage.
                device_vector<double>().swap(atNeuronInputs);
                device_vector<double>().swap(biases);
                device_vector<double>().swap(outerDeltaB);
                device_vector<double>().swap(innerDeltaB);
            }
            if(type!=OUTPUT) {
                weightsMatrix=device_vector<double>(thisSize*nextSize);
                memTracker(allW*8*3,false);
                // Allocate the deltas directly on the device (the original
                // staged them through host std::vectors).
                outerDeltaW=device_vector<double>(allW,0.0);
                innerDeltaW=device_vector<double>(allW,0.0);
                random_doubles(thrust::raw_pointer_cast(&weightsMatrix[0]),thisSize,nextSize);
                thrust::transform(weightsMatrix.begin(),weightsMatrix.end(),weightsMatrix.begin(),fix_random_numbers());
                cout << "thisSize: " << thisSize << " nextSize: " << nextSize << " thisSize*nextSize: " << (thisSize*nextSize) << endl;
            } else {
                device_vector<double>().swap(weightsMatrix);
                device_vector<double>().swap(outerDeltaW);
                device_vector<double>().swap(innerDeltaW);
            }
        } else {
            if(type!=INPUT) {
                atNeuronInputs=device_vector<double>(batchSize*thisSize,0.0);
            } else {
                device_vector<double>().swap(atNeuronInputs);
                device_vector<double>().swap(biases);
            }
            if(type==OUTPUT) {
                device_vector<double>().swap(weightsMatrix);
            }
        }
    }
    int type, thisSize, nextSize, batchSize, allW, allN;
    device_vector<int> counterN;    // index sequence 0..allN-1
    device_vector<int> counterW;    // index sequence 0..allW-1
};
// Per-worker argument bundle, heap-allocated by train_Quad and passed to
// fourthThread (which frees it on exit).
struct idLink {
    int whichThread;                 // worker index; also used to pick the GPU device
    int interval;                    // number of batches this worker processes per pass
    device_vector<double> *data;     // pointer to this worker's first input batch
    device_vector<double> *labels;   // pointer to this worker's first label batch
    vector<NN_layer> *NNlayersQ;     // this worker's replica of the network layers
    vector<int> *hiddenMatrix;       // layer sizes, including input and output layers
    double learningRate;
    int batchSize;
    hipblasHandle_t handle;          // NOTE(review): unused — fourthThread creates its own handle
};
// Worker-thread entry point for train_Quad. Each worker binds to its own
// GPU, runs forward/backward passes on its replica of the network, and
// accumulates gradients into outerDeltaB/outerDeltaW. Two barriers per
// epoch: `barrier` signals "gradients ready" (main thread merges and applies
// them), `barrier2` signals "weights updated, start the next pass".
// The idLink argument is heap-allocated by the spawner and freed here.
void *fourthThread(void *thread_parm) {
    idLink data=*((idLink*) thread_parm);
    int myID=data.whichThread;
    int myDev=myID;//3-myID;
    //if(myID==1){myDev=1;}
    // Bind to this worker's GPU and enable peer access with the other device
    // (device 0 enables access toward device 1, others toward device 0).
    if(myDev) {
        hipSetDevice(myDev);
        hipDeviceEnablePeerAccess(0,0);//hipDeviceEnablePeerAccess ( int peerDevice, unsigned int flags )
    } else {
        hipDeviceEnablePeerAccess(1,0);
    }
    //cout << "myID started: " << myID << endl;
    // Each worker creates its own BLAS handle; data.handle is ignored.
    hipblasHandle_t handle;//=data.handle;
    hipblasCreate(&handle);
    int howMany=data.interval;
    vector<int> hiddenMatrix=*data.hiddenMatrix;
    int layers=hiddenMatrix.size();
    int outputsIndex=layers-1;          // index of the output layer
    int batchSize=data.batchSize;
    int numOutputs=hiddenMatrix[outputsIndex];
    int mOut, ii, mPlus, nextSize, prevSize, thisSize;
    device_vector<double> *which;       // output of the previous layer fed forward
    bool gotTime=false;
    high_resolution_clock::time_point startTime, endTime;
    int timeCountDown=10;               // skip the first passes before timing one
    // gemm coefficients: C = 1*A*B + 0*C.
    const double alf = 1;
    const double bet = 0;
    const double *alpha = &alf;
    const double *beta = &bet;
    //double toDivideRMS=data.learningRate/(double)batchSize;
    while(!threadExit) {
        // Zero this replica's gradient accumulators for the new pass.
        for(int i=0;i<outputsIndex;++i) {
            ii=i+1;
            thrust::fill((*data.NNlayersQ)[ii].outerDeltaB.begin(),(*data.NNlayersQ)[ii].outerDeltaB.end(),0.0);
            thrust::fill((*data.NNlayersQ)[i].outerDeltaW.begin(),(*data.NNlayersQ)[i].outerDeltaW.end(),0.0);
        }//*/
        // NOTE(review): data.data/data.labels always point at this worker's
        // FIRST batch and are never advanced by h, so every iteration of this
        // loop trains on the same batch — confirm whether that is intended.
        for(int h=0;h<howMany;++h) {
            //cout << "myID: " << myID << " howMany: " << howMany << "\n";
            if(!myID && !gotTime && !timeCountDown) {
                startTime=high_resolution_clock::now();
            }
            //forward propagation
            which=data.data;
            for(int i=0;i<outputsIndex;++i) {
                //cout << "myID: " << myID << " here\n";
                ii=i+1;
                thisSize=hiddenMatrix[i];
                nextSize=hiddenMatrix[ii];
                // Column-major gemm: inputs(next x batch) = W(next x this) * outputs(this x batch).
                hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, nextSize, batchSize, thisSize, alpha, (*data.NNlayersQ)[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, (*data.NNlayersQ)[ii].atNeuronInputs.data().get(), nextSize);
                // Add biases and apply the sigmoid (forwardFeed_helper mutates atNeuronInputs).
                thrust::transform((*data.NNlayersQ)[ii].counterN.begin(),(*data.NNlayersQ)[ii].counterN.end(),(*data.NNlayersQ)[ii].atNeuronOutputs.begin(),forwardFeed_helper((*data.NNlayersQ)[ii].atNeuronInputs.data().get(),(*data.NNlayersQ)[ii].biases.data().get()));
                which=&(*data.NNlayersQ)[ii].atNeuronOutputs;
            }
            //Backward propagation
            mOut=outputsIndex-1;
            mPlus=outputsIndex;
            prevSize=hiddenMatrix[mOut];
            // Output-layer deltas: (out - label) * sigmoid'(input).
            thrust::transform((*data.NNlayersQ)[outputsIndex].counterN.begin(),(*data.NNlayersQ)[outputsIndex].counterN.end(),(*data.NNlayersQ)[outputsIndex].innerDeltaB.begin(),output_helper((*data.NNlayersQ)[outputsIndex].atNeuronOutputs.data().get(),(*data.NNlayersQ)[outputsIndex].atNeuronInputs.data().get(),(*data.NNlayersQ)[outputsIndex].innerDeltaB.data().get(),(*data.labels).data().get()));
            // Weight gradients for the last weight matrix: deltaB * prevOutputs^T.
            hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, numOutputs, prevSize, batchSize, alpha, (*data.NNlayersQ)[outputsIndex].innerDeltaB.data().get(), numOutputs, (*data.NNlayersQ)[mOut].atNeuronOutputs.data().get(), prevSize, beta, (*data.NNlayersQ)[mOut].innerDeltaW.data().get(), numOutputs);
            --mOut;
            // Walk the hidden layers backwards; inside this loop mOut == i-1.
            for(int i=outputsIndex-1;i;--i) {
                thisSize=hiddenMatrix[i];
                nextSize=hiddenMatrix[i+1];
                prevSize=hiddenMatrix[i-1];
                // Propagate deltas: deltaB(this) = W(this)^T * deltaB(next).
                hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, thisSize, batchSize, nextSize, alpha, (*data.NNlayersQ)[i].weightsMatrix.data().get(), nextSize, (*data.NNlayersQ)[i+1].innerDeltaB.data().get(), nextSize, beta, (*data.NNlayersQ)[i].innerDeltaB.data().get(), thisSize);
                if(i!=1) {
                    // Hidden layer: sigmoid' from the saved outputs.
                    thrust::transform((*data.NNlayersQ)[i].counterN.begin(),(*data.NNlayersQ)[i].counterN.end(),(*data.NNlayersQ)[i].innerDeltaB.begin(),backProp_helper2((*data.NNlayersQ)[i].atNeuronOutputs.data().get(),(*data.NNlayersQ)[i].innerDeltaB.data().get()));
                    hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, thisSize, prevSize, batchSize, alpha, (*data.NNlayersQ)[i].innerDeltaB.data().get(), thisSize, (*data.NNlayersQ)[i-1].atNeuronOutputs.data().get(), prevSize, beta, (*data.NNlayersQ)[mOut].innerDeltaW.data().get(), thisSize);
                } else {
                    // First hidden layer: sigmoid' from the pre-activations,
                    // and the weight gradient uses the raw input batch.
                    thrust::transform((*data.NNlayersQ)[i].counterN.begin(),(*data.NNlayersQ)[i].counterN.end(),(*data.NNlayersQ)[i].innerDeltaB.begin(),backProp_helper((*data.NNlayersQ)[i].innerDeltaB.data().get(),(*data.NNlayersQ)[i].atNeuronInputs.data().get()));
                    hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, thisSize, prevSize, batchSize, alpha, (*data.NNlayersQ)[i].innerDeltaB.data().get(), thisSize, (*data.data).data().get(), prevSize, beta, (*data.NNlayersQ)[mOut].innerDeltaW.data().get(), thisSize);
                }
                --mOut;
                --mPlus;
            }
            // Accumulate this batch's gradients into the pass totals.
            for(int i=0;i<outputsIndex;++i) {
                ii=i+1;
                thrust::transform((*data.NNlayersQ)[ii].innerDeltaB.begin(),(*data.NNlayersQ)[ii].innerDeltaB.end(),(*data.NNlayersQ)[ii].outerDeltaB.begin(),(*data.NNlayersQ)[ii].outerDeltaB.begin(),thrust::plus<double>());
                thrust::transform((*data.NNlayersQ)[i].innerDeltaW.begin(),(*data.NNlayersQ)[i].innerDeltaW.end(),(*data.NNlayersQ)[i].outerDeltaW.begin(),(*data.NNlayersQ)[i].outerDeltaW.begin(),thrust::plus<double>());
            }//*/
            /*for(int i=0;i<outputsIndex;++i) {
                ii=i+1;
                thrust::for_each((*data.NNlayersQ)[i].counterW.begin(),(*data.NNlayersQ)[i].counterW.end(),update_w((*data.NNlayersQ)[i].weightsMatrix.data().get(),(*data.NNlayersQ)[i].innerDeltaW.data().get(),toDivideRMS));
                thrust::for_each((*data.NNlayersQ)[ii].counterN.begin(),(*data.NNlayersQ)[ii].counterN.end(),update_b((*data.NNlayersQ)[ii].biases.data().get(),(*data.NNlayersQ)[ii].innerDeltaB.data().get(),toDivideRMS));
            }//*/
            /*for(int i=0;i<outputsIndex;++i) {
                thrust::for_each(make_counting_iterator(0),make_counting_iterator((*data.NNlayersQ)[i].allW),update_w((*data.NNlayersQ)[i].weightsMatrix.data().get(),(*data.outerDeltaW)[i].data().get(),toDivideRMS));
                thrust::for_each(make_counting_iterator(0),make_counting_iterator((*data.NNlayersQ)[i+1].allN),update_b((*data.NNlayersQ)[i+1].biases.data().get(),(*data.outerDeltaB)[i].data().get(),toDivideRMS));
            }//*/
        }
        // Worker 0 times one full pass (after a 10-pass warm-up) and reports it.
        if(!myID) {
            if(!gotTime) {
                if(timeCountDown) {
                    --timeCountDown;
                } else {
                    endTime=high_resolution_clock::now();
                    double seconds=duration_cast<microseconds>(endTime-startTime).count()/1000000.0;
                    printf("Update time interval approximately %.5f seconds apart(%.5f seconds per)\n",(seconds*(double)howMany)+1.0,seconds);
                    gotTime=true;
                }
            }
        }
        //}
        //cout << "thread: " << myID << " before barrier one\n";
        // Gradients ready: wait for the main thread to merge and apply them.
        pthread_barrier_wait(&barrier);
        //cout << "thread: " << myID << " after barrier one\n";
        // Weights updated: all workers resume together.
        pthread_barrier_wait(&barrier2);
        //cout << "thread: " << myID << " after barrier two\n";
    }
    hipblasDestroy(handle);
    free(thread_parm);   // allocated with malloc by the spawning thread
    pthread_exit(0);
}
// Device functor dividing each element by a fixed constant
// (used to average per-thread sums back into a single replica).
struct divFour : public thrust::unary_function<double, double> {
    double divisor;   // constant denominator supplied at construction
    divFour(double d) : divisor(d){}
    __device__ double operator()(double x) {
        return x/divisor;
    }
};
class neuralNet {
public:
    neuralNet(){}  // default: empty network, nothing allocated or configured
    // Restore a network from a saved-state file. loadState() (defined
    // elsewhere) is expected to populate hiddenMatrix, batchSize, weights
    // and biases from inFile — see saveStateQ for the on-disk layout.
    neuralNet(string _inFile) : inFile(_inFile) {
        cout << "Setting up network...\n";
        hipblasCreate(&handle);
        loadState();
        cout << "Layers: ";
        for(auto h:hiddenMatrix) {
            cout << h << " ";
        }
        cout << "Batch size: " << batchSize << endl << endl;
    }
//cublasXtHandle_t handlex;
    // Build a fresh multi-replica network for threaded training:
    // _hiddenMatrix lists the hidden-layer sizes; the input and output sizes
    // are prepended/appended here. One full set of layers (NNlayersQ[i]) is
    // created per worker thread, then weights/biases are copied from replica
    // 0 so all replicas start identical.
    neuralNet(int _numInputs, int _numOutputs, vector<int> &_hiddenMatrix, int pBatchSize, int _numThreads) :
        hiddenMatrix(_hiddenMatrix), RMS(DBL_MAX), minRMS(DBL_MAX), batchSize(pBatchSize) {
        //cublasXtCreate(&handlex);
        //int dev[2]={0,1};
        //cublasXtDeviceSelect(handlex,2,dev);
        numThreads=_numThreads;
        hipblasCreate(&handle);
        numInputs=_numInputs;
        numOutputs=_numOutputs;
        hiddenMatrix.insert(hiddenMatrix.begin(),numInputs);
        hiddenMatrix.push_back(numOutputs);
        for(int i=0;i<numThreads;++i) {
            NNlayersQ[i]=vector<NN_layer>(hiddenMatrix.size());
        }
        layers=hiddenMatrix.size();
        outputsIndex=layers-1;
        cout << "Setting up network...\n";
        cout << "Layers: ";
        for(auto h:hiddenMatrix) {
            cout << h << " ";
        }
        // NOTE(review): pBatchSize is overridden here — the batch size is
        // hard-coded to 10000 regardless of the constructor argument.
        batchSize=10000;
        cout << "Batch size: " << batchSize << endl << endl;
        /*int who;
        for(int i=3;i>0;--i) {
            if(i!=2) {
                hipSetDevice(i);
                for(int j=3;j>0;--j) {
                    if(i!=j) {
                        hipDeviceEnablePeerAccess(j,0);
                        hipDeviceCanAccessPeer(&who, i, j);//hipDeviceCanAccessPeer(int* canAccessPeer, int device, int peerDevice);
                        cout << "who returned: " << who << " for device: " << i << " peerDevice: " << j << endl;
                    }
                }
            }
        }
        hipSetDevice(3);//*/
        // Allocate every replica's layers (INPUT / HIDDEN... / OUTPUT).
        for(int j=0;j<numThreads;++j) {
            //hipSetDevice(3-j);
            NNlayersQ[j][0]=NN_layer(hiddenMatrix[0],hiddenMatrix[1],batchSize,INPUT);
            for(int i=1;i<outputsIndex;++i) {
                NNlayersQ[j][i]=NN_layer(hiddenMatrix[i],hiddenMatrix[i+1],batchSize,HIDDEN);
            }
            NNlayersQ[j][outputsIndex]=NN_layer(hiddenMatrix[outputsIndex],0,batchSize,OUTPUT);
        }
        // Synchronize the random initial weights/biases across replicas.
        for(int i=1;i<numThreads;++i) {
            for(int j=0;j<outputsIndex;++j) {
                thrust::copy(NNlayersQ[i-1][j].weightsMatrix.begin(),NNlayersQ[i-1][j].weightsMatrix.end(),NNlayersQ[i][j].weightsMatrix.begin());
                thrust::copy(NNlayersQ[i-1][j+1].biases.begin(),NNlayersQ[i-1][j+1].biases.end(),NNlayersQ[i][j+1].biases.begin());
            }
        }
    }
    // Multi-threaded trainer. Pre-builds all batches in device memory, spawns
    // numThreads pthread workers (fourthThread) that each accumulate gradients
    // on their own replica, then (between the two barriers) this thread merges
    // the gradients, applies the SGD update to every replica, and periodically
    // scores the training and test sets using replica 0.
    // NOTE(review): doDataSetSize is ignored (dataSetSize is hard-coded to
    // 60000) and vlRate is never read — confirm intended.
    void train_Quad(vector<vector<double>> &pData, vector<vector<double>> &pLabels, ULLI maxIter,
        float RMSwant, int doDataSetSize, double lRate, vector<vector<double>> &pTestData, vector<vector<double>> &pTestLabels, bool vlRate) {
        if(!showInterval) {
            showInterval=10;
        }
        // Collapse the one-hot training labels to class indices for scoring.
        vector<int> bLabels;
        for(auto p:pLabels) {
            bLabels.push_back(std::max_element(p.begin(), p.end())-p.begin());
        }
        // gemm coefficients: C = 1*A*B + 0*C.
        const double alf = 1;
        const double bet = 0;
        const double *alpha = &alf;
        const double *beta = &bet;
        if(lRate<0.0) {
            learningRate=0.05;
        } else {
            learningRate=lRate;
        }
        dataSetSize=60000;
        int testBatchSize=10000;
        int batchStart,batchEnd, thisSize, nextSize;
        RMSwanted=RMSwant;
        maxEpochs=maxIter;
        itemSize=pData[0].size();
        int testSetSize=pTestData.size();
        // Test labels collapsed to class indices as well.
        vector<int> btLabels;
        device_vector<double> testData[testSetSize/testBatchSize];
        if(testSetSize) {
            for(auto p:pTestLabels) {
                btLabels.push_back(std::max_element(p.begin(), p.end())-p.begin());
            }
        }
        // Per-thread, per-batch device buffers (data[thread][batch]).
        device_vector<double> data[numThreads][dataSetSize/(batchSize*numThreads)];
        device_vector<double> labels[numThreads][dataSetSize/(batchSize*numThreads)];
        //Creating pre-made batches so I can simply copy them to layer[0]
        cout << "Making batches in memory...\n";
        int whichBatch=0;
        int iii=0;                                   // which thread's slice we are filling
        int itemsPerThread=dataSetSize/numThreads;
        int batchesEach;                             // batches per thread (set when the first slice is full)
        //cout << "itemsPerThread: " << itemsPerThread << endl;
        for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
            batchStart=0;
            batchEnd=0;
            if(((iii+1)*itemsPerThread)==itemNum) {
                ++iii;
                batchesEach=whichBatch;
                whichBatch=0;
            }
            //cout << "iii+1: " << (iii+1) << " itemNum: " << itemNum << " *:" << ((iii+1)*itemsPerThread) << " whichBatch: " << whichBatch << endl;
            data[iii][whichBatch]=vector<double>(itemSize*batchSize);
            memTracker(itemSize*batchSize*8,false);
            labels[iii][whichBatch]=vector<double>(batchSize*numOutputs);
            memTracker(numOutputs*batchSize*8,false);
            for(int b=0;b<batchSize;++b) {
                thrust::copy(pData[itemNum+b].begin(),pData[itemNum+b].end(),data[iii][whichBatch].begin()+batchStart);
                thrust::copy(pLabels[itemNum+b].begin(),pLabels[itemNum+b].end(),labels[iii][whichBatch].begin()+batchEnd);
                batchStart+=itemSize;
                batchEnd+=numOutputs;
            }
            ++whichBatch;
        }
        // Pre-build the test batches the same way.
        whichBatch=0;
        for(int itemNum=0;itemNum<testSetSize;itemNum+=testBatchSize) {
            testData[whichBatch]=vector<double>(itemSize*testBatchSize);
            memTracker(itemSize*testBatchSize*8,false);
            batchStart=0;
            for(int j=0;j<testBatchSize;++j) {
                thrust::copy(pTestData[itemNum+j].begin(),pTestData[itemNum+j].end(),testData[whichBatch].begin()+batchStart);
                batchStart+=itemSize;
            }
            ++whichBatch;
        }
        cout << "Starting training...\n";
        device_vector<double>::iterator iter;
        int position;
        int gotRight=0;
        //int numBatches=dataSetSize/batchSize;
        //toDivideRMS=learningRate/((double)numBatches*(double)batchSize);
        //toDivideRMS=learningRate/((double)batchSize*(double)showInterval);
        //toDivideRMS=learningRate/((double)batchSize*(double)num_nodes);//*(double)showInterval);
        // SGD step scale: learning rate averaged over the batch.
        toDivideRMS=learningRate/(double)batchSize;
        //toDivideRMS=learningRate/(double)showInterval;
        int maxGotRight=0, maxTestRight=-1, ii;
        device_vector<double> *which;
        double seconds, totalTime=0.0;
        high_resolution_clock::time_point startTime, endTime;
        int showIntervalCountDown=showInterval;
        //int sInterval=showInterval;
        bool once=true;                      // spawn the workers only on the first epoch
        vector<pthread_t> threads;
        pthread_attr_t attr;
        cpu_set_t cpus;
        pthread_attr_init(&attr);
        divFour dThreads((double)numThreads);
        //multi_helper hTimes((double)numberOfProcessors);
        /*vector<double> tempDeltaB[outputsIndex];
        vector<double> tempDeltaW[outputsIndex];
        for(int i=0;i<outputsIndex;++i) {
            tempDeltaW[i]=vector<double>(NNlayersQ[0][i].allW,0.0);
            memTracker(NNlayersQ[0][i].allW*8,false);
            tempDeltaB[i]=vector<double>(NNlayersQ[0][i+1].allN,0.0);
            memTracker(NNlayersQ[0][i+1].allN*8,false);
        }*/
        memTracker(0,true);
        for(int epochNum=0;!threadExit && epochNum<maxEpochs && maxGotRight!=dataSetSize && maxTestRight!=testSetSize;++epochNum) {//epochNum+=sInterval) {
            startTime=high_resolution_clock::now();
            // First epoch: spawn one pinned worker per CPU/GPU. The idLink
            // argument is malloc'd here and freed by the worker on exit.
            if(once) {
                for(int j=0;j<numThreads;++j) {
                    CPU_ZERO(&cpus);
                    CPU_SET(j, &cpus);
                    pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
                    threads.push_back(pthread_t());
                    idLink *arg = (idLink*)malloc(sizeof(*arg));
                    (*arg).whichThread=j;
                    (*arg).data=&data[j][0];
                    (*arg).labels=&labels[j][0];
                    (*arg).hiddenMatrix=&hiddenMatrix;
                    (*arg).interval=batchesEach;
                    (*arg).NNlayersQ=&NNlayersQ[j];
                    (*arg).learningRate=learningRate;
                    (*arg).batchSize=batchSize;
                    (*arg).handle=handle;
                    pthread_create(&threads.at(j), &attr, fourthThread, arg);
                }
                once=false;
            }
            // Wait for all workers to finish accumulating this pass's gradients.
            pthread_barrier_wait(&barrier);
            //cout << "all got to here\n";
            // Merge every replica's accumulated gradients into replica 0.
            for(int i=1;i<numThreads;++i) {
                for(int j=0;j<outputsIndex;++j) {
                    ii=j+1;
                    thrust::transform(NNlayersQ[0][j].outerDeltaW.begin(),NNlayersQ[0][j].outerDeltaW.end(),NNlayersQ[i][j].outerDeltaW.begin(),NNlayersQ[0][j].outerDeltaW.begin(),thrust::plus<double>());
                    thrust::transform(NNlayersQ[0][ii].outerDeltaB.begin(),NNlayersQ[0][ii].outerDeltaB.end(),NNlayersQ[i][ii].outerDeltaB.begin(),NNlayersQ[0][ii].outerDeltaB.begin(),thrust::plus<double>());
                    //thrust::transform(NNlayersQ[0][j].innerDeltaW.begin(),NNlayersQ[0][j].innerDeltaW.end(),NNlayersQ[i][j].innerDeltaW.begin(),NNlayersQ[0][j].innerDeltaW.begin(),thrust::plus<double>());
                    //thrust::transform(NNlayersQ[0][ii].innerDeltaB.begin(),NNlayersQ[0][ii].innerDeltaB.end(),NNlayersQ[i][ii].innerDeltaB.begin(),NNlayersQ[0][ii].innerDeltaB.begin(),thrust::plus<double>());
                }
            }
            /*for(int j=0;j<outputsIndex;++j) {
                ii=j+1;
                thrust::for_each(NNlayersQ[0][j].counterW.begin(),NNlayersQ[0][j].counterW.end(),update_w(&NNlayersQ[0][j].weightsMatrix[0],&NNlayersQ[0][j].outerDeltaW[0],toDivideRMS));
                thrust::for_each(NNlayersQ[0][ii].counterN.begin(),NNlayersQ[0][ii].counterN.end(),update_b(&NNlayersQ[0][ii].biases[0],&NNlayersQ[0][ii].outerDeltaB[0],toDivideRMS));
            }//*/
            // Apply the SGD update to every replica.
            // NOTE(review): replica 0 is updated with the merged totals but
            // replicas i>0 use only their OWN outerDelta — the replicas drift
            // apart over time; confirm whether that is intended.
            for(int i=0;i<numThreads;++i) {
                for(int j=0;j<outputsIndex;++j) {
                    ii=j+1;
                    //thrust::for_each(NNlayersQ[i][j].counterW.begin(),NNlayersQ[i][j].counterW.end(),update_w(NNlayersQ[i][j].weightsMatrix.data().get(),NNlayersQ[0][j].innerDeltaW.data().get(),toDivideRMS));
                    //thrust::for_each(NNlayersQ[i][ii].counterN.begin(),NNlayersQ[i][ii].counterN.end(),update_b(NNlayersQ[i][ii].biases.data().get(),NNlayersQ[0][ii].innerDeltaB.data().get(),toDivideRMS));
                    //thrust::for_each(NNlayersQ[i][j].counterW.begin(),NNlayersQ[i][j].counterW.end(),update_w(&NNlayersQ[i][j].weightsMatrix[0],&tempDeltaW[j][0],toDivideRMS));
                    //thrust::for_each(NNlayersQ[i][ii].counterN.begin(),NNlayersQ[i][ii].counterN.end(),update_b(&NNlayersQ[i][ii].biases[0],&tempDeltaB[j][0],toDivideRMS));
                    thrust::for_each(NNlayersQ[i][j].counterW.begin(),NNlayersQ[i][j].counterW.end(),update_w(NNlayersQ[i][j].weightsMatrix.data().get(),NNlayersQ[i][j].outerDeltaW.data().get(),toDivideRMS));
                    thrust::for_each(NNlayersQ[i][ii].counterN.begin(),NNlayersQ[i][ii].counterN.end(),update_b(NNlayersQ[i][ii].biases.data().get(),NNlayersQ[i][ii].outerDeltaB.data().get(),toDivideRMS));
                }
            }//*/
            // Only score every showInterval-th epoch; otherwise release the
            // workers immediately and continue.
            if(showIntervalCountDown) {
                --showIntervalCountDown;
                endTime=high_resolution_clock::now();
                seconds=duration_cast<microseconds>(endTime-startTime).count()/1000000.0;
                totalTime+=seconds;
                pthread_barrier_wait(&barrier2);
                continue;
            } else {
                showIntervalCountDown=showInterval;
                /*for(int i=0;i<outputsIndex;++i) {
                    ii=i+1;
                    //MPI_Allreduce(&NNlayersQ[0][i].outerDeltaW[0],&NNlayersQ[1][i].outerDeltaW[0],NNlayersQ[0][i].allW,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
                    //MPI_Allreduce(&NNlayersQ[0][ii].outerDeltaB[0],&NNlayersQ[1][ii].outerDeltaB[0],NNlayersQ[0][ii].allN,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
                    //MPI_Allreduce(&NNlayersQ[0][i].innerDeltaW[0],&tempDeltaW[i][0],NNlayersQ[0][i].allW,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
                    //MPI_Allreduce(&NNlayersQ[0][i+1].innerDeltaB[0],&tempDeltaB[i][0],NNlayersQ[0][i+1].allN,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
                    thrust::transform(NNlayersQ[0][i].weightsMatrix.begin(),NNlayersQ[0][i].weightsMatrix.end(),NNlayersQ[2][i].weightsMatrix.begin(),hTimes);
                    thrust::transform(NNlayersQ[0][ii].biases.begin(),NNlayersQ[0][ii].biases.end(),NNlayersQ[2][ii].biases.begin(),hTimes);
                    //MPI_Allreduce(&NNlayersQ[2][i].weightsMatrix[0],&NNlayersQ[1][i].weightsMatrix[0],NNlayersQ[0][i].allW,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
                    //MPI_Allreduce(&NNlayersQ[2][ii].biases[0],&NNlayersQ[1][ii].biases[0],NNlayersQ[0][ii].allN,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
                    thrust::transform(NNlayersQ[1][i].weightsMatrix.begin(),NNlayersQ[1][i].weightsMatrix.end(),NNlayersQ[0][i].weightsMatrix.begin(),dThreads);
                    thrust::transform(NNlayersQ[1][ii].biases.begin(),NNlayersQ[1][ii].biases.end(),NNlayersQ[0][ii].biases.begin(),dThreads);
                }*/
                /*for(int i=0;i<numThreads;++i) {
                    for(int j=0;j<outputsIndex;++j) {
                        ii=j+1;
                        //thrust::for_each(NNlayersQ[i][j].counterW.begin(),NNlayersQ[i][j].counterW.end(),update_w(&NNlayersQ[i][j].weightsMatrix[0],&NNlayersQ[1][j].outerDeltaW[0],toDivideRMS));
                        //thrust::for_each(NNlayersQ[i][ii].counterN.begin(),NNlayersQ[i][ii].counterN.end(),update_b(&NNlayersQ[i][ii].biases[0],&NNlayersQ[1][ii].outerDeltaB[0],toDivideRMS));
                        thrust::copy(NNlayersQ[0][j].weightsMatrix.begin(),NNlayersQ[0][j].weightsMatrix.end(),NNlayersQ[i][j].weightsMatrix.begin());
                        thrust::copy(NNlayersQ[0][ii].biases.begin(),NNlayersQ[0][ii].biases.end(),NNlayersQ[i][ii].biases.begin());
                    }
                }*/
            }
            // Score the full training set with replica 0 (forward pass only).
            gotRight=0;
            whichBatch=0;
            iii=0;
            for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
                if(((iii+1)*itemsPerThread)==itemNum) {
                    ++iii;
                    whichBatch=0;
                }
                //forward propagation
                which=&data[iii][whichBatch];
                for(int i=0;i<outputsIndex;++i) {
                    ii=i+1;
                    thisSize=hiddenMatrix[i];
                    nextSize=hiddenMatrix[ii];
                    // Column-major gemm: inputs(next x batch) = W * outputs(this x batch).
                    hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayersQ[0][i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayersQ[0][ii].atNeuronInputs.data().get(), nextSize);
                    thrust::transform(NNlayersQ[0][ii].counterN.begin(),NNlayersQ[0][ii].counterN.end(),NNlayersQ[0][ii].atNeuronOutputs.begin(),forwardFeed_helper(NNlayersQ[0][ii].atNeuronInputs.data().get(),NNlayersQ[0][ii].biases.data().get()));
                    which=&NNlayersQ[0][ii].atNeuronOutputs;
                }
                batchStart=0;
                batchEnd=numOutputs;
                //printf("\nbatch starting at: %d\n",itemNum);
                // argmax over each item's output vector vs the label index.
                for(int b=0;b<batchSize;++b) {
                    iter = thrust::max_element(NNlayersQ[0][outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayersQ[0][outputsIndex].atNeuronOutputs.begin()+batchEnd);
                    position = iter - NNlayersQ[0][outputsIndex].atNeuronOutputs.begin();
                    position -= batchStart;
                    /*printf("output: %d expected: %d\n",position,bLabels[itemNum+b]);
                    for(int ot=batchStart;ot<batchEnd;++ot) {
                        double oo=NNlayersQ[0][outputsIndex].atNeuronOutputs[ot];
                        printf("%.5f ",oo);
                    }
                    printf("\n");//*/
                    if(position==bLabels[itemNum+b]) {
                        ++gotRight;
                    }
                    batchStart=batchEnd;
                    batchEnd+=numOutputs;
                }
                ++whichBatch;
            }
            if(gotRight>maxGotRight){maxGotRight=gotRight;}
            printf("Epoch: %d-Got %d of %d-max right: %d-lRate: %.5f-",epochNum,gotRight,dataSetSize,maxGotRight,learningRate);
            // Score the test set the same way.
            gotRight=0;
            whichBatch=0;
            for(int t=0;t<testSetSize;t+=testBatchSize) {
                which=&testData[whichBatch];
                for(int i=0;i<outputsIndex;++i) {
                    ii=i+1;
                    thisSize=hiddenMatrix[i];
                    nextSize=hiddenMatrix[ii];
                    hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, nextSize, testBatchSize, thisSize, alpha, NNlayersQ[0][i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayersQ[0][ii].atNeuronInputs.data().get(), nextSize);
                    thrust::transform(NNlayersQ[0][ii].counterN.begin(),NNlayersQ[0][ii].counterN.end(),NNlayersQ[0][ii].atNeuronOutputs.begin(),forwardFeed_helper(NNlayersQ[0][ii].atNeuronInputs.data().get(),NNlayersQ[0][ii].biases.data().get()));
                    which=&NNlayersQ[0][ii].atNeuronOutputs;
                }
                batchStart=0;
                batchEnd=numOutputs;
                //printf("\nbatch starting at: %d\n",t);
                for(int b=0;b<testBatchSize;++b) {
                    iter = thrust::max_element(NNlayersQ[0][outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayersQ[0][outputsIndex].atNeuronOutputs.begin()+batchEnd);
                    position = iter - NNlayersQ[0][outputsIndex].atNeuronOutputs.begin();
                    position -= batchStart;
                    /*printf("output: %d expected: %d\n",position,btLabels[t+b]);
                    for(int ot=batchStart;ot<batchEnd;++ot) {
                        double oo=NNlayersQ[0][outputsIndex].atNeuronOutputs[ot];
                        printf("%.5f ",oo);
                    }
                    printf("\n");//*/
                    if(position==btLabels[t+b]) {
                        ++gotRight;
                    }
                    batchStart=batchEnd;
                    batchEnd+=numOutputs;
                }
                ++whichBatch;
            }
            if(gotRight>maxTestRight){maxTestRight=gotRight;}
            endTime=high_resolution_clock::now();
            seconds=duration_cast<microseconds>(endTime-startTime).count()/1000000.0;
            totalTime+=seconds;
            double errRate=(1.0-((double)gotRight/(double)testSetSize))*100.0;
            printf("Test-Got %d of %d-max right: %d-totTime: %.5f-eRate:%.5f perc\n",gotRight,testSetSize,maxTestRight,totalTime,errRate);
            // Perfect test score ends training; workers see threadExit after barrier2.
            if(testSetSize!=gotRight) {
                /*for(int i=1;i<numThreads;++i) {
                    for(int j=0;j<outputsIndex;++j) {
                        ii=j+1;
                        thrust::copy(NNlayersQ[0][j].weightsMatrix.begin(),NNlayersQ[0][j].weightsMatrix.end(),NNlayersQ[i][j].weightsMatrix.begin());
                        thrust::copy(NNlayersQ[0][ii].biases.begin(),NNlayersQ[0][ii].biases.end(),NNlayersQ[i][ii].biases.begin());
                    }
                }*/
            } else {
                threadExit=true;
            }
            pthread_barrier_wait(&barrier2);
        }
        // Reap the worker threads.
        int status;
        void * result;
        for (int i=0; i < numThreads; ++i) {
            if ((status = pthread_join(threads.at(i), &result)) != 0) {
                fprintf (stderr, "join error %d: %s\n", status, strerror(status));
            }
        }
        //saveStateQ("MPIv2-");
    }
int numThreads;
void saveStateQ(string outFile) {
outFile+="Cuda-"+to_string(dataSetSize);
cout << "Writing weights to file: " << outFile << endl;
ofstream oFile(outFile, ios::binary|ios::out);
if(oFile.is_open()) {
oFile.write((char*)&epoch,sizeof(ULLI));
oFile.write((char*)&layers,sizeof(ULLI));
for(int i=0;i<hiddenMatrix.size();++i) {
oFile.write((char*)&hiddenMatrix[i],sizeof(int));
}
oFile.write((char*)&batchSize,sizeof(int));
oFile.write((char*)&learningRate,sizeof(double));
for(int i=0;i<outputsIndex;++i) {
for(int j=0;j<NNlayersQ[0][i].allW;++j) {
double o=NNlayersQ[0][i].weightsMatrix[j];
oFile.write((char*)&o,sizeof(double));
}
}
for(int i=1;i<layers;++i) {
for(int j=0;j<NNlayersQ[0][i].allN;++j) {
double o=NNlayersQ[0][i].biases[j];
oFile.write((char*)&o,sizeof(double));
}
}
oFile.close();
}
cout << "Done\n";
}
    // Build a fresh single-replica network (used by train_MatMul).
    // _hiddenMatrix lists the hidden-layer sizes; the input and output sizes
    // are prepended/appended here.
    neuralNet(int _numInputs, int _numOutputs, vector<int> &_hiddenMatrix, int pBatchSize) :
        hiddenMatrix(_hiddenMatrix), RMS(DBL_MAX), minRMS(DBL_MAX), batchSize(pBatchSize) {
        //cublasXtCreate(&handlex);
        //int dev[3]={1,2,3};
        //cublasXtDeviceSelect(handlex,3,dev);
        // NOTE(review): any requested batch size below 100 is silently
        // replaced by 10000 — confirm this floor is intended.
        if(batchSize<100) {
            batchSize=10000;
        }
        hipblasCreate(&handle);
        numInputs=_numInputs;
        numOutputs=_numOutputs;
        hiddenMatrix.insert(hiddenMatrix.begin(),numInputs);
        hiddenMatrix.push_back(numOutputs);
        NNlayers=vector<NN_layer>(hiddenMatrix.size());
        layers=hiddenMatrix.size();
        outputsIndex=layers-1;
        cout << "Setting up network...\n";
        cout << "Layers: ";
        for(auto h:hiddenMatrix) {
            cout << h << " ";
        }
        cout << "Batch size: " << batchSize << endl << endl;
        // INPUT layer, hidden layers, then the OUTPUT layer (no next size).
        NNlayers[0]=NN_layer(hiddenMatrix[0],hiddenMatrix[1],batchSize,INPUT);
        for(int i=1;i<outputsIndex;++i) {
            NNlayers[i]=NN_layer(hiddenMatrix[i],hiddenMatrix[i+1],batchSize,HIDDEN);
        }
        NNlayers[outputsIndex]=NN_layer(hiddenMatrix[outputsIndex],0,batchSize,OUTPUT);
    }
void train_MatMul(vector<vector<float>> &pData, vector<vector<double>> &pLabels, ULLI maxIter,
float RMSwant, int doDataSetSize, double lRate, vector<vector<float>> &pTestData, vector<vector<double>> &pTestLabels, bool vlRate) {
if(!showInterval) {
showInterval=10;
}
vector<UNCHAR> bLabels;
for(auto p:pLabels) {
bLabels.push_back((UNCHAR)(thrust::max_element(p.begin(), p.end())-p.begin()));
}
if(lRate<0.0) {
learningRate=0.05;
} else {
learningRate=lRate;
}
if(!doDataSetSize) {
doDataSetSize=60000;
}
dataSetSize=doDataSetSize;
const double alf = 1;
const double bet = 0;
const double *alpha = &alf;
const double *beta = &bet;
int batchStart,batchEnd, thisSize, nextSize;
RMSwanted=RMSwant;
maxEpochs=maxIter;
itemSize=pData[0].size();
int testSetSize=pTestData.size();
vector<UNCHAR> btLabels;
device_vector<double> testData[testSetSize/batchSize];
if(testSetSize) {
for(auto p:pTestLabels) {
btLabels.push_back((UNCHAR)(thrust::max_element(p.begin(), p.end())-p.begin()));
}
} else {
hipFree(&testData);
}
int numBatches=dataSetSize/batchSize;
device_vector<double> data[numBatches];
device_vector<double> labels[numBatches];
//float *temp;
//double *tempd;
//ULLI len=pData[0].size();
//ULLI llen=pLabels[0].size();
/*for(int i=0;i<dataSetSize;++i) {
temp=&pData[i][0];
dataTemp[i]=device_vector<float>(temp, temp+len);
tempd=&pLabels[i][0];
labelsTemp[i]=device_vector<double>(tempd, tempd+llen);
}*/
//Creating pre-made batches so I can simply copy them to layer[0]
cout << "Making batches in video memory...\n";
int whichBatch=0;
for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
batchStart=0;
batchEnd=0;
data[whichBatch]=device_vector<double>(itemSize*batchSize);
memTracker(itemSize*batchSize*8,false);
labels[whichBatch]=device_vector<double>(batchSize*numOutputs);
memTracker(numOutputs*batchSize*8,false);
for(int b=0;b<batchSize;++b) {
//temp=&pData[itemNum+b][0];
//dataTemp=device_vector<float>(temp, temp+len);
//tempd=&pLabels[itemNum+b][0];
//labelsTemp=device_vector<double>(tempd, tempd+llen);
//thrust::transform(dataTemp[itemNum+b].begin(),dataTemp[itemNum+b].end(),dataTransposeTemp.begin()+batchStart,floatToDoubleFunctor());
//thrust::transform(dataTemp.begin(),dataTemp.end(),data[whichBatch].begin()+batchStart,floatToDoubleFunctor());
//thrust::transform((device_vector<float>(temp, temp+len)).begin(),(device_vector<float>(temp, temp+len)).end(),data[whichBatch].begin()+batchStart,floatToDoubleFunctor());
thrust::copy(pData[itemNum+b].begin(),pData[itemNum+b].end(),data[whichBatch].begin()+batchStart);//,floatToDoubleFunctor());
//thrust::copy(dataTemp[itemNum+b].begin(),dataTemp[itemNum+b].end(),dataTransposeTemp.begin()+batchStart);
//thrust::copy(labelsTemp[itemNum+b].begin(),labelsTemp[itemNum+b].end(),batchLabels.begin()+batchEnd);
//thrust::copy(labelsTemp.begin(),labelsTemp.end(),labels[whichBatch].begin()+batchEnd);
thrust::copy(pLabels[itemNum+b].begin(),pLabels[itemNum+b].end(),labels[whichBatch].begin()+batchEnd);
batchStart+=itemSize;
batchEnd+=numOutputs;
}
//hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, batchSize, numOutputs, alpha, batchLabels.data().get(), numOutputs, beta, batchLabels.data().get(), numOutputs, labels[whichBatch].data().get(), batchSize);
//hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, batchSize, itemSize, alpha, dataTransposeTemp.data().get(), itemSize, beta, dataTransposeTemp.data().get(), itemSize, data[whichBatch].data().get(), batchSize);
++whichBatch;
}
whichBatch=0;
for(int i=0;i<testSetSize;i+=batchSize) {
testData[whichBatch]=device_vector<double>(itemSize*batchSize);
memTracker(itemSize*batchSize*8,false);
batchStart=0;
for(int j=0;j<batchSize;++j) {
//temp=&pTestData[i+j][0];
//dataTemp=device_vector<float>(temp, temp+len);
//tempd=&pTestLabels[i][0];
//labelsTemp=device_vector<double>(tempd, tempd+llen);
//thrust::transform(dataTemp.begin(),dataTemp.end(),testData[whichBatch].begin()+batchStart,floatToDoubleFunctor());
//thrust::transform((device_vector<float>(temp, temp+len)).begin(),(device_vector<float>(temp, temp+len)).end(),testData[i].begin(),floatToDoubleFunctor());
thrust::copy(pTestData[i+j].begin(),pTestData[i+j].end(),testData[whichBatch].begin()+batchStart);
//thrust::copy(labelsTemp.begin(),labelsTemp.end(),testLabels[i].begin());
batchStart+=itemSize;
}
++whichBatch;
}
int mOut=outputsIndex-2;
/*zipIterator begin2[outputsIndex];
zipIterator end2[outputsIndex];
zipIterator begin1[outputsIndex];
zipIterator end1[outputsIndex];
for(int i=outputsIndex-1;i;--i) {
begin2[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronOutputs.begin(), innerDeltaB[mOut].begin()));
end2[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronOutputs.end(), innerDeltaB[mOut].end()));
begin1[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronInputs.begin(), innerDeltaB[mOut].begin()));
end1[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronInputs.end(), innerDeltaB[mOut--].end()));
}
backProp_helper2 backProp2;
backProp_helper backProp;
//zipIterator fBegin[layers];
//zipIterator fEnd[layers];
forwardFeed_helper forwardFeed[layers];
for(int i=1;i<layers;++i) {
forwardFeed[i]=forwardFeed_helper(NNlayers[i].atNeuronInputs.data().get(),NNlayers[i].biases.data().get());
//fBegin[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronInputs.begin(),NNlayers[i].biases.begin()));
//fEnd[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronInputs.end(),NNlayers[i].biases.end()));
}*/
//forwardFeed_helper forwardFeed;
cout << "Starting training...\n";
memTracker(0,true);
//hipFree(&dataTemp);
//hipFree(&labelsTemp);
//hipFree(&dataTransposeTemp);
//hipFree(&batchLabels);
thrust::device_vector<double>::iterator iter;
int position;
int gotRight=0, prevSize;
//toDivideRMS=learningRate;
//toDivideRMS=learningRate/((double)numBatches*(double)batchSize);
toDivideRMS=learningRate/(double)batchSize;
//toDivideRMS=learningRate/(double)numBatches;
int maxGotRight=0, maxTestRight=-1, ii;
device_vector<double> *which;
double origLearningRate=learningRate, seconds, totalTime=0.0;
high_resolution_clock::time_point startTime, endTime;
int showIntervalCountDown=showInterval;
double lastNoShowTime=0.0;
int timeEstCountDown=10, mPlus;
for(int epochNum=0;!threadExit && epochNum<maxEpochs && maxGotRight!=dataSetSize && maxTestRight!=testSetSize;++epochNum) {
whichBatch=0;
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::fill(NNlayers[ii].outerDeltaB.begin(),NNlayers[ii].outerDeltaB.end(),0.0);
thrust::fill(NNlayers[i].outerDeltaW.begin(),NNlayers[i].outerDeltaW.end(),0.0);
}//*/
if(!showIntervalCountDown) {
gotRight=0;
}
startTime=high_resolution_clock::now();
for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
//forward propagation
which=&data[whichBatch];
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[ii];
//cublasXtDgemm(handlex, HIPBLAS_OP_N, HIPBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayers[ii].atNeuronInputs.data().get(), nextSize);
//hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayers[ii].atNeuronInputs.data().get(), nextSize);
//thrust::transform(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[ii].allN),NNlayers[ii].atNeuronOutputs.begin(),forwardFeed_helper(NNlayers[ii].atNeuronInputs.data().get(),NNlayers[ii].biases.data().get()));
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayers[ii].atNeuronInputs.data().get(), nextSize);
thrust::transform(NNlayers[ii].counterN.begin(),NNlayers[ii].counterN.end(),NNlayers[ii].atNeuronOutputs.begin(),forwardFeed_helper(NNlayers[ii].atNeuronInputs.data().get(),NNlayers[ii].biases.data().get()));
which=&NNlayers[ii].atNeuronOutputs;
}
//first check how many we got right
if(!showIntervalCountDown) {
batchStart=0;
batchEnd=numOutputs;
//printf("\nbatch starting at: %d\n",itemNum);
for(int b=0;b<batchSize;++b) {
iter = thrust::max_element(NNlayers[outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayers[outputsIndex].atNeuronOutputs.begin()+batchEnd);
position = iter - NNlayers[outputsIndex].atNeuronOutputs.begin();
position -= batchStart;
//printf("output: %d expected: %d\n",position,bLabels[itemNum+b]);
//for(int ot=batchStart;ot<batchEnd;++ot) {
// double oo=NNlayers[outputsIndex].atNeuronOutputs[ot];
// printf("%.5f ",oo);
//}
//printf("\n");
if(position==bLabels[itemNum+b]) {
++gotRight;
}
batchStart=batchEnd;
batchEnd+=numOutputs;
}
}
//Backward propagation
mOut=outputsIndex-1;
mPlus=outputsIndex;
prevSize=hiddenMatrix[mOut];
//which=&innerDeltaB[mOut];
thrust::transform(NNlayers[outputsIndex].counterN.begin(),NNlayers[outputsIndex].counterN.end(),NNlayers[outputsIndex].innerDeltaB.begin(),output_helper(NNlayers[outputsIndex].atNeuronOutputs.data().get(),NNlayers[outputsIndex].atNeuronInputs.data().get(),NNlayers[outputsIndex].innerDeltaB.data().get(),labels[whichBatch].data().get()));
//thrust::transform(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[outputsIndex].allN),(*which).begin(),output_helper(NNlayers[outputsIndex].atNeuronOutputs.data().get(),NNlayers[outputsIndex].atNeuronInputs.data().get(),(*which).data().get(),labels[whichBatch].data().get()));
//thrust::transform(counterBegin,nodesCounterEnd[outputsIndex],innerDeltaB[mOut].begin(),output_helper(NNlayers[outputsIndex].atNeuronOutputs.data().get(),NNlayers[outputsIndex].atNeuronInputs.data().get(),innerDeltaB[mOut].data().get(),labels[whichBatch].data().get()));
//cublasXtDgemm(handlex, HIPBLAS_OP_N, HIPBLAS_OP_T, numOutputs, prevSize, batchSize, alpha, innerDeltaB[mOut].data().get(), numOutputs, NNlayers[mOut].atNeuronOutputs.data().get(), prevSize, beta, innerDeltaW[mOut].data().get(), numOutputs);
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, numOutputs, prevSize, batchSize, alpha, NNlayers[outputsIndex].innerDeltaB.data().get(), numOutputs, NNlayers[mOut].atNeuronOutputs.data().get(), prevSize, beta, NNlayers[mOut].innerDeltaW.data().get(), numOutputs);
//hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, numOutputs, prevSize, batchSize, alpha, (*which).data().get(), numOutputs, NNlayers[mOut].atNeuronOutputs.data().get(), prevSize, beta, innerDeltaW[mOut].data().get(), numOutputs);
--mOut;
for(int i=outputsIndex-1;i;--i) {
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[i+1];
prevSize=hiddenMatrix[i-1];
//which=&innerDeltaB[mOut];
//cublasXtDgemm(handlex, HIPBLAS_OP_T, HIPBLAS_OP_N, thisSize, batchSize, nextSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, innerDeltaB[mOut+1].data().get(), nextSize, beta, innerDeltaB[mOut].data().get(), thisSize);
hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, thisSize, batchSize, nextSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, NNlayers[i+1].innerDeltaB.data().get(), nextSize, beta, NNlayers[i].innerDeltaB.data().get(), thisSize);
//hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, thisSize, batchSize, nextSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, innerDeltaB[mOut+1].data().get(), nextSize, beta, (*which).data().get(), thisSize);
//thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[i].allW),update_w(NNlayers[i].weightsMatrix.data().get(),innerDeltaW[i].data().get(),toDivideRMS));
////thrust::for_each(counterBegin,weightsCounterEnd[i],update_w(NNlayers[i].weightsMatrix.data().get(),innerDeltaW[i].data().get(),toDivideRMS));
//thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[mPlus].allN),update_b(NNlayers[mPlus].biases.data().get(),innerDeltaB[i].data().get(),toDivideRMS));
////thrust::for_each(counterBegin,nodesCounterEnd[mPlus],update_b(NNlayers[mPlus].biases.data().get(),innerDeltaB[i].data().get(),toDivideRMS));
//zipIterator begin(thrust::make_tuple(NNlayers[i].atNeuronOutputs.begin(), innerDeltaB[mOut].begin()));
//zipIterator end(thrust::make_tuple(NNlayers[i].atNeuronOutputs.end(), innerDeltaB[mOut].end()));
//thrust::transform(begin,end,innerDeltaB[mOut].begin(),backProp_helper2());
//thrust::transform(begin2[i],end2[i],innerDeltaB[mOut].begin(),backProp2);
if(i!=1) {
thrust::transform(NNlayers[i].counterN.begin(),NNlayers[i].counterN.end(),NNlayers[i].innerDeltaB.begin(),backProp_helper2(NNlayers[i].atNeuronOutputs.data().get(),NNlayers[i].innerDeltaB.data().get()));
//thrust::transform(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[i].allN),(*which).begin(),backProp_helper2(NNlayers[i].atNeuronOutputs.data().get(),(*which).data().get()));
//thrust::transform(counterBegin,nodesCounterEnd[i],innerDeltaB[mOut].begin(),backProp_helper2(NNlayers[i].atNeuronOutputs.data().get(),innerDeltaB[mOut].data().get()));
//cublasXtDgemm(handlex, HIPBLAS_OP_N, HIPBLAS_OP_T, thisSize, prevSize, batchSize, alpha, innerDeltaB[mOut].data().get(), thisSize, NNlayers[i-1].atNeuronOutputs.data().get(), prevSize, beta, innerDeltaW[mOut].data().get(), thisSize);
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, thisSize, prevSize, batchSize, alpha, NNlayers[i].innerDeltaB.data().get(), thisSize, NNlayers[i-1].atNeuronOutputs.data().get(), prevSize, beta, NNlayers[mOut].innerDeltaW.data().get(), thisSize);
//hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, thisSize, prevSize, batchSize, alpha, (*which).data().get(), thisSize, NNlayers[i-1].atNeuronOutputs.data().get(), prevSize, beta, innerDeltaW[mOut].data().get(), thisSize);
} else {
//zipIterator begin(thrust::make_tuple(NNlayers[i].atNeuronInput.begin(), innerDeltaB[mOut].begin()));
//zipIterator end(thrust::make_tuple(NNlayers[i].atNeuronInputs.end(), innerDeltaB[mOut].end()));
//thrust::transform(begin,end,innerDeltaB[mOut].begin(),backProp_helper());
//thrust::transform(begin1[i],end1[i],innerDeltaB[mOut].begin(),backProp);
thrust::transform(NNlayers[i].counterN.begin(),NNlayers[i].counterN.end(),NNlayers[i].innerDeltaB.begin(),backProp_helper(NNlayers[i].innerDeltaB.data().get(),NNlayers[i].atNeuronInputs.data().get()));
//thrust::transform(counterBegin,nodesCounterEnd[i],innerDeltaB[mOut].begin(),backProp_helper(innerDeltaB[mOut].data().get(),NNlayers[i].atNeuronInputs.data().get()));
//cublasXtDgemm(handlex, HIPBLAS_OP_N, HIPBLAS_OP_T, thisSize, prevSize, batchSize, alpha, innerDeltaB[mOut].data().get(), thisSize, data[whichBatch].data().get(), prevSize, beta, innerDeltaW[mOut].data().get(), thisSize);
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, thisSize, prevSize, batchSize, alpha, NNlayers[i].innerDeltaB.data().get(), thisSize, data[whichBatch].data().get(), prevSize, beta, NNlayers[mOut].innerDeltaW.data().get(), thisSize);
}
--mOut;
--mPlus;
}
//thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[0].allW),update_w(NNlayers[0].weightsMatrix.data().get(),innerDeltaW[0].data().get(),toDivideRMS));
////thrust::for_each(counterBegin,weightsCounterEnd[0],update_w(NNlayers[0].weightsMatrix.data().get(),innerDeltaW[0].data().get(),toDivideRMS));
//thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[1].allN),update_b(NNlayers[1].biases.data().get(),innerDeltaB[0].data().get(),toDivideRMS));
////thrust::for_each(counterBegin,nodesCounterEnd[1],update_b(NNlayers[1].biases.data().get(),innerDeltaB[0].data().get(),toDivideRMS));
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::transform(NNlayers[ii].innerDeltaB.begin(),NNlayers[ii].innerDeltaB.end(),NNlayers[ii].outerDeltaB.begin(),NNlayers[ii].outerDeltaB.begin(),thrust::plus<double>());
thrust::transform(NNlayers[i].innerDeltaW.begin(),NNlayers[i].innerDeltaW.end(),NNlayers[i].outerDeltaW.begin(),NNlayers[i].outerDeltaW.begin(),thrust::plus<double>());
}//*/
/*for(int i=0;i<outputsIndex;++i) {
thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[i].allW),update_w(NNlayers[i].weightsMatrix.data().get(),innerDeltaW[i].data().get(),toDivideRMS));
thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[i+1].allN),update_b(NNlayers[i+1].biases.data().get(),innerDeltaB[i].data().get(),toDivideRMS));
}//*/
++whichBatch;
}
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::for_each(NNlayers[i].counterW.begin(),NNlayers[i].counterW.end(),update_w(NNlayers[i].weightsMatrix.data().get(),NNlayers[i].outerDeltaW.data().get(),toDivideRMS));
thrust::for_each(NNlayers[ii].counterN.begin(),NNlayers[ii].counterN.end(),update_b(NNlayers[ii].biases.data().get(),NNlayers[ii].outerDeltaB.data().get(),toDivideRMS));
}//*/
if(!showIntervalCountDown) {
if(gotRight>maxGotRight){maxGotRight=gotRight;}
printf("Epoch: %d-Got %d of %d-max right: %d-lRate: %.5f",epochNum,gotRight,dataSetSize,maxGotRight,learningRate);
printf("-");
gotRight=0;
whichBatch=0;
for(int t=0;t<testSetSize;t+=batchSize) {
which=&testData[whichBatch];
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[ii];
//cublasXtDgemm(handlex, HIPBLAS_OP_N, HIPBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayers[ii].atNeuronInputs.data().get(), nextSize);
hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayers[ii].atNeuronInputs.data().get(), nextSize);
thrust::transform(NNlayers[ii].counterN.begin(),NNlayers[ii].counterN.end(),NNlayers[ii].atNeuronOutputs.begin(),forwardFeed_helper(NNlayers[ii].atNeuronInputs.data().get(),NNlayers[ii].biases.data().get()));
//thrust::transform(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[ii].allN),NNlayers[ii].atNeuronOutputs.begin(),forwardFeed[ii]);
which=&NNlayers[ii].atNeuronOutputs;
}
if(!showIntervalCountDown) {
batchStart=0;
batchEnd=numOutputs;
//printf("\nbatch starting at: %d\n",t);
for(int b=0;b<batchSize;++b) {
iter = thrust::max_element(NNlayers[outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayers[outputsIndex].atNeuronOutputs.begin()+batchEnd);
position = iter - NNlayers[outputsIndex].atNeuronOutputs.begin();
position -= batchStart;
/*printf("output: %d expected: %d\n",position,btLabels[t+b]);
for(int ot=batchStart;ot<batchEnd;++ot) {
double oo=NNlayers[outputsIndex].atNeuronOutputs[ot];
printf("%.5f ",oo);
}
printf("\n");//*/
if(position==btLabels[t+b]) {
++gotRight;
}
batchStart=batchEnd;
batchEnd+=numOutputs;
}
}
++whichBatch;
}
if(gotRight>maxTestRight && testSetSize){maxTestRight=gotRight;}
}
if(vlRate) {
//if(epochNum>1) {
double cutOff=0.92;
double percLearned=(double)gotRight/(double)testSetSize;
if(percLearned<0.99 && percLearned>cutOff) {
percLearned=1.0-percLearned;
//percLearned=(1.0-percLearned)*2.0;
//percLearned=(1.0-percLearned)/2.0;
//percLearned=pow(1.0-percLearned,(double)layers);
//percLearned=pow(1.0-percLearned,2.0);
//alfLearn=-(percLearned*(learningRate/2.0)+(learningRate/2.0));
learningRate=(cutOff*origLearningRate)+percLearned;//-(percLearned*origLearningRate);
toDivideRMS=learningRate/(double)batchSize;
//toDivideRMS=learningRate/(double)numBatches;
} else {
if(percLearned<0.99) {
learningRate=origLearningRate;
toDivideRMS=learningRate/(double)batchSize;
//toDivideRMS=learningRate/(double)numBatches;
}
}
//}
}
endTime=high_resolution_clock::now();
seconds=duration_cast<microseconds>(endTime-startTime).count()/1000000.0;
totalTime+=seconds;
if(!showIntervalCountDown) {
double errRate=(1.0-((double)gotRight/(double)testSetSize))*100.0;
printf("Test-Got %d of %d-max right: %d-sec: %.5f-totTime: %.5f-errRate: %.5f\n",gotRight,testSetSize,maxTestRight,lastNoShowTime,totalTime,errRate);
showIntervalCountDown=showInterval;
/*if(maxTestRight!=gotRight) {
pthread_mutex_lock(&crmutex);
counterGo=true;
pthread_mutex_unlock(&crmutex);
}*/
} else {
lastNoShowTime=seconds;
--showIntervalCountDown;
if(timeEstCountDown) {
--timeEstCountDown;
if(!timeEstCountDown) {
//printf("(yes it's running...)\n");
printf("Update time interval approximately %.5f seconds apart\n",(lastNoShowTime*(double)showInterval)+5.0);
/*waitTime=(int)(lastNoShowTime*(double)showInterval);
pthread_mutex_lock(&crmutex);
counterGo=true;
pthread_mutex_unlock(&crmutex);*/
}
}
}
}
/*if(showInterval) {
pthread_mutex_lock(&crmutex);
counterExit=true;
pthread_mutex_unlock(&crmutex);
//void *result;
//pthread_join(counter, &result);
}*/
hipblasDestroy(handle);
//cublasXtDestroy(handlex);
//saveState("MPIv-Cuda-");
//sleep(5);
}
// Builds the single-precision (float) network variant.
// The input and output widths are framed around the caller-supplied hidden
// sizes, so hiddenMatrix ends up listing every layer width, input through
// output.  A batch size below 100 is coerced to 10000.  The two trailing
// bool parameters are kept only for overload/signature compatibility and
// are not read here.
neuralNet(int _numInputs, int _numOutputs, vector<int> &_hiddenMatrix, int pBatchSize, bool floata, bool floatb) :
    hiddenMatrix(_hiddenMatrix), RMS(DBL_MAX), minRMS(DBL_MAX), batchSize(pBatchSize) {
    cout << "in float\n";
    hipSetDevice(3);  // NOTE(review): GPU index is hard-coded — confirm intended device
    if(batchSize<100) {
        batchSize=10000;
    }
    hipblasCreate(&handle);
    numInputs=_numInputs;
    numOutputs=_numOutputs;
    // Frame the hidden sizes with the input and output layer widths.
    hiddenMatrix.insert(hiddenMatrix.begin(),numInputs);
    hiddenMatrix.push_back(numOutputs);
    layers=hiddenMatrix.size();
    outputsIndex=layers-1;
    NNlayersf=vector<NN_layerf>(layers);
    cout << "Setting up network...\n";
    cout << "Layers: ";
    for(size_t h=0;h<hiddenMatrix.size();++h) {
        cout << hiddenMatrix[h] << " ";
    }
    cout << "Batch size: " << batchSize << endl << endl;
    // One layer object per width; the output layer has no outgoing weights.
    for(int i=0;i<(int)layers;++i) {
        int layerKind = (i==0) ? INPUT : ((i==outputsIndex) ? OUTPUT : HIDDEN);
        int fanOut = (i==outputsIndex) ? 0 : hiddenMatrix[i+1];
        NNlayersf[i]=NN_layerf(hiddenMatrix[i],fanOut,batchSize,layerKind);
    }
}
void train_MatMulf(vector<vector<float>> &pData, vector<vector<float>> &pLabels, ULLI maxIter,
float RMSwant, int doDataSetSize, float lRate, vector<vector<float>> &pTestData, vector<vector<float>> &pTestLabels, bool vlRate) {
cout << "in other float\n";
if(!showInterval) {
showInterval=10;
}
vector<UNCHAR> bLabels;
for(auto p:pLabels) {
bLabels.push_back((UNCHAR)(thrust::max_element(p.begin(), p.end())-p.begin()));
}
if(lRate<0.0f) {
learningRatef=0.05f;
} else {
learningRatef=lRate;
}
if(!doDataSetSize) {
doDataSetSize=60000;
}
dataSetSize=doDataSetSize;
const float alf = 1;
const float bet = 0;
const float *alpha = &alf;
const float *beta = &bet;
int batchStart,batchEnd, thisSize, nextSize;
RMSwanted=RMSwant;
maxEpochs=maxIter;
itemSize=pData[0].size();
int testSetSize=pTestData.size();
vector<UNCHAR> btLabels;
device_vector<float> testData[testSetSize/batchSize];
if(testSetSize) {
for(auto p:pTestLabels) {
btLabels.push_back((UNCHAR)(thrust::max_element(p.begin(), p.end())-p.begin()));
}
} else {
hipFree(&testData);
}
int numBatches=dataSetSize/batchSize;
device_vector<float> data[numBatches];
device_vector<float> labels[numBatches];
//Creating pre-made batches so I can simply copy them to layer[0]
cout << "Making batches in video memory...\n";
int whichBatch=0;
for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
batchStart=0;
batchEnd=0;
data[whichBatch]=device_vector<float>(itemSize*batchSize);
memTracker(itemSize*batchSize*8,false);
labels[whichBatch]=device_vector<float>(batchSize*numOutputs);
memTracker(numOutputs*batchSize*8,false);
for(int b=0;b<batchSize;++b) {
thrust::copy(pData[itemNum+b].begin(),pData[itemNum+b].end(),data[whichBatch].begin()+batchStart);//,floatToDoubleFunctor());
thrust::copy(pLabels[itemNum+b].begin(),pLabels[itemNum+b].end(),labels[whichBatch].begin()+batchEnd);
batchStart+=itemSize;
batchEnd+=numOutputs;
}
++whichBatch;
}
whichBatch=0;
for(int i=0;i<testSetSize;i+=batchSize) {
testData[whichBatch]=device_vector<float>(itemSize*batchSize);
memTracker(itemSize*batchSize*8,false);
batchStart=0;
for(int j=0;j<batchSize;++j) {
thrust::copy(pTestData[i+j].begin(),pTestData[i+j].end(),testData[whichBatch].begin()+batchStart);
batchStart+=itemSize;
}
++whichBatch;
}
int mOut=outputsIndex-2;
cout << "Starting training...\n";
memTracker(0,true);
thrust::device_vector<float>::iterator iter;
int position;
int gotRight=0, prevSize;
toDivideRMSf=learningRatef/(float)batchSize;
int maxGotRight=0, maxTestRight=-1, ii;
device_vector<float> *which;
float origlearningRatef=learningRatef, seconds, totalTime=0.0f;
high_resolution_clock::time_point startTime, endTime;
int showIntervalCountDown=showInterval;
float lastNoShowTime=0.0f;
int timeEstCountDown=10, mPlus;
for(int epochNum=0;!threadExit && epochNum<maxEpochs && maxGotRight!=dataSetSize && maxTestRight!=testSetSize;++epochNum) {
whichBatch=0;
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::fill(NNlayersf[ii].outerDeltaB.begin(),NNlayersf[ii].outerDeltaB.end(),0.0f);
thrust::fill(NNlayersf[i].outerDeltaW.begin(),NNlayersf[i].outerDeltaW.end(),0.0f);
}//*/
if(!showIntervalCountDown) {
gotRight=0;
}
startTime=high_resolution_clock::now();
for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
//forward propagation
which=&data[whichBatch];
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[ii];
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayersf[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayersf[ii].atNeuronInputs.data().get(), nextSize);
thrust::transform(NNlayersf[ii].counterN.begin(),NNlayersf[ii].counterN.end(),NNlayersf[ii].atNeuronOutputs.begin(),forwardFeed_helperf(NNlayersf[ii].atNeuronInputs.data().get(),NNlayersf[ii].biases.data().get()));
which=&NNlayersf[ii].atNeuronOutputs;
}
//first check how many we got right
if(!showIntervalCountDown) {
batchStart=0;
batchEnd=numOutputs;
//printf("\nbatch starting at: %d\n",itemNum);
for(int b=0;b<batchSize;++b) {
iter = thrust::max_element(NNlayersf[outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayersf[outputsIndex].atNeuronOutputs.begin()+batchEnd);
position = iter - NNlayersf[outputsIndex].atNeuronOutputs.begin();
position -= batchStart;
//printf("output: %d expected: %d\n",position,bLabels[itemNum+b]);
//for(int ot=batchStart;ot<batchEnd;++ot) {
// float oo=NNlayersf[outputsIndex].atNeuronOutputs[ot];
// printf("%.5f ",oo);
//}
//printf("\n");
if(position==bLabels[itemNum+b]) {
++gotRight;
}
batchStart=batchEnd;
batchEnd+=numOutputs;
}
}
//Backward propagation
mOut=outputsIndex-1;
mPlus=outputsIndex;
prevSize=hiddenMatrix[mOut];
thrust::transform(NNlayersf[outputsIndex].counterN.begin(),NNlayersf[outputsIndex].counterN.end(),NNlayersf[outputsIndex].innerDeltaB.begin(),output_helperf(NNlayersf[outputsIndex].atNeuronOutputs.data().get(),NNlayersf[outputsIndex].atNeuronInputs.data().get(),NNlayersf[outputsIndex].innerDeltaB.data().get(),labels[whichBatch].data().get()));
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, numOutputs, prevSize, batchSize, alpha, NNlayersf[outputsIndex].innerDeltaB.data().get(), numOutputs, NNlayersf[mOut].atNeuronOutputs.data().get(), prevSize, beta, NNlayersf[mOut].innerDeltaW.data().get(), numOutputs);
--mOut;
for(int i=outputsIndex-1;i;--i) {
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[i+1];
prevSize=hiddenMatrix[i-1];
hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, thisSize, batchSize, nextSize, alpha, NNlayersf[i].weightsMatrix.data().get(), nextSize, NNlayersf[i+1].innerDeltaB.data().get(), nextSize, beta, NNlayersf[i].innerDeltaB.data().get(), thisSize);
if(i!=1) {
thrust::transform(NNlayersf[i].counterN.begin(),NNlayersf[i].counterN.end(),NNlayersf[i].innerDeltaB.begin(),backProp_helper2f(NNlayersf[i].atNeuronOutputs.data().get(),NNlayersf[i].innerDeltaB.data().get()));
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, thisSize, prevSize, batchSize, alpha, NNlayersf[i].innerDeltaB.data().get(), thisSize, NNlayersf[i-1].atNeuronOutputs.data().get(), prevSize, beta, NNlayersf[mOut].innerDeltaW.data().get(), thisSize);
} else {
thrust::transform(NNlayersf[i].counterN.begin(),NNlayersf[i].counterN.end(),NNlayersf[i].innerDeltaB.begin(),backProp_helperf(NNlayersf[i].innerDeltaB.data().get(),NNlayersf[i].atNeuronInputs.data().get()));
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, thisSize, prevSize, batchSize, alpha, NNlayersf[i].innerDeltaB.data().get(), thisSize, data[whichBatch].data().get(), prevSize, beta, NNlayersf[mOut].innerDeltaW.data().get(), thisSize);
}
--mOut;
--mPlus;
}
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::transform(NNlayersf[ii].innerDeltaB.begin(),NNlayersf[ii].innerDeltaB.end(),NNlayersf[ii].outerDeltaB.begin(),NNlayersf[ii].outerDeltaB.begin(),thrust::plus<float>());
thrust::transform(NNlayersf[i].innerDeltaW.begin(),NNlayersf[i].innerDeltaW.end(),NNlayersf[i].outerDeltaW.begin(),NNlayersf[i].outerDeltaW.begin(),thrust::plus<float>());
}//*/
++whichBatch;
}
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::for_each(NNlayersf[i].counterW.begin(),NNlayersf[i].counterW.end(),update_wf(NNlayersf[i].weightsMatrix.data().get(),NNlayersf[i].outerDeltaW.data().get(),toDivideRMSf));
thrust::for_each(NNlayersf[ii].counterN.begin(),NNlayersf[ii].counterN.end(),update_bf(NNlayersf[ii].biases.data().get(),NNlayersf[ii].outerDeltaB.data().get(),toDivideRMSf));
}//*/
if(!showIntervalCountDown) {
if(gotRight>maxGotRight){maxGotRight=gotRight;}
printf("Epoch: %d-Got %d of %d-max right: %d-lRate: %.5f",epochNum,gotRight,dataSetSize,maxGotRight,learningRatef);
printf("-");
gotRight=0;
whichBatch=0;
for(int t=0;t<testSetSize;t+=batchSize) {
which=&testData[whichBatch];
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[ii];
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayersf[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayersf[ii].atNeuronInputs.data().get(), nextSize);
thrust::transform(NNlayersf[ii].counterN.begin(),NNlayersf[ii].counterN.end(),NNlayersf[ii].atNeuronOutputs.begin(),forwardFeed_helperf(NNlayersf[ii].atNeuronInputs.data().get(),NNlayersf[ii].biases.data().get()));
which=&NNlayersf[ii].atNeuronOutputs;
}
if(!showIntervalCountDown) {
batchStart=0;
batchEnd=numOutputs;
//printf("\nbatch starting at: %d\n",t);
for(int b=0;b<batchSize;++b) {
iter = thrust::max_element(NNlayersf[outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayersf[outputsIndex].atNeuronOutputs.begin()+batchEnd);
position = iter - NNlayersf[outputsIndex].atNeuronOutputs.begin();
position -= batchStart;
/*printf("output: %d expected: %d\n",position,btLabels[t+b]);
for(int ot=batchStart;ot<batchEnd;++ot) {
float oo=NNlayersf[outputsIndex].atNeuronOutputs[ot];
printf("%.5f ",oo);
}
printf("\n");//*/
if(position==btLabels[t+b]) {
++gotRight;
}
batchStart=batchEnd;
batchEnd+=numOutputs;
}
}
++whichBatch;
}
if(gotRight>maxTestRight && testSetSize){maxTestRight=gotRight;}
}
endTime=high_resolution_clock::now();
seconds=duration_cast<microseconds>(endTime-startTime).count()/1000000.0f;
totalTime+=seconds;
if(!showIntervalCountDown) {
float errRate=(1.0f-((float)gotRight/(float)testSetSize))*100.0f;
printf("Test-Got %d of %d-max right: %d-sec: %.5f-totTime: %.5f-errRate: %.5f\n",gotRight,testSetSize,maxTestRight,lastNoShowTime,totalTime,errRate);
showIntervalCountDown=showInterval;
} else {
lastNoShowTime=seconds;
--showIntervalCountDown;
if(timeEstCountDown) {
--timeEstCountDown;
if(!timeEstCountDown) {
printf("Update time interval approximately %.5f seconds apart\n",(lastNoShowTime*(float)showInterval)+5.0f);
}
}
}
}
hipblasDestroy(handle);
}
// Serializes the double-precision network to a binary file.  The data-set
// size is appended to the file name.  Layout (matches loadState):
//   epoch (ULLI), layers (ULLI), layer widths (int each),
//   batchSize (int), learningRate (double),
//   all weights layer by layer (double each),
//   all biases for layers 1..layers-1 (double each).
void saveState(string outFile) {
    outFile+="-"+to_string(dataSetSize);
    cout << "Writing weights to file: " << outFile << endl;
    ofstream oFile(outFile, ios::binary|ios::out);
    if(oFile.is_open()) {
        // Small helper so each device-vector element is written the same way.
        auto writeDouble=[&oFile](double v) {
            oFile.write(reinterpret_cast<char*>(&v),sizeof(double));
        };
        oFile.write(reinterpret_cast<char*>(&epoch),sizeof(ULLI));
        oFile.write(reinterpret_cast<char*>(&layers),sizeof(ULLI));
        for(size_t h=0;h<hiddenMatrix.size();++h) {
            oFile.write(reinterpret_cast<char*>(&hiddenMatrix[h]),sizeof(int));
        }
        oFile.write(reinterpret_cast<char*>(&batchSize),sizeof(int));
        oFile.write(reinterpret_cast<char*>(&learningRate),sizeof(double));
        // Weights exist for every layer except the output layer.
        for(int li=0;li<outputsIndex;++li) {
            for(int w=0;w<NNlayers[li].allW;++w) {
                writeDouble(NNlayers[li].weightsMatrix[w]);
            }
        }
        // Biases exist for every layer except the input layer.
        for(int li=1;li<layers;++li) {
            for(int b=0;b<NNlayers[li].allN;++b) {
                writeDouble(NNlayers[li].biases[b]);
            }
        }
        oFile.close();
    }
    cout << "Done\n";
}
// Restores a double-precision network from the binary file named by the
// inFile member, in the exact layout saveState() writes: epoch, layers,
// layer widths, batchSize, learningRate, then weights and biases.
// Rebuilds hiddenMatrix and NNlayers from scratch before reading values.
void loadState() {
    cout << "Reading weights from file: " << inFile << endl;
    ifstream oFile(inFile, ios::binary|ios::in);
    if(oFile.is_open()) {
        oFile.read((char*)&epoch,sizeof(ULLI));
        oFile.read((char*)&layers,sizeof(ULLI));
        // Layer widths were written one int per layer.
        hiddenMatrix.clear();
        for(int i=0;i<layers;++i) {
            int l=0;
            oFile.read((char*)&l,sizeof(int));
            hiddenMatrix.push_back(l);
        }
        oFile.read((char*)&batchSize,sizeof(int));
        oFile.read((char*)&learningRate,sizeof(double));
        outputsIndex=layers-1;
        // NOTE(review): the constructor inserts numInputs at the front of
        // hiddenMatrix without any +1, yet this reads it back as width-1 —
        // presumably the saved input width includes a bias node; confirm.
        numInputs=hiddenMatrix[0]-1;
        numOutputs=hiddenMatrix[outputsIndex];
        NNlayers.clear();
        int type=INPUT;
        // Rebuild each non-output layer, then stream its weights in.
        for(int i=0;i<outputsIndex;++i) {
            if(i){type=HIDDEN;}
            NNlayers.push_back(NN_layer(hiddenMatrix[i],hiddenMatrix[i+1],batchSize,type));
            for(int j=0;j<NNlayers[i].allW;++j) {
                double o=0.0;
                oFile.read((char*)&o,sizeof(double));
                // NOTE(review): assumes the NN_layer constructor leaves
                // weightsMatrix empty; if it pre-sizes the vector, these
                // push_backs would append past the constructed entries —
                // verify against NN_layer.
                NNlayers[i].weightsMatrix.push_back(o);
            }
            // false presumably means "don't randomize; weights were loaded"
            // — confirm against NN_layer::setupLayer.
            NNlayers[i].setupLayer(false);
        }
        NNlayers.push_back(NN_layer(hiddenMatrix[outputsIndex],0,batchSize,OUTPUT));
        NNlayers.back().setupLayer(false);
        // Biases exist for every layer except the input layer.
        for(int i=1;i<layers;++i) {
            for(int j=0;j<NNlayers[i].allN;++j) {
                double o=0.0;
                oFile.read((char*)&o,sizeof(double));
                NNlayers[i].biases.push_back(o);
            }
        }
        oFile.close();
    }
    cout << "Done\n";
}
// Layer storage: double-precision path (train_MatMul/saveState/loadState).
vector<NN_layer> NNlayers;
// Layer storage: single-precision path (train_MatMulf).
vector<NN_layerf> NNlayersf;
// Second double-precision layer set — not referenced in this chunk; TODO confirm use.
vector<NN_layer> NNlayersQ[2];
private:
// epoch is persisted by saveState/loadState; layers counts every layer
// (input + hidden + output); maxEpochs bounds the training loops.
// maxElement is not referenced in this chunk — TODO confirm use.
ULLI epoch, maxElement, layers, maxEpochs;//, maxWeightsMatrix, maxDeltaMatrix;
// outputsIndex == layers-1; batchSize is the mini-batch width used to size
// every per-batch device buffer.
int outputsIndex, dataSetSize, numInputs, numOutputs, batchSize;
// toDivideRMS is the per-item step (learningRate/batchSize) used by the
// double-precision weight/bias updates.
double RMS, minRMS, toDivideRMS, RMSwanted, learningRate;
// Single-precision counterparts used by train_MatMulf.
float toDivideRMSf, learningRatef;
// Widths of every layer, input through output.
vector<int> hiddenMatrix;
// hipBLAS handle created in the constructor, destroyed at end of training.
hipblasHandle_t handle;
// Number of values per training item (pData[0].size()).
ULLI itemSize;
// Path read by loadState().
string inFile;
// Not referenced in this chunk — TODO confirm use.
ULLI neededEpochs;
// Host-side weight snapshot — not referenced in this chunk; TODO confirm use.
vector<vector<double>> neuralNet_weights_host;
};
//Builds the compile-time-selected training problem (MNIST when doMNISTprob,
//a BITS-bit binary counter when doBinaryProb), loads data and labels,
//constructs the network and runs training.
//  inputHiddenLayers - hidden layer sizes from the command line (empty -> default 200,100)
//  batchSize         - mini-batch size
//  doDataSetSize     - sample count passed through to the train_* routine
//  lRate             - learning rate (-1.0 means "use the trainer's default" -- TODO confirm)
//  inFile            - saved-weights file to restore ("" -> build a fresh net)
//  outFile           - accepted but not referenced in this function
//  vlRate            - variable-learning-rate flag passed to the trainer
//  numDevs           - worker/GPU count (used by the binary problem's constructor)
void doMain(vector<int> &inputHiddenLayers, int batchSize, int doDataSetSize, double lRate, string inFile, string outFile, bool vlRate, int numDevs) {
if(doMNISTprob) {
vector<int> hiddenMatrix;
if(!inputHiddenLayers.size()) {
//Default topology: two hidden layers of 200 and 100 neurons.
hiddenMatrix.push_back(200);
hiddenMatrix.push_back(100);
//hiddenMatrix.push_back(10);
//hiddenMatrix.push_back(784+(784/2));
//hiddenMatrix.push_back(784+(784/2));
//hiddenMatrix.push_back(784);
//hiddenMatrix.push_back(784);
} else {
for(auto h:inputHiddenLayers) {
hiddenMatrix.push_back(h);
}
}
//Load both double- and float-precision copies of the MNIST image sets
//(the active trainer below picks which precision it consumes).
vector<vector<double>> testData(10000);
ReadMNIST_double("t10k-images.idx3-ubyte",10000,784,testData);
vector<vector<double>> trainData(60000);
ReadMNIST_double("train-images.idx3-ubyte",60000,784,trainData);//*/
vector<vector<float>> testDataf(10000);
ReadMNIST_float("t10k-images.idx3-ubyte",10000,784,testDataf);
vector<vector<float>> trainDataf(60000);
ReadMNIST_float("train-images.idx3-ubyte",60000,784,trainDataf);//*/
vector<vector<double>> testLabels(10000);
vector<vector<double>> trainLabels(60000);
vector<vector<float>> testLabelsf(10000);
vector<vector<float>> trainLabelsf(60000);
//vector<UNCHAR> testLabels2;//(10000);
//vector<UNCHAR> trainLabels2;//(60000);
//Expand the test labels into one-hot length-10 vectors (double and float);
//the two header ints of the IDX label file are read and discarded.
ifstream file("t10k-labels.idx1-ubyte",ios::binary);
if(file.is_open()) {
int placeHolder=0;
file.read((char*)&placeHolder,sizeof(placeHolder));
file.read((char*)&placeHolder,sizeof(placeHolder));
for(int i=0;i<10000;++i) {
testLabels[i]=vector<double>(10,0.0);
testLabelsf[i]=vector<float>(10,0.0f);
//testLabels[i]=vector<float>(10,0.0f);
UNCHAR temp=0;
file.read((char*)&temp,1);
for(UNCHAR j=0;j<10;++j) {
if(j==temp) {
//testLabels[i].push_back(1.0);
//testLabels[i][j]=1.0f;
testLabels[i][j]=1.0;
testLabelsf[i][j]=1.0f;
//testLabels2.push_back(temp);
} /*else {
//testLabels[i].push_back(0.0);
testLabels[i][j]=0.0;
}*/
}
}
file.close();
}
//cout << "testLabels2 size: " << testLabels2.size() << endl;
//Same one-hot expansion for the 60000 training labels.
ifstream file2("train-labels.idx1-ubyte",ios::binary);
if(file2.is_open()) {
int placeHolder=0;
file2.read((char*)&placeHolder,sizeof(placeHolder));
file2.read((char*)&placeHolder,sizeof(placeHolder));
for(int i=0;i<60000;++i) {
trainLabels[i]=vector<double>(10,0.0);
trainLabelsf[i]=vector<float>(10,0.0f);
//trainLabels[i]=vector<float>(10,0.0f);
UNCHAR temp=0;
file2.read((char*)&temp,1);
for(UNCHAR j=0;j<10;++j) {
if(j==temp) {
//trainLabels[i].push_back(1.0);
//trainLabels[i][j]=1.0f;
trainLabels[i][j]=1.0;
trainLabelsf[i][j]=1.0f;
//trainLabels2.push_back(temp);
} /*else {
//trainLabels[i].push_back(0.0);
trainLabels[i][j]=0.0;
}*/
}
}
file2.close();
}
//cout << "trainLabels2 size: " << trainLabels2.size() << endl;
//vector<UNCHAR> temp;
//for(auto p:trainData[1]) {
// temp.push_back((UNCHAR)(p*255.0f));
// cout << (int)temp.back() << endl;
//}
//UNCHAR* t=&temp[0];
//intarray2bmp::intarray2bmp("outputtest.bmp",t,(UNCHAR)28,(UNCHAR)28,(UNCHAR)0,(UNCHAR)255);
//Build a fresh 784-in/10-out network, or restore one from a saved-weights file.
neuralNet go;
if(inFile=="") {
//go=neuralNet(784,10,hiddenMatrix,batchSize,numDevs);
//go=neuralNet(784,10,hiddenMatrix,batchSize);
go=neuralNet(784,10,hiddenMatrix,batchSize,true,true);
} else {
go=neuralNet(inFile);
}
auto start = high_resolution_clock::now();
//Exactly one train_* variant is active; the alternatives are kept commented out.
//go.train_floats(trainData,trainLabels,1000000,0.0001,trainLabels2);
//go.train(trainData,trainLabels,1000000,0.0001,trainLabels2, doDataSetSize);//*/
//go.train_Quad(trainData,trainLabels, 1000000, 0.0001, doDataSetSize, lRate, testData, testLabels, vlRate);//*/
go.train_MatMulf(trainDataf,trainLabelsf, 1000000, 0.0001, doDataSetSize, lRate, testDataf, testLabelsf, vlRate);//*/
//go.train_MatMul(trainData,trainLabels, 1000000, 0.0001, doDataSetSize, lRate, testData, testLabels, vlRate);//*/
//go.evaluate(testData,testLabels,testLabels2, doDataSetSize);
auto endTime = high_resolution_clock::now();
printTime(start,endTime);
}
if(doBinaryProb) {
//Toy problem: learn the BITS-bit increment function -- each input is the
//binary encoding of i, its label is (i+1) mod 2^BITS.
vector<int> hiddenMatrix;
hiddenMatrix.push_back(BITS+(BITS/2));
//hiddenMatrix.push_back(BITS+(BITS/2));
for(int i=0;i<1;++i) {
hiddenMatrix.push_back(BITS+(BITS/2));
//hiddenMatrix.push_back(12);
}
//vector<vector<neuron_t>> countingTest;
//vector<vector<double>> countingLabels;
int size=pow(2,BITS);
neuralNet test(BITS,BITS,hiddenMatrix,batchSize,numDevs);
vector<vector<double>> countingTest;
vector<vector<double>> countingLabels;
for(int i=0;i<size;++i) {
countingTest.push_back(vector<double>(BITS));
countingLabels.push_back(vector<double>(BITS,0.0));
//countingLabels[i]=vector<double>(BITS,0.0);
//countingTest[i]=vector<neuron_t>(BITS);
for(int j=0;j<BITS;++j) {
//countingTest.back()[j].output=(double)bitset<BITS>(i)[(BITS-1)-j];
//countingLabels.back()[j]=(double)bitset<BITS>((i+1)%size)[(BITS-1)-j];
countingTest[i][j]=(double)bitset<BITS>(i)[(BITS-1)-j];
countingLabels[i][j]=(double)bitset<BITS>((i+1)%size)[(BITS-1)-j];
}
}
//The full truth table doubles as both training and validation set.
test.train_Quad(countingTest,countingLabels,1000000,0.00001,size,lRate,countingTest,countingLabels,vlRate);
}
}
//Entry point: installs a SIGQUIT handler that requests a clean stop, parses
//the command-line options (showInterval=, showTrain, vlRate, outWeights=,
//inWeights=, setSize=, batchSize=, learningRate=, layers=a,b,c), seeds the
//host RNG, sets up the worker-thread barriers, and hands off to doMain().
int main(int argc, char *argv[]) {
/*hipSetDevice(1);
hipDeviceReset();
hipSetDevice(2);
hipDeviceReset();
return 0;*/
//SIGQUIT (Ctrl-\) sets the global threadExit flag so training loops can
//finish their current pass and exit.
struct sigaction ctrlc;
ctrlc.sa_handler=ctrlchandler;
ctrlc.sa_flags=0;
sigemptyset(&ctrlc.sa_mask);
sigaction(SIGQUIT,&ctrlc,NULL);
string inFile="";
string outFile="";
int doDataSetSize=0;
int batchSize=5;
if(doBinaryProb) {
batchSize=4;
}
double lRate=-1.0;
bool vlRate=false;
if(!vlRate){}  //no-op; presumably silences an unused-variable warning -- TODO confirm
vector<int> inputHiddenLayers;
showInterval=0;
for(int i=1;i<argc;++i) {
string temp=string(argv[i]);
if(temp.find("showInterval=")!=string::npos) {
sscanf(argv[i],"showInterval=%d",&showInterval);
continue;
}
if(temp.find("showTrain")!=string::npos) {
showCorrectNumTrain=true;
continue;
}
if(temp.find("vlRate")!=string::npos) {
vlRate=true;
}
if(temp.find("outWeights=")!=string::npos) {
outFile=temp.substr(11,temp.size());
continue;
}
if(temp.find("inWeights=")!=string::npos) {
inFile=temp.substr(10,temp.size());
continue;
}
if(temp.find("setSize=")!=string::npos) {
sscanf(argv[i],"setSize=%d",&doDataSetSize);
continue;
}
if(temp.find("batchSize=")!=string::npos) {
sscanf(argv[i],"batchSize=%d",&batchSize);
continue;
}
if(temp.find("learningRate=")!=string::npos) {
sscanf(argv[i],"learningRate=%lf",&lRate);
continue;
}
//layers=a,b,c : comma-separated hidden layer sizes.
if(temp.find("layers=")!=string::npos) {
temp.erase(0,7);
int where;
int what=1;
while(what) {
if(temp.find(",")!=string::npos) {
where=temp.find(",");
string temp2=string(temp.begin(),temp.begin()+where);
sscanf(temp2.c_str(),"%d",&what);
inputHiddenLayers.push_back(what);
temp.erase(0,where+1);
} else {
what=0;
}
}
//Whatever remains after the last comma is the final layer size.
sscanf(temp.c_str(),"%d",&what);
inputHiddenLayers.push_back(what);
}
}
//Cuda doesn't like this first one
//srandom(time_point_cast<nanoSec>(high_resolution_clock::now()).time_since_epoch().count());
srand((unsigned int)time_point_cast<nanoSec>(high_resolution_clock::now()).time_since_epoch().count());
/*int my_rank, num_nodes;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &num_nodes);
char my_host[100];
gethostname(my_host, 100);
string hostname=string(my_host);
printf("%s\n",hostname.c_str());*/
/*int deviceCount = 0;
size_t mem_tot_0 = 0;
size_t mem_free_0 = 0;*/
//Worker count is hard-coded to 2 here; the barriers rendezvous the workers
//plus this main thread (hence numDevs+1 participants).
int numDevs=2;
pthread_barrier_init(&barrier, NULL, numDevs+1);
pthread_barrier_init(&barrier2, NULL, numDevs+1);
/*ULLI totalCudaMem=0;
size_t totalFreeCudaMem;
int device_num;
//This code is from deviceQuery.cpp as seen in /usr/local/cuda-8.0/samples
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if(deviceCount) {
hipGetDevice(&device_num);
hipMemGetInfo(&mem_free_0, & mem_tot_0);
totalFreeCudaMem=mem_free_0;
ULLI dmask=1;
ULLI maxDiv=1;
for(int i=0;i<sizeof(ULLI)*8;++i) {
if(dmask&totalFreeCudaMem) {
maxDiv=dmask/2;
}
dmask<<=1;
}
maxDiv/=8;
}
int dev=0;
for (dev = 0; dev < deviceCount; ++dev) {
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
/*printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
if(!dev) {
char msg[256];
sprintf(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem/1048576.0f,
(ULLI) deviceProp.totalGlobalMem);
totalCudaMem=(ULLI)deviceProp.totalGlobalMem;
printf("%s", msg);
printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
printf(" GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n\n",
deviceProp.clockRate * 1e-3f,
deviceProp.clockRate * 1e-6f);
printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
printf(" Warp size: %d\n", deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
}
hipMemGetInfo(&mem_free_0, &mem_tot_0);
cout << "Total free cuda memory: " << mem_free_0 << endl;*/
//}
//if(totalCudaMem) {
//cout << string(my_host) << ": total Cuda Memory: " << totalCudaMem << endl;
//cout << "Total Cuda Memory: " << totalCudaMem << endl;
//}
//}
hipSetDevice(0);
//cuda thread test
/*
double test=(double)doDataSetSize/4.0;
int test2=(int)test;
if((double)test2!=test) {
cout << "setSize must be divisible by four\n";
MPI_Finalize();
return 0;
}
device_vector<double> data(25,0.0);
device_vector<double> labels(25,0.0);
hipMemGetInfo(&mem_free_0, &mem_tot_0);
//totalFreeCudaMem=mem_free_0;
cout << "Total free cuda memory: " << mem_free_0 << endl;
//random_doubles(thrust::raw_pointer_cast(&data[0]),5,5);
//random_doubles(thrust::raw_pointer_cast(&labels[0]),5,5);
for(int i=0;i<25;++i) {
data[i]=(double)i;
labels[i]=(double)i;
}
int index=0;
int index2=0;
int testI=2500;
vector<pthread_t> threads;
for(int j=0;j<deviceCount;++j) {
threads.push_back(pthread_t());
idLink *arg = (idLink*)malloc(sizeof(*arg));
(*arg).whichThread=j;
(*arg).batchStart=index;
index+=test2;
(*arg).batchEnd=index;
(*arg).testStart=index2;
index2+=testI;
(*arg).testEnd=index2;
(*arg).data=&data;
(*arg).labels=&labels;
pthread_create (&threads.at(j), NULL, cudaThread, arg);
}
hipMemGetInfo(&mem_free_0, &mem_tot_0);
//totalFreeCudaMem=mem_free_0;
cout << "Total free cuda memory: " << mem_free_0 << endl;
int status;
void * result;
for (int i=0; i < deviceCount; ++i) {
if ((status = pthread_join(threads.at(i), &result)) != 0) {
fprintf (stderr, "join error %d: %s\n", status, strerror(status));
exit (1);
}
}*/
doMain(inputHiddenLayers, batchSize, doDataSetSize, lRate, inFile, outFile, vlRate, numDevs);
//MPI_Finalize();
//doMain(0,"",0);
return 0;
}
//Byte-swaps a 32-bit value, converting the big-endian integers stored in
//MNIST IDX file headers into host (little-endian) order.
//The previous version shifted a signed byte into the sign bit
//((int)ch1<<24), which is undefined behavior for high bytes; unsigned
//arithmetic below gives the same result without the UB.
int ReverseInt(int i) {
	unsigned int u=(unsigned int)i;
	unsigned int swapped=((u&0x000000ffu)<<24)|
	                     ((u&0x0000ff00u)<<8)|
	                     ((u&0x00ff0000u)>>8)|
	                     ((u&0xff000000u)>>24);
	return (int)swapped;
}
//Loads an MNIST IDX image file into arr: one vector<double> per image, each
//raw pixel byte scaled into [0,1) by dividing by 256.0. The image count and
//geometry actually used come from the file header, not the parameters.
void ReadMNIST_double(string filename, int NumberOfImages, int DataOfAnImage, vector<vector<double>> &arr) {
	arr.resize(NumberOfImages,vector<double>(DataOfAnImage));
	ifstream file(filename,ios::binary);
	if (file.is_open()) {
		//IDX header: four big-endian ints -- magic, image count, rows, cols.
		int header[4]={0,0,0,0};
		for(int h=0;h<4;++h) {
			file.read((char*)&header[h],sizeof(int));
			header[h]=ReverseInt(header[h]);
		}
		int images=header[1];
		int pixels=header[2]*header[3];
		for(int i=0;i<images;++i) {
			//Drop the pre-sized row; pixel values are appended in file order.
			arr[i]=vector<double>();
			for(int p=0;p<pixels;++p) {
				unsigned char raw=0;
				file.read((char*)&raw,1);
				arr[i].push_back(((double)raw)/256.0);
			}
		}
	}
	file.close();
}
//Single-precision twin of ReadMNIST_double: loads an MNIST IDX image file
//into arr with each pixel byte scaled into [0,1) by dividing by 256.0f.
void ReadMNIST_float(string filename, int NumberOfImages, int DataOfAnImage, vector<vector<float>> &arr) {
	arr.resize(NumberOfImages,vector<float>(DataOfAnImage));
	ifstream file(filename,ios::binary);
	if (file.is_open()) {
		//IDX header: four big-endian ints -- magic, image count, rows, cols.
		int header[4]={0,0,0,0};
		for(int h=0;h<4;++h) {
			file.read((char*)&header[h],sizeof(int));
			header[h]=ReverseInt(header[h]);
		}
		int images=header[1];
		int pixels=header[2]*header[3];
		for(int i=0;i<images;++i) {
			//Drop the pre-sized row; pixel values are appended in file order.
			arr[i]=vector<float>();
			for(int p=0;p<pixels;++p) {
				unsigned char raw=0;
				file.read((char*)&raw,1);
				arr[i].push_back(((float)raw)/256.0f);
			}
		}
	}
	file.close();
}
//Reports the elapsed wall time between two clock samples in milli-, micro-
//and nanoseconds, plus a fractional-seconds summary line.
void printTime(high_resolution_clock::time_point start, high_resolution_clock::time_point end) {
	auto elapsed=end-start;
	long long micros=duration_cast<microseconds>(elapsed).count();
	cout << "Processing time (milliseconds): " << duration_cast<milliseconds>(elapsed).count() << endl;
	cout << "Processing time (microseconds): " << micros << endl;
	cout << "Processing time (nanoseconds): " << duration_cast<nanoseconds>(elapsed).count() << endl;
	printf("Processing time (seconds): %.04f\n",micros/1000000.0);
}
//Debug helper: prints a column-major nr_rows_A x nr_cols_A matrix held in
//device memory, one text row per matrix row. Each element access copies a
//single double from the device, so this is for small matrices only.
void print_matrix(device_vector<double> &A, int nr_rows_A, int nr_cols_A) {
	for(int row = 0; row < nr_rows_A; ++row){
		for(int col = 0; col < nr_cols_A; ++col){
			double value=A[col*nr_rows_A+row];
			printf("%.4f ",value);
		}
		cout << endl;
	}
}
| 16ec394d27eff7c61d587127c217e889b11f3753.cu | //Artifical Neural Network with Cuda and Cublas matrix version
//Ron Patrick - Capstone GVSU - Winter 2017
#include <signal.h>
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
#include <iostream>
#include <string>
#include <algorithm>
#include <bitset>
#include <unistd.h>
#include <vector>
#include <unordered_map>
#include <chrono>
#include <thrust/detail/config.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/detail/vector_base.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/extrema.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
//#include <mpi.h>
#include "helper_cuda.h"
#include "helper_string.h"
#include <cmath>
#include <numeric>
#include <limits.h>
#include <float.h>
#include <random>
//#include "imebra/imebra.h"
#include <cublasXt.h>
#include <cublas_v2.h>
#include <curand.h>
#include <pthread.h>
#include "cudnn.h"
using namespace std;
using namespace thrust;
using namespace chrono;
using nanoSec = std::chrono::nanoseconds;
#define ULLI unsigned long long int
#define UNCHAR unsigned char
#define INPUT 0
#define OUTPUT 1
#define HIDDEN 2
#ifndef doMNISTprob
#define doMNISTprob true
#endif
#ifndef doBinaryProb
#define doBinaryProb false
#endif
#ifndef BITS
#define BITS 5
#endif
int memoryTracker=0;
bool showCorrectNumTrain=false;
int showInterval=0;
pthread_mutex_t crmutex = PTHREAD_MUTEX_INITIALIZER;
bool threadExit=false;
int waitTime;
static pthread_barrier_t barrier;
static pthread_barrier_t barrier2;
void ReadMNIST_double(string filename, int NumberOfImages, int DataOfAnImage, vector<vector<double>> &arr);
void ReadMNIST_float(string filename, int NumberOfImages, int DataOfAnImage, vector<vector<float>> &arr);
void printTime(high_resolution_clock::time_point start, high_resolution_clock::time_point end);
void print_matrix(device_vector<double> &A, int nr_rows_A, int nr_cols_A);
typedef thrust::tuple<ULLI, ULLI> uTuple;
typedef thrust::tuple<double, double> dTuple;
typedef thrust::tuple<ULLI, double, double> tTuple;
typedef thrust::device_vector<double>::iterator doubleIterator;
typedef thrust::tuple<doubleIterator, doubleIterator> iterTuple;
typedef thrust::zip_iterator<iterTuple> zipIterator;
//SIGQUIT handler (installed in main): raises the global threadExit flag so
//the training loops stop at the end of their current pass.
//NOTE(review): printf is not async-signal-safe; kept as-is since it is a
//best-effort progress message.
void ctrlchandler(int sig) {
printf("\nTrying to exit...\n");
threadExit=true;
}
//Adds `in` bytes (may be negative) to the global running total of tracked
//device allocations and, when printIt is set, reports it in bytes/Kb/Mb.
void memTracker(int in, bool printIt) {
	memoryTracker+=in;
	if(!printIt) {
		return;
	}
	int kb=memoryTracker/1024;
	cout << "Cuda memory tracker: Using(bytes): " << memoryTracker << " ";
	cout << "(Kb): " << kb << " ";
	cout << "(Mb): " << (kb/1024) << endl;
}
//Thrust transform functor: widens a float element to double on the device.
struct floatToDoubleFunctor : public thrust::unary_function<float,double> {
	__device__ double operator()(float value) {
		return static_cast<double>(value);
	}
};
//Maps a uniform random value in [0,1) onto the symmetric range [-1,1);
//applied to freshly generated weight matrices.
struct fix_random_numbers : public thrust::unary_function<double, double> {
	__device__ double operator()(double u) {
		return 2.0*u-1.0;
	}
};
//Single-precision twin of fix_random_numbers: [0,1) -> [-1,1).
struct fix_random_numbers_f : public thrust::unary_function<float, float> {
	__device__ float operator()(float u) {
		return 2.0f*u-1.0f;
	}
};
//Fills the device buffer A (rowsA*colsA floats) with uniformly distributed
//random values, seeding from the host clock.
//Fix: the generator is now destroyed after use -- the previous version
//created a curand generator on every call and never released it, leaking
//generator state each time a layer was initialised.
void random_floats(float *A, int rowsA, int colsA) {
	curandGenerator_t cg;
	curandCreateGenerator(&cg, CURAND_RNG_PSEUDO_DEFAULT);
	curandSetPseudoRandomGeneratorSeed(cg, (unsigned long long) clock());
	curandGenerateUniform(cg, A, rowsA * colsA);
	curandDestroyGenerator(cg);
}
//Double-precision twin of random_floats: fills the device buffer A
//(rowsA*colsA doubles) with uniform random values.
//Fix: destroy the generator after use (previously leaked on every call).
void random_doubles(double *A, int rowsA, int colsA) {
	curandGenerator_t cg;
	curandCreateGenerator(&cg, CURAND_RNG_PSEUDO_DEFAULT);
	curandSetPseudoRandomGeneratorSeed(cg, (unsigned long long) clock());
	curandGenerateUniformDouble(cg, A, rowsA * colsA);
	curandDestroyGenerator(cg);
}
//Per-element gradient-descent step for a weight matrix, driven by an index
//sequence: weights[t] <- weights[t] - lRate * newW[t].
struct update_w : public thrust::unary_function<int, void> {
	double *weights;
	double *newW;
	double lRate;
	update_w(double *w, double *_newW, double lr) : weights(w), newW(_newW), lRate(lr){}
	__device__ void operator()(int t) {
		weights[t]-=lRate*newW[t];
	}
};
//Per-element gradient-descent step for a bias vector:
//biases[t] <- biases[t] - lRate * newB[t].
struct update_b : public thrust::unary_function<int, void> {
	double *biases;
	double *newB;
	double lRate;
	update_b(double *b, double *_newB, double lr) : biases(b), newB(_newB), lRate(lr){}
	__device__ void operator()(int t) {
		biases[t]-=lRate*newB[t];
	}
};
//Single-precision twin of update_w: weights[t] <- weights[t] - lRate*newW[t].
struct update_wf : public thrust::unary_function<int, void> {
	float *weights;
	float *newW;
	float lRate;
	update_wf(float *w, float *_newW, float lr) : weights(w), newW(_newW), lRate(lr){}
	__device__ void operator()(int t) {
		weights[t]-=lRate*newW[t];
	}
};
//Single-precision twin of update_b: biases[t] <- biases[t] - lRate*newB[t].
struct update_bf : public thrust::unary_function<int, void> {
	float *biases;
	float *newB;
	float lRate;
	update_bf(float *b, float *_newB, float lr) : biases(b), newB(_newB), lRate(lr){}
	__device__ void operator()(int t) {
		biases[t]-=lRate*newB[t];
	}
};
//Generic squaring functor; presumably combined with a reduction to build
//sum-of-squares error terms -- confirm at the call sites.
template<typename T>
struct square {
	__device__ T operator()(const T& x) const {
		const T v = x;
		return v * v;
	}
};
//Derivative of the logistic sigmoid at pre-activation t: s(t)*(1-s(t)).
//(The misspelled name is kept so existing call sites still compile.)
struct sigmoid_devrivative : public thrust::unary_function<double, double> {
	__device__ double operator()(double t) {
		double s=1.0/(1.0+exp(-t));
		return s*(1.0-s);
	}
};
//Logistic sigmoid activation: 1 / (1 + e^-t).
struct sigmoid : public thrust::unary_function<double, double> {
	sigmoid(){}
	__device__ double operator()(double t) {
		double denom=1.0+exp(-t);
		return 1.0/denom;
	}
};
//Element-wise natural exponential on the device.
struct exp_double : public thrust::unary_function<double, double> {
	__device__ double operator()(double t) {
		double e=exp(t);
		return e;
	}
};
//Forward-pass helper: adds the bias to the raw weighted sum stored at
//inputs[t] (updating it in place, since backprop re-reads it) and returns
//the sigmoid activation for that neuron.
struct forwardFeed_helper : public thrust::unary_function<int, double> {
	double *inputs;
	double *biases;
	forwardFeed_helper(){}
	forwardFeed_helper(double *_inputs, double* _biases) : inputs(_inputs), biases(_biases){}
	__device__ double operator()(int t) {
		double z=inputs[t]+biases[t];
		inputs[t]=z;
		return 1.0/(1.0+exp(-z));
	}
};
//Backprop helper for the layer next to the input: scales the incoming delta
//by the sigmoid derivative recomputed from the stored pre-activation value.
struct backProp_helper : public thrust::unary_function<int, double> {
	double *innerDelta;
	double *inputs;
	backProp_helper(){}
	backProp_helper(double* _innerDelta, double *_inputs) : innerDelta(_innerDelta), inputs(_inputs){}
	__device__ double operator()(int t) {
		double s=1.0/(1.0+exp(-inputs[t]));
		double derivative=s*(1.0-s);
		return innerDelta[t]*derivative;
	}
};
//Backprop helper for interior layers: outputs[] already holds the sigmoid
//activation o, so its derivative is simply o*(1-o).
struct backProp_helper2 : public thrust::unary_function<double, double> {
	double *outputs;
	double *innerDelta;
	backProp_helper2(){}
	backProp_helper2(double *_outputs, double* _innerDelta) : innerDelta(_innerDelta), outputs(_outputs){}
	__device__ double operator()(int t) {
		double o=outputs[t];
		double derivative=o*(1.0-o);
		return innerDelta[t]*derivative;
	}
};
//Output-layer delta: (prediction - label) scaled by the sigmoid derivative
//of the stored pre-activation input.
struct output_helper : public thrust::unary_function<int, double> {
	double *inputs;
	double *outputs;
	double *labels;
	double *innerDelta;
	output_helper(double *_outputs, double *_inputs, double* _innerDelta, double* _labels) : outputs(_outputs), inputs(_inputs), innerDelta(_innerDelta), labels(_labels){}
	__device__ double operator()(int t) {
		double err=outputs[t]-labels[t];
		double s=1.0/(1.0+exp(-inputs[t]));
		s=s*(1.0-s);
		return s*err;
	}
};
//Single-precision forward-pass helper: adds the bias into inputs[t] in
//place and returns the sigmoid activation. The double-precision literals
//in the sigmoid are kept deliberately so the numerics match the original
//(computed in double, then narrowed to float).
struct forwardFeed_helperf : public thrust::unary_function<int, float> {
	float *inputs;
	float *biases;
	forwardFeed_helperf(){}
	forwardFeed_helperf(float *_inputs, float* _biases) : inputs(_inputs), biases(_biases){}
	__device__ float operator()(int t) {
		float z=inputs[t]+biases[t];
		inputs[t]=z;
		return 1.0/(1.0+exp(-z));
	}
};
//Single-precision backprop helper (layer next to the input): delta scaled
//by the sigmoid derivative of the stored pre-activation. Double literals
//kept so intermediate math matches the original exactly.
struct backProp_helperf : public thrust::unary_function<int, float> {
	float *innerDelta;
	float *inputs;
	backProp_helperf(){}
	backProp_helperf(float* _innerDelta, float *_inputs) : innerDelta(_innerDelta), inputs(_inputs){}
	__device__ float operator()(int t) {
		float s=1.0/(1.0+exp(-inputs[t]));
		s=s*(1.0-s);
		return innerDelta[t]*s;
	}
};
//Single-precision backprop helper for interior layers: outputs[] already
//holds the activation o, so the derivative is o*(1-o). Double literal kept
//to preserve the original narrowing behavior.
struct backProp_helper2f : public thrust::unary_function<float, float> {
	float *outputs;
	float *innerDelta;
	backProp_helper2f(){}
	backProp_helper2f(float *_outputs, float* _innerDelta) : innerDelta(_innerDelta), outputs(_outputs){}
	__device__ float operator()(int t) {
		float o=outputs[t];
		o=o*(1.0-o);
		return innerDelta[t]*o;
	}
};
//Single-precision output-layer delta: (prediction - label) times the
//sigmoid derivative of the stored pre-activation. Double literals kept to
//match the original numerics.
struct output_helperf : public thrust::unary_function<int, float> {
	float *inputs;
	float *outputs;
	float *labels;
	float *innerDelta;
	output_helperf(float *_outputs, float *_inputs, float* _innerDelta, float* _labels) : outputs(_outputs), inputs(_inputs), innerDelta(_innerDelta), labels(_labels){}
	__device__ float operator()(int t) {
		float err=outputs[t]-labels[t];
		float s=1.0/(1.0+exp(-inputs[t]));
		s=s*(1.0-s);
		return s*err;
	}
};
//One fully-connected layer of the single-precision network: device-side
//activation, pre-activation, weight, bias and gradient buffers for a whole
//mini-batch. Buffers are laid out for use with cuBLAS GEMM calls.
//Fixes vs. the original:
// * the freeing branches called cudaFree(&member) on host-side
//   thrust::device_vector objects -- an invalid pointer for cudaFree, so
//   nothing was ever released; the swap idiom below actually frees storage.
// * the 3-argument constructor computed allN from a batchSize member it
//   never initialised (uninitialised read); allN is now set by setupLayer().
class NN_layerf {
public:
	device_vector<float> atNeuronOutputs;  //activations for the batch
	device_vector<float> atNeuronInputs;   //pre-activation sums (unused by the input layer)
	device_vector<float> weightsMatrix;    //weights to the next layer (absent on the output layer)
	device_vector<float> biases;
	device_vector<float> outerDeltaB;      //accumulated bias gradients
	device_vector<float> outerDeltaW;      //accumulated weight gradients
	device_vector<float> innerDeltaB;      //per-pass bias deltas
	device_vector<float> innerDeltaW;      //per-pass weight deltas
	NN_layerf(){}
	//Shallow constructor used when restoring a saved network; buffers are
	//allocated later by setupLayer(false).
	NN_layerf(int sizeThis, int sizeNext, int pType) :
		type(pType), thisSize(sizeThis), nextSize(sizeNext) {
		allW=thisSize*nextSize;
		allN=0;  //batchSize is unknown here; setupLayer() computes the real value
	}
	NN_layerf(int sizeThis, int sizeNext, int pBatchSize, int pType) :
		type(pType), thisSize(sizeThis), nextSize(sizeNext), batchSize(pBatchSize) {
		setupLayer(true);
	}
	//Allocates (newLayer==true) or re-allocates after a load (newLayer==false)
	//the device buffers, and builds the 0..n-1 index sequences that drive the
	//per-element thrust transforms.
	void setupLayer(bool newLayer) {
		atNeuronOutputs=device_vector<float>(batchSize*thisSize,0.0f);
		allW=thisSize*nextSize;
		allN=thisSize*batchSize;
		memTracker(allN*8,false);
		counterN=device_vector<int>(allN,0);
		counterW=device_vector<int>(allW,0);
		thrust::sequence(counterN.begin(),counterN.end());
		thrust::sequence(counterW.begin(),counterW.end());
		memTracker(allN*sizeof(int),false);
		memTracker(allW*sizeof(int),false);
		if(newLayer) {
			if(type!=INPUT) {
				atNeuronInputs=device_vector<float>(batchSize*thisSize,0.0f);
				memTracker(allN*8,false);
				biases=device_vector<float>(thisSize*batchSize,0.0f);
				memTracker(allN*8*3,false);
				outerDeltaB=device_vector<float>(allN,0.0f);
				innerDeltaB=device_vector<float>(allN,0.0f);
			} else {
				//The input layer never uses these; swap with an empty vector to
				//genuinely release any storage.
				device_vector<float>().swap(atNeuronInputs);
				device_vector<float>().swap(biases);
				device_vector<float>().swap(outerDeltaB);
				device_vector<float>().swap(innerDeltaB);
			}
			if(type!=OUTPUT) {
				weightsMatrix=device_vector<float>(thisSize*nextSize);
				memTracker(allW*8*3,false);
				outerDeltaW=vector<float>(allW,0.0f);
				innerDeltaW=vector<float>(allW,0.0f);
				//Initial weights uniform in [-1,1).
				random_floats(thrust::raw_pointer_cast(&weightsMatrix[0]),thisSize,nextSize);
				thrust::transform(weightsMatrix.begin(),weightsMatrix.end(),weightsMatrix.begin(),fix_random_numbers_f());
				cout << "thisSize: " << thisSize << " nextSize: " << nextSize << " thisSize*nextSize: " << (thisSize*nextSize) << endl;
			} else {
				device_vector<float>().swap(weightsMatrix);
				device_vector<float>().swap(outerDeltaW);
				device_vector<float>().swap(innerDeltaW);
			}
		} else {
			if(type!=INPUT) {
				atNeuronInputs=device_vector<float>(batchSize*thisSize,0.0f);
			} else {
				device_vector<float>().swap(atNeuronInputs);
				device_vector<float>().swap(biases);
			}
			if(type==OUTPUT) {
				device_vector<float>().swap(weightsMatrix);
			}
		}
	}
	int type, thisSize, nextSize, batchSize, allW, allN;  //allW/allN: element counts of weight / neuron buffers
	device_vector<int> counterN;
	device_vector<int> counterW;
};
//Double-precision twin of NN_layerf: one fully-connected layer's device
//buffers (activations, pre-activations, weights, biases, gradients) for a
//whole mini-batch, laid out for cuBLAS GEMM.
//Fixes vs. the original:
// * cudaFree(&member) was called on host-side thrust::device_vector objects
//   (invalid device pointer -- freed nothing); replaced with the swap idiom
//   which actually releases the storage.
// * the 3-argument constructor read the never-initialised batchSize member;
//   allN is now deferred to setupLayer().
class NN_layer {
public:
	device_vector<double> atNeuronOutputs;  //activations for the batch
	device_vector<double> atNeuronInputs;   //pre-activation sums (unused by the input layer)
	device_vector<double> weightsMatrix;    //weights to the next layer (absent on the output layer)
	device_vector<double> biases;
	device_vector<double> outerDeltaB;      //accumulated bias gradients
	device_vector<double> outerDeltaW;      //accumulated weight gradients
	device_vector<double> innerDeltaB;      //per-pass bias deltas
	device_vector<double> innerDeltaW;      //per-pass weight deltas
	NN_layer(){}
	//Shallow constructor used when restoring a saved network; buffers are
	//allocated later by setupLayer(false).
	NN_layer(int sizeThis, int sizeNext, int pType) :
		type(pType), thisSize(sizeThis), nextSize(sizeNext) {
		allW=thisSize*nextSize;
		allN=0;  //batchSize is unknown here; setupLayer() computes the real value
	}
	NN_layer(int sizeThis, int sizeNext, int pBatchSize, int pType) :
		type(pType), thisSize(sizeThis), nextSize(sizeNext), batchSize(pBatchSize) {
		setupLayer(true);
	}
	//Allocates (newLayer==true) or re-allocates after a load (newLayer==false)
	//the device buffers, and builds the 0..n-1 index sequences that drive the
	//per-element thrust transforms.
	void setupLayer(bool newLayer) {
		atNeuronOutputs=device_vector<double>(batchSize*thisSize,0.0);
		allW=thisSize*nextSize;
		allN=thisSize*batchSize;
		memTracker(allN*8,false);
		counterN=device_vector<int>(allN,0);
		counterW=device_vector<int>(allW,0);
		thrust::sequence(counterN.begin(),counterN.end());
		thrust::sequence(counterW.begin(),counterW.end());
		memTracker(allN*sizeof(int),false);
		memTracker(allW*sizeof(int),false);
		if(newLayer) {
			if(type!=INPUT) {
				atNeuronInputs=device_vector<double>(batchSize*thisSize,0.0);
				memTracker(allN*8,false);
				biases=device_vector<double>(thisSize*batchSize,0.0);
				memTracker(allN*8*3,false);
				outerDeltaB=device_vector<double>(allN,0.0);
				innerDeltaB=device_vector<double>(allN,0.0);
			} else {
				//The input layer never uses these; swap with an empty vector to
				//genuinely release any storage.
				device_vector<double>().swap(atNeuronInputs);
				device_vector<double>().swap(biases);
				device_vector<double>().swap(outerDeltaB);
				device_vector<double>().swap(innerDeltaB);
			}
			if(type!=OUTPUT) {
				weightsMatrix=device_vector<double>(thisSize*nextSize);
				memTracker(allW*8*3,false);
				outerDeltaW=vector<double>(allW,0.0);
				innerDeltaW=vector<double>(allW,0.0);
				//Initial weights uniform in [-1,1).
				random_doubles(thrust::raw_pointer_cast(&weightsMatrix[0]),thisSize,nextSize);
				thrust::transform(weightsMatrix.begin(),weightsMatrix.end(),weightsMatrix.begin(),fix_random_numbers());
				cout << "thisSize: " << thisSize << " nextSize: " << nextSize << " thisSize*nextSize: " << (thisSize*nextSize) << endl;
			} else {
				device_vector<double>().swap(weightsMatrix);
				device_vector<double>().swap(outerDeltaW);
				device_vector<double>().swap(innerDeltaW);
			}
		} else {
			if(type!=INPUT) {
				atNeuronInputs=device_vector<double>(batchSize*thisSize,0.0);
			} else {
				device_vector<double>().swap(atNeuronInputs);
				device_vector<double>().swap(biases);
			}
			if(type==OUTPUT) {
				device_vector<double>().swap(weightsMatrix);
			}
		}
	}
	int type, thisSize, nextSize, batchSize, allW, allN;  //allW/allN: element counts of weight / neuron buffers
	device_vector<int> counterN;
	device_vector<int> counterW;
};
//Argument bundle handed to each GPU worker thread (see fourthThread):
//identifies the worker/device and points at the shared training buffers.
struct idLink {
int whichThread;             //worker index; fourthThread also uses it to select the CUDA device
int interval;                //number of forward/backward passes the worker runs per outer loop
device_vector<double> *data;    //input batch, device memory, column-major per layer
device_vector<double> *labels;  //expected outputs for the batch
vector<NN_layer> *NNlayersQ;    //this worker's layer stack
vector<int> *hiddenMatrix;      //layer sizes, input..output
double learningRate;
int batchSize;
cublasHandle_t handle;       //currently unused: fourthThread creates its own handle -- TODO confirm
};
void *fourthThread(void *thread_parm) {
idLink data=*((idLink*) thread_parm);
int myID=data.whichThread;
int myDev=myID;//3-myID;
//if(myID==1){myDev=1;}
if(myDev) {
cudaSetDevice(myDev);
cudaDeviceEnablePeerAccess(0,0);//cudaDeviceEnablePeerAccess ( int peerDevice, unsigned int flags )
} else {
cudaDeviceEnablePeerAccess(1,0);
}
//cout << "myID started: " << myID << endl;
cublasHandle_t handle;//=data.handle;
cublasCreate(&handle);
int howMany=data.interval;
vector<int> hiddenMatrix=*data.hiddenMatrix;
int layers=hiddenMatrix.size();
int outputsIndex=layers-1;
int batchSize=data.batchSize;
int numOutputs=hiddenMatrix[outputsIndex];
int mOut, ii, mPlus, nextSize, prevSize, thisSize;
device_vector<double> *which;
bool gotTime=false;
high_resolution_clock::time_point startTime, endTime;
int timeCountDown=10;
const double alf = 1;
const double bet = 0;
const double *alpha = &alf;
const double *beta = &bet;
//double toDivideRMS=data.learningRate/(double)batchSize;
while(!threadExit) {
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::fill((*data.NNlayersQ)[ii].outerDeltaB.begin(),(*data.NNlayersQ)[ii].outerDeltaB.end(),0.0);
thrust::fill((*data.NNlayersQ)[i].outerDeltaW.begin(),(*data.NNlayersQ)[i].outerDeltaW.end(),0.0);
}//*/
for(int h=0;h<howMany;++h) {
//cout << "myID: " << myID << " howMany: " << howMany << "\n";
if(!myID && !gotTime && !timeCountDown) {
startTime=high_resolution_clock::now();
}
//forward propagation
which=data.data;
for(int i=0;i<outputsIndex;++i) {
//cout << "myID: " << myID << " here\n";
ii=i+1;
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[ii];
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, nextSize, batchSize, thisSize, alpha, (*data.NNlayersQ)[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, (*data.NNlayersQ)[ii].atNeuronInputs.data().get(), nextSize);
thrust::transform((*data.NNlayersQ)[ii].counterN.begin(),(*data.NNlayersQ)[ii].counterN.end(),(*data.NNlayersQ)[ii].atNeuronOutputs.begin(),forwardFeed_helper((*data.NNlayersQ)[ii].atNeuronInputs.data().get(),(*data.NNlayersQ)[ii].biases.data().get()));
which=&(*data.NNlayersQ)[ii].atNeuronOutputs;
}
//Backward propagation
mOut=outputsIndex-1;
mPlus=outputsIndex;
prevSize=hiddenMatrix[mOut];
thrust::transform((*data.NNlayersQ)[outputsIndex].counterN.begin(),(*data.NNlayersQ)[outputsIndex].counterN.end(),(*data.NNlayersQ)[outputsIndex].innerDeltaB.begin(),output_helper((*data.NNlayersQ)[outputsIndex].atNeuronOutputs.data().get(),(*data.NNlayersQ)[outputsIndex].atNeuronInputs.data().get(),(*data.NNlayersQ)[outputsIndex].innerDeltaB.data().get(),(*data.labels).data().get()));
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, numOutputs, prevSize, batchSize, alpha, (*data.NNlayersQ)[outputsIndex].innerDeltaB.data().get(), numOutputs, (*data.NNlayersQ)[mOut].atNeuronOutputs.data().get(), prevSize, beta, (*data.NNlayersQ)[mOut].innerDeltaW.data().get(), numOutputs);
--mOut;
for(int i=outputsIndex-1;i;--i) {
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[i+1];
prevSize=hiddenMatrix[i-1];
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, thisSize, batchSize, nextSize, alpha, (*data.NNlayersQ)[i].weightsMatrix.data().get(), nextSize, (*data.NNlayersQ)[i+1].innerDeltaB.data().get(), nextSize, beta, (*data.NNlayersQ)[i].innerDeltaB.data().get(), thisSize);
if(i!=1) {
thrust::transform((*data.NNlayersQ)[i].counterN.begin(),(*data.NNlayersQ)[i].counterN.end(),(*data.NNlayersQ)[i].innerDeltaB.begin(),backProp_helper2((*data.NNlayersQ)[i].atNeuronOutputs.data().get(),(*data.NNlayersQ)[i].innerDeltaB.data().get()));
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, thisSize, prevSize, batchSize, alpha, (*data.NNlayersQ)[i].innerDeltaB.data().get(), thisSize, (*data.NNlayersQ)[i-1].atNeuronOutputs.data().get(), prevSize, beta, (*data.NNlayersQ)[mOut].innerDeltaW.data().get(), thisSize);
} else {
thrust::transform((*data.NNlayersQ)[i].counterN.begin(),(*data.NNlayersQ)[i].counterN.end(),(*data.NNlayersQ)[i].innerDeltaB.begin(),backProp_helper((*data.NNlayersQ)[i].innerDeltaB.data().get(),(*data.NNlayersQ)[i].atNeuronInputs.data().get()));
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, thisSize, prevSize, batchSize, alpha, (*data.NNlayersQ)[i].innerDeltaB.data().get(), thisSize, (*data.data).data().get(), prevSize, beta, (*data.NNlayersQ)[mOut].innerDeltaW.data().get(), thisSize);
}
--mOut;
--mPlus;
}
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::transform((*data.NNlayersQ)[ii].innerDeltaB.begin(),(*data.NNlayersQ)[ii].innerDeltaB.end(),(*data.NNlayersQ)[ii].outerDeltaB.begin(),(*data.NNlayersQ)[ii].outerDeltaB.begin(),thrust::plus<double>());
thrust::transform((*data.NNlayersQ)[i].innerDeltaW.begin(),(*data.NNlayersQ)[i].innerDeltaW.end(),(*data.NNlayersQ)[i].outerDeltaW.begin(),(*data.NNlayersQ)[i].outerDeltaW.begin(),thrust::plus<double>());
}//*/
/*for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::for_each((*data.NNlayersQ)[i].counterW.begin(),(*data.NNlayersQ)[i].counterW.end(),update_w((*data.NNlayersQ)[i].weightsMatrix.data().get(),(*data.NNlayersQ)[i].innerDeltaW.data().get(),toDivideRMS));
thrust::for_each((*data.NNlayersQ)[ii].counterN.begin(),(*data.NNlayersQ)[ii].counterN.end(),update_b((*data.NNlayersQ)[ii].biases.data().get(),(*data.NNlayersQ)[ii].innerDeltaB.data().get(),toDivideRMS));
}//*/
/*for(int i=0;i<outputsIndex;++i) {
thrust::for_each(make_counting_iterator(0),make_counting_iterator((*data.NNlayersQ)[i].allW),update_w((*data.NNlayersQ)[i].weightsMatrix.data().get(),(*data.outerDeltaW)[i].data().get(),toDivideRMS));
thrust::for_each(make_counting_iterator(0),make_counting_iterator((*data.NNlayersQ)[i+1].allN),update_b((*data.NNlayersQ)[i+1].biases.data().get(),(*data.outerDeltaB)[i].data().get(),toDivideRMS));
}//*/
}
if(!myID) {
if(!gotTime) {
if(timeCountDown) {
--timeCountDown;
} else {
endTime=high_resolution_clock::now();
double seconds=duration_cast<microseconds>(endTime-startTime).count()/1000000.0;
printf("Update time interval approximately %.5f seconds apart(%.5f seconds per)\n",(seconds*(double)howMany)+1.0,seconds);
gotTime=true;
}
}
}
//}
//cout << "thread: " << myID << " before barrier one\n";
pthread_barrier_wait(&barrier);
//cout << "thread: " << myID << " after barrier one\n";
pthread_barrier_wait(&barrier2);
//cout << "thread: " << myID << " after barrier two\n";
}
cublasDestroy(handle);
free(thread_parm);
pthread_exit(0);
}
// Unary functor that divides its argument by a fixed divisor; used to
// average quantities accumulated across worker threads (see the
// `dThreads((double)numThreads)` instance in train_Quad).
// Generalized to __host__ __device__ so it works in both host-side and
// device-side thrust algorithms; operator() is const so the functor can
// be passed by value/const-ref without surprises.
struct divFour : public thrust::unary_function<double, double> {
	double what;	// divisor (e.g. the number of worker threads)
	divFour(double _what) : what(_what){}
	__host__ __device__ double operator()(double t) const {
		return t/what;
	}
};
class neuralNet {
public:
neuralNet(){}	// default constructor: performs no initialization (empty body)
// File-based constructor: creates the cuBLAS handle and then calls
// loadState() (defined elsewhere; presumably restores topology, weights
// and batch size from `_inFile` — confirm against its definition), then
// echoes the resulting layer sizes and batch size.
neuralNet(string _inFile) : inFile(_inFile) {
	cout << "Setting up network...\n";
	cublasCreate(&handle);
	loadState();
	cout << "Layers: ";
	for(size_t layerIdx=0;layerIdx<hiddenMatrix.size();++layerIdx) {
		cout << hiddenMatrix[layerIdx] << " ";
	}
	cout << "Batch size: " << batchSize << endl << endl;
}
//cublasXtHandle_t handlex;
// Multi-threaded constructor: builds `_numThreads` independent replicas of
// the layer stack (NNlayersQ[t]) over the topology
// [_numInputs, _hiddenMatrix..., _numOutputs], then copies replica 0's
// initial weights/biases into every other replica so all workers start
// from identical parameters.
// NOTE(review): `batchSize` is unconditionally overwritten to 10000 below,
// so the pBatchSize argument has no effect — confirm this is intentional.
neuralNet(int _numInputs, int _numOutputs, vector<int> &_hiddenMatrix, int pBatchSize, int _numThreads) :
hiddenMatrix(_hiddenMatrix), RMS(DBL_MAX), minRMS(DBL_MAX), batchSize(pBatchSize) {
//cublasXtCreate(&handlex);
//int dev[2]={0,1};
//cublasXtDeviceSelect(handlex,2,dev);
numThreads=_numThreads;
cublasCreate(&handle);
numInputs=_numInputs;
numOutputs=_numOutputs;
// bracket the hidden topology with the input and output layer sizes
hiddenMatrix.insert(hiddenMatrix.begin(),numInputs);
hiddenMatrix.push_back(numOutputs);
for(int i=0;i<numThreads;++i) {
NNlayersQ[i]=vector<NN_layer>(hiddenMatrix.size());
}
layers=hiddenMatrix.size();
outputsIndex=layers-1;
cout << "Setting up network...\n";
cout << "Layers: ";
for(auto h:hiddenMatrix) {
cout << h << " ";
}
// NOTE(review): overrides the pBatchSize member-initializer above
batchSize=10000;
cout << "Batch size: " << batchSize << endl << endl;
/*int who;
for(int i=3;i>0;--i) {
if(i!=2) {
cudaSetDevice(i);
for(int j=3;j>0;--j) {
if(i!=j) {
cudaDeviceEnablePeerAccess(j,0);
cudaDeviceCanAccessPeer(&who, i, j);//cudaDeviceCanAccessPeer(int* canAccessPeer, int device, int peerDevice);
cout << "who returned: " << who << " for device: " << i << " peerDevice: " << j << endl;
}
}
}
}
cudaSetDevice(3);//*/
// build each replica: input layer, hidden layers, then the output layer
// (constructed with next-layer size 0, i.e. no outgoing weight matrix)
for(int j=0;j<numThreads;++j) {
//cudaSetDevice(3-j);
NNlayersQ[j][0]=NN_layer(hiddenMatrix[0],hiddenMatrix[1],batchSize,INPUT);
for(int i=1;i<outputsIndex;++i) {
NNlayersQ[j][i]=NN_layer(hiddenMatrix[i],hiddenMatrix[i+1],batchSize,HIDDEN);
}
NNlayersQ[j][outputsIndex]=NN_layer(hiddenMatrix[outputsIndex],0,batchSize,OUTPUT);
}
// make every replica start from replica 0's (randomly initialized) parameters
for(int i=1;i<numThreads;++i) {
for(int j=0;j<outputsIndex;++j) {
thrust::copy(NNlayersQ[i-1][j].weightsMatrix.begin(),NNlayersQ[i-1][j].weightsMatrix.end(),NNlayersQ[i][j].weightsMatrix.begin());
thrust::copy(NNlayersQ[i-1][j+1].biases.begin(),NNlayersQ[i-1][j+1].biases.end(),NNlayersQ[i][j+1].biases.begin());
}
}
}
// Multi-threaded trainer. Spawns `numThreads` pthread workers (each pinned
// to its own CPU core), each owning a shard of the pre-built batches and a
// replica of the layers (NNlayersQ[t]). Per round, this host thread:
//   wait on `barrier` (workers done accumulating deltas) -> sum outer
//   deltas across replicas into replica 0 -> apply the SGD update to every
//   replica -> every `showInterval` epochs, run a forward-only pass to
//   report train/test accuracy -> wait on `barrier2` to release workers.
// Params: pData/pLabels = training items with one-hot labels;
//   maxIter = max epochs; RMSwant stored in RMSwanted; lRate = learning
//   rate (<0 selects the 0.05 default); pTestData/pTestLabels = test set.
// NOTE(review): doDataSetSize and vlRate are never referenced in this
// body; dataSetSize is hard-coded to 60000 and the test batch size to
// 10000 (presumably MNIST-sized — confirm).
void train_Quad(vector<vector<double>> &pData, vector<vector<double>> &pLabels, ULLI maxIter,
float RMSwant, int doDataSetSize, double lRate, vector<vector<double>> &pTestData, vector<vector<double>> &pTestLabels, bool vlRate) {
if(!showInterval) {
showInterval=10;
}
// collapse one-hot training labels to class indices for accuracy checks
vector<int> bLabels;
for(auto p:pLabels) {
bLabels.push_back(std::max_element(p.begin(), p.end())-p.begin());
}
// GEMM scalars: C = 1*A*B + 0*C (plain product, no accumulation)
const double alf = 1;
const double bet = 0;
const double *alpha = &alf;
const double *beta = &bet;
if(lRate<0.0) {
learningRate=0.05;
} else {
learningRate=lRate;
}
// NOTE(review): hard-coded; overrides the doDataSetSize parameter
dataSetSize=60000;
int testBatchSize=10000;
int batchStart,batchEnd, thisSize, nextSize;
RMSwanted=RMSwant;
maxEpochs=maxIter;
itemSize=pData[0].size();
int testSetSize=pTestData.size();
vector<int> btLabels;
// runtime-sized array of device_vectors (GCC VLA extension), one per test batch
device_vector<double> testData[testSetSize/testBatchSize];
if(testSetSize) {
for(auto p:pTestLabels) {
btLabels.push_back(std::max_element(p.begin(), p.end())-p.begin());
}
}
// per-thread, per-batch device copies of the training data and labels
device_vector<double> data[numThreads][dataSetSize/(batchSize*numThreads)];
device_vector<double> labels[numThreads][dataSetSize/(batchSize*numThreads)];
//Creating pre-made batches so I can simply copy them to layer[0]
cout << "Making batches in memory...\n";
int whichBatch=0;
int iii=0;
int itemsPerThread=dataSetSize/numThreads;
int batchesEach;
//cout << "itemsPerThread: " << itemsPerThread << endl;
// pack batches item-by-item; `iii` selects the owning worker thread's shard,
// `whichBatch` the batch index within that shard. `batchesEach` records the
// number of batches per shard (set when crossing a shard boundary).
for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
batchStart=0;
batchEnd=0;
if(((iii+1)*itemsPerThread)==itemNum) {
++iii;
batchesEach=whichBatch;
whichBatch=0;
}
//cout << "iii+1: " << (iii+1) << " itemNum: " << itemNum << " *:" << ((iii+1)*itemsPerThread) << " whichBatch: " << whichBatch << endl;
data[iii][whichBatch]=vector<double>(itemSize*batchSize);
memTracker(itemSize*batchSize*8,false);
labels[iii][whichBatch]=vector<double>(batchSize*numOutputs);
memTracker(numOutputs*batchSize*8,false);
for(int b=0;b<batchSize;++b) {
thrust::copy(pData[itemNum+b].begin(),pData[itemNum+b].end(),data[iii][whichBatch].begin()+batchStart);
thrust::copy(pLabels[itemNum+b].begin(),pLabels[itemNum+b].end(),labels[iii][whichBatch].begin()+batchEnd);
batchStart+=itemSize;
batchEnd+=numOutputs;
}
++whichBatch;
}
// same packing for the test set (kept in one array, not sharded per thread)
whichBatch=0;
for(int itemNum=0;itemNum<testSetSize;itemNum+=testBatchSize) {
testData[whichBatch]=vector<double>(itemSize*testBatchSize);
memTracker(itemSize*testBatchSize*8,false);
batchStart=0;
for(int j=0;j<testBatchSize;++j) {
thrust::copy(pTestData[itemNum+j].begin(),pTestData[itemNum+j].end(),testData[whichBatch].begin()+batchStart);
batchStart+=itemSize;
}
++whichBatch;
}
cout << "Starting training...\n";
device_vector<double>::iterator iter;
int position;
int gotRight=0;
//int numBatches=dataSetSize/batchSize;
//toDivideRMS=learningRate/((double)numBatches*(double)batchSize);
//toDivideRMS=learningRate/((double)batchSize*(double)showInterval);
//toDivideRMS=learningRate/((double)batchSize*(double)num_nodes);//*(double)showInterval);
// SGD step scale applied to the accumulated deltas
toDivideRMS=learningRate/(double)batchSize;
//toDivideRMS=learningRate/(double)showInterval;
int maxGotRight=0, maxTestRight=-1, ii;
device_vector<double> *which;
double seconds, totalTime=0.0;
high_resolution_clock::time_point startTime, endTime;
int showIntervalCountDown=showInterval;
//int sInterval=showInterval;
bool once=true;
vector<pthread_t> threads;
pthread_attr_t attr;
cpu_set_t cpus;
pthread_attr_init(&attr);
// divide-by-numThreads functor (only used by the commented-out averaging
// variants below)
divFour dThreads((double)numThreads);
//multi_helper hTimes((double)numberOfProcessors);
/*vector<double> tempDeltaB[outputsIndex];
vector<double> tempDeltaW[outputsIndex];
for(int i=0;i<outputsIndex;++i) {
tempDeltaW[i]=vector<double>(NNlayersQ[0][i].allW,0.0);
memTracker(NNlayersQ[0][i].allW*8,false);
tempDeltaB[i]=vector<double>(NNlayersQ[0][i+1].allN,0.0);
memTracker(NNlayersQ[0][i+1].allN*8,false);
}*/
memTracker(0,true);
for(int epochNum=0;!threadExit && epochNum<maxEpochs && maxGotRight!=dataSetSize && maxTestRight!=testSetSize;++epochNum) {//epochNum+=sInterval) {
startTime=high_resolution_clock::now();
// one-time worker spawn: pin thread j to core j and hand it its shard.
// The arg struct is heap-allocated here and freed by the worker.
if(once) {
for(int j=0;j<numThreads;++j) {
CPU_ZERO(&cpus);
CPU_SET(j, &cpus);
pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpus);
threads.push_back(pthread_t());
idLink *arg = (idLink*)malloc(sizeof(*arg));
(*arg).whichThread=j;
(*arg).data=&data[j][0];
(*arg).labels=&labels[j][0];
(*arg).hiddenMatrix=&hiddenMatrix;
(*arg).interval=batchesEach;
(*arg).NNlayersQ=&NNlayersQ[j];
(*arg).learningRate=learningRate;
(*arg).batchSize=batchSize;
(*arg).handle=handle;
pthread_create(&threads.at(j), &attr, fourthThread, arg);
}
once=false;
}
// workers have finished their batches and accumulated outer deltas
pthread_barrier_wait(&barrier);
//cout << "all got to here\n";
// sum every other replica's accumulated deltas into replica 0
for(int i=1;i<numThreads;++i) {
for(int j=0;j<outputsIndex;++j) {
ii=j+1;
thrust::transform(NNlayersQ[0][j].outerDeltaW.begin(),NNlayersQ[0][j].outerDeltaW.end(),NNlayersQ[i][j].outerDeltaW.begin(),NNlayersQ[0][j].outerDeltaW.begin(),thrust::plus<double>());
thrust::transform(NNlayersQ[0][ii].outerDeltaB.begin(),NNlayersQ[0][ii].outerDeltaB.end(),NNlayersQ[i][ii].outerDeltaB.begin(),NNlayersQ[0][ii].outerDeltaB.begin(),thrust::plus<double>());
//thrust::transform(NNlayersQ[0][j].innerDeltaW.begin(),NNlayersQ[0][j].innerDeltaW.end(),NNlayersQ[i][j].innerDeltaW.begin(),NNlayersQ[0][j].innerDeltaW.begin(),thrust::plus<double>());
//thrust::transform(NNlayersQ[0][ii].innerDeltaB.begin(),NNlayersQ[0][ii].innerDeltaB.end(),NNlayersQ[i][ii].innerDeltaB.begin(),NNlayersQ[0][ii].innerDeltaB.begin(),thrust::plus<double>());
}
}
/*for(int j=0;j<outputsIndex;++j) {
ii=j+1;
thrust::for_each(NNlayersQ[0][j].counterW.begin(),NNlayersQ[0][j].counterW.end(),update_w(&NNlayersQ[0][j].weightsMatrix[0],&NNlayersQ[0][j].outerDeltaW[0],toDivideRMS));
thrust::for_each(NNlayersQ[0][ii].counterN.begin(),NNlayersQ[0][ii].counterN.end(),update_b(&NNlayersQ[0][ii].biases[0],&NNlayersQ[0][ii].outerDeltaB[0],toDivideRMS));
}//*/
// apply the SGD step to every replica.
// NOTE(review): replica 0's outer deltas now hold the cross-thread sum,
// but each replica i is updated with its OWN outerDelta — replicas will
// diverge over time; confirm this is intended.
for(int i=0;i<numThreads;++i) {
for(int j=0;j<outputsIndex;++j) {
ii=j+1;
//thrust::for_each(NNlayersQ[i][j].counterW.begin(),NNlayersQ[i][j].counterW.end(),update_w(NNlayersQ[i][j].weightsMatrix.data().get(),NNlayersQ[0][j].innerDeltaW.data().get(),toDivideRMS));
//thrust::for_each(NNlayersQ[i][ii].counterN.begin(),NNlayersQ[i][ii].counterN.end(),update_b(NNlayersQ[i][ii].biases.data().get(),NNlayersQ[0][ii].innerDeltaB.data().get(),toDivideRMS));
//thrust::for_each(NNlayersQ[i][j].counterW.begin(),NNlayersQ[i][j].counterW.end(),update_w(&NNlayersQ[i][j].weightsMatrix[0],&tempDeltaW[j][0],toDivideRMS));
//thrust::for_each(NNlayersQ[i][ii].counterN.begin(),NNlayersQ[i][ii].counterN.end(),update_b(&NNlayersQ[i][ii].biases[0],&tempDeltaB[j][0],toDivideRMS));
thrust::for_each(NNlayersQ[i][j].counterW.begin(),NNlayersQ[i][j].counterW.end(),update_w(NNlayersQ[i][j].weightsMatrix.data().get(),NNlayersQ[i][j].outerDeltaW.data().get(),toDivideRMS));
thrust::for_each(NNlayersQ[i][ii].counterN.begin(),NNlayersQ[i][ii].counterN.end(),update_b(NNlayersQ[i][ii].biases.data().get(),NNlayersQ[i][ii].outerDeltaB.data().get(),toDivideRMS));
}
}//*/
// non-display epoch: just accumulate timing and release the workers
if(showIntervalCountDown) {
--showIntervalCountDown;
endTime=high_resolution_clock::now();
seconds=duration_cast<microseconds>(endTime-startTime).count()/1000000.0;
totalTime+=seconds;
pthread_barrier_wait(&barrier2);
continue;
} else {
showIntervalCountDown=showInterval;
/*for(int i=0;i<outputsIndex;++i) {
ii=i+1;
//MPI_Allreduce(&NNlayersQ[0][i].outerDeltaW[0],&NNlayersQ[1][i].outerDeltaW[0],NNlayersQ[0][i].allW,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
//MPI_Allreduce(&NNlayersQ[0][ii].outerDeltaB[0],&NNlayersQ[1][ii].outerDeltaB[0],NNlayersQ[0][ii].allN,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
//MPI_Allreduce(&NNlayersQ[0][i].innerDeltaW[0],&tempDeltaW[i][0],NNlayersQ[0][i].allW,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
//MPI_Allreduce(&NNlayersQ[0][i+1].innerDeltaB[0],&tempDeltaB[i][0],NNlayersQ[0][i+1].allN,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
thrust::transform(NNlayersQ[0][i].weightsMatrix.begin(),NNlayersQ[0][i].weightsMatrix.end(),NNlayersQ[2][i].weightsMatrix.begin(),hTimes);
thrust::transform(NNlayersQ[0][ii].biases.begin(),NNlayersQ[0][ii].biases.end(),NNlayersQ[2][ii].biases.begin(),hTimes);
//MPI_Allreduce(&NNlayersQ[2][i].weightsMatrix[0],&NNlayersQ[1][i].weightsMatrix[0],NNlayersQ[0][i].allW,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
//MPI_Allreduce(&NNlayersQ[2][ii].biases[0],&NNlayersQ[1][ii].biases[0],NNlayersQ[0][ii].allN,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
thrust::transform(NNlayersQ[1][i].weightsMatrix.begin(),NNlayersQ[1][i].weightsMatrix.end(),NNlayersQ[0][i].weightsMatrix.begin(),dThreads);
thrust::transform(NNlayersQ[1][ii].biases.begin(),NNlayersQ[1][ii].biases.end(),NNlayersQ[0][ii].biases.begin(),dThreads);
}*/
/*for(int i=0;i<numThreads;++i) {
for(int j=0;j<outputsIndex;++j) {
ii=j+1;
//thrust::for_each(NNlayersQ[i][j].counterW.begin(),NNlayersQ[i][j].counterW.end(),update_w(&NNlayersQ[i][j].weightsMatrix[0],&NNlayersQ[1][j].outerDeltaW[0],toDivideRMS));
//thrust::for_each(NNlayersQ[i][ii].counterN.begin(),NNlayersQ[i][ii].counterN.end(),update_b(&NNlayersQ[i][ii].biases[0],&NNlayersQ[1][ii].outerDeltaB[0],toDivideRMS));
thrust::copy(NNlayersQ[0][j].weightsMatrix.begin(),NNlayersQ[0][j].weightsMatrix.end(),NNlayersQ[i][j].weightsMatrix.begin());
thrust::copy(NNlayersQ[0][ii].biases.begin(),NNlayersQ[0][ii].biases.end(),NNlayersQ[i][ii].biases.begin());
}
}*/
}
// --- display epoch: forward-only accuracy pass over the TRAIN set using
// replica 0's parameters ---
gotRight=0;
whichBatch=0;
iii=0;
for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
if(((iii+1)*itemsPerThread)==itemNum) {
++iii;
whichBatch=0;
}
//forward propagation
which=&data[iii][whichBatch];
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[ii];
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayersQ[0][i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayersQ[0][ii].atNeuronInputs.data().get(), nextSize);
thrust::transform(NNlayersQ[0][ii].counterN.begin(),NNlayersQ[0][ii].counterN.end(),NNlayersQ[0][ii].atNeuronOutputs.begin(),forwardFeed_helper(NNlayersQ[0][ii].atNeuronInputs.data().get(),NNlayersQ[0][ii].biases.data().get()));
which=&NNlayersQ[0][ii].atNeuronOutputs;
}
batchStart=0;
batchEnd=numOutputs;
//printf("\nbatch starting at: %d\n",itemNum);
// predicted class = argmax over this item's numOutputs output neurons
for(int b=0;b<batchSize;++b) {
iter = thrust::max_element(NNlayersQ[0][outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayersQ[0][outputsIndex].atNeuronOutputs.begin()+batchEnd);
position = iter - NNlayersQ[0][outputsIndex].atNeuronOutputs.begin();
position -= batchStart;
/*printf("output: %d expected: %d\n",position,bLabels[itemNum+b]);
for(int ot=batchStart;ot<batchEnd;++ot) {
double oo=NNlayersQ[0][outputsIndex].atNeuronOutputs[ot];
printf("%.5f ",oo);
}
printf("\n");//*/
if(position==bLabels[itemNum+b]) {
++gotRight;
}
batchStart=batchEnd;
batchEnd+=numOutputs;
}
++whichBatch;
}
if(gotRight>maxGotRight){maxGotRight=gotRight;}
printf("Epoch: %d-Got %d of %d-max right: %d-lRate: %.5f-",epochNum,gotRight,dataSetSize,maxGotRight,learningRate);
// --- same forward-only accuracy pass over the TEST set ---
gotRight=0;
whichBatch=0;
for(int t=0;t<testSetSize;t+=testBatchSize) {
which=&testData[whichBatch];
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[ii];
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, nextSize, testBatchSize, thisSize, alpha, NNlayersQ[0][i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayersQ[0][ii].atNeuronInputs.data().get(), nextSize);
thrust::transform(NNlayersQ[0][ii].counterN.begin(),NNlayersQ[0][ii].counterN.end(),NNlayersQ[0][ii].atNeuronOutputs.begin(),forwardFeed_helper(NNlayersQ[0][ii].atNeuronInputs.data().get(),NNlayersQ[0][ii].biases.data().get()));
which=&NNlayersQ[0][ii].atNeuronOutputs;
}
batchStart=0;
batchEnd=numOutputs;
//printf("\nbatch starting at: %d\n",t);
for(int b=0;b<testBatchSize;++b) {
iter = thrust::max_element(NNlayersQ[0][outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayersQ[0][outputsIndex].atNeuronOutputs.begin()+batchEnd);
position = iter - NNlayersQ[0][outputsIndex].atNeuronOutputs.begin();
position -= batchStart;
/*printf("output: %d expected: %d\n",position,btLabels[t+b]);
for(int ot=batchStart;ot<batchEnd;++ot) {
double oo=NNlayersQ[0][outputsIndex].atNeuronOutputs[ot];
printf("%.5f ",oo);
}
printf("\n");//*/
if(position==btLabels[t+b]) {
++gotRight;
}
batchStart=batchEnd;
batchEnd+=numOutputs;
}
++whichBatch;
}
if(gotRight>maxTestRight){maxTestRight=gotRight;}
endTime=high_resolution_clock::now();
seconds=duration_cast<microseconds>(endTime-startTime).count()/1000000.0;
totalTime+=seconds;
double errRate=(1.0-((double)gotRight/(double)testSetSize))*100.0;
printf("Test-Got %d of %d-max right: %d-totTime: %.5f-eRate:%.5f perc\n",gotRight,testSetSize,maxTestRight,totalTime,errRate);
// perfect test accuracy signals all workers to exit on their next round
if(testSetSize!=gotRight) {
/*for(int i=1;i<numThreads;++i) {
for(int j=0;j<outputsIndex;++j) {
ii=j+1;
thrust::copy(NNlayersQ[0][j].weightsMatrix.begin(),NNlayersQ[0][j].weightsMatrix.end(),NNlayersQ[i][j].weightsMatrix.begin());
thrust::copy(NNlayersQ[0][ii].biases.begin(),NNlayersQ[0][ii].biases.end(),NNlayersQ[i][ii].biases.begin());
}
}*/
} else {
threadExit=true;
}
// release the workers for the next epoch
pthread_barrier_wait(&barrier2);
}
// wait for all workers to terminate before returning
int status;
void * result;
for (int i=0; i < numThreads; ++i) {
if ((status = pthread_join(threads.at(i), &result)) != 0) {
fprintf (stderr, "join error %d: %s\n", status, strerror(status));
}
}
//saveStateQ("MPIv2-");
}
int numThreads;
// Serialize the network state to a binary file: epoch, layer count,
// topology, batch size, learning rate, then all weights followed by all
// biases, taken from replica 0 (NNlayersQ[0]). "Cuda-<dataSetSize>" is
// appended to the requested file name. Values are copied off the device
// one element at a time via device_vector indexing.
void saveStateQ(string outFile) {
	outFile+="Cuda-"+to_string(dataSetSize);
	cout << "Writing weights to file: " << outFile << endl;
	ofstream oFile(outFile, ios::binary|ios::out);
	if(oFile.is_open()) {
		// header: epoch counter, number of layers, per-layer sizes
		oFile.write((char*)&epoch,sizeof(ULLI));
		oFile.write((char*)&layers,sizeof(ULLI));
		for(size_t layerNum=0;layerNum<hiddenMatrix.size();++layerNum) {
			oFile.write((char*)&hiddenMatrix[layerNum],sizeof(int));
		}
		oFile.write((char*)&batchSize,sizeof(int));
		oFile.write((char*)&learningRate,sizeof(double));
		// weight matrices for every layer except the output layer
		for(int layerNum=0;layerNum<outputsIndex;++layerNum) {
			for(int w=0;w<NNlayersQ[0][layerNum].allW;++w) {
				double hostVal=NNlayersQ[0][layerNum].weightsMatrix[w];
				oFile.write((char*)&hostVal,sizeof(double));
			}
		}
		// bias vectors for every layer except the input layer
		for(int layerNum=1;layerNum<layers;++layerNum) {
			for(int b=0;b<NNlayersQ[0][layerNum].allN;++b) {
				double hostVal=NNlayersQ[0][layerNum].biases[b];
				oFile.write((char*)&hostVal,sizeof(double));
			}
		}
		oFile.close();
	}
	cout << "Done\n";
}
// Single-replica constructor: builds one layer stack (NNlayers) over the
// topology [_numInputs, _hiddenMatrix..., _numOutputs]. A requested batch
// size below 100 is replaced with the default of 10000.
neuralNet(int _numInputs, int _numOutputs, vector<int> &_hiddenMatrix, int pBatchSize) :
hiddenMatrix(_hiddenMatrix), RMS(DBL_MAX), minRMS(DBL_MAX), batchSize(pBatchSize) {
	if(batchSize<100) {
		batchSize=10000;
	}
	cublasCreate(&handle);
	numInputs=_numInputs;
	numOutputs=_numOutputs;
	// bracket the hidden topology with the input and output layer sizes
	hiddenMatrix.insert(hiddenMatrix.begin(),numInputs);
	hiddenMatrix.push_back(numOutputs);
	NNlayers=vector<NN_layer>(hiddenMatrix.size());
	layers=hiddenMatrix.size();
	outputsIndex=layers-1;
	cout << "Setting up network...\n";
	cout << "Layers: ";
	for(size_t layerIdx=0;layerIdx<hiddenMatrix.size();++layerIdx) {
		cout << hiddenMatrix[layerIdx] << " ";
	}
	cout << "Batch size: " << batchSize << endl << endl;
	// input layer, hidden layers, then the output layer (constructed with
	// a next-layer size of 0)
	NNlayers[0]=NN_layer(hiddenMatrix[0],hiddenMatrix[1],batchSize,INPUT);
	for(int layerNum=1;layerNum<outputsIndex;++layerNum) {
		NNlayers[layerNum]=NN_layer(hiddenMatrix[layerNum],hiddenMatrix[layerNum+1],batchSize,HIDDEN);
	}
	NNlayers[outputsIndex]=NN_layer(hiddenMatrix[outputsIndex],0,batchSize,OUTPUT);
}
void train_MatMul(vector<vector<float>> &pData, vector<vector<double>> &pLabels, ULLI maxIter,
float RMSwant, int doDataSetSize, double lRate, vector<vector<float>> &pTestData, vector<vector<double>> &pTestLabels, bool vlRate) {
if(!showInterval) {
showInterval=10;
}
vector<UNCHAR> bLabels;
for(auto p:pLabels) {
bLabels.push_back((UNCHAR)(thrust::max_element(p.begin(), p.end())-p.begin()));
}
if(lRate<0.0) {
learningRate=0.05;
} else {
learningRate=lRate;
}
if(!doDataSetSize) {
doDataSetSize=60000;
}
dataSetSize=doDataSetSize;
const double alf = 1;
const double bet = 0;
const double *alpha = &alf;
const double *beta = &bet;
int batchStart,batchEnd, thisSize, nextSize;
RMSwanted=RMSwant;
maxEpochs=maxIter;
itemSize=pData[0].size();
int testSetSize=pTestData.size();
vector<UNCHAR> btLabels;
device_vector<double> testData[testSetSize/batchSize];
if(testSetSize) {
for(auto p:pTestLabels) {
btLabels.push_back((UNCHAR)(thrust::max_element(p.begin(), p.end())-p.begin()));
}
} else {
cudaFree(&testData);
}
int numBatches=dataSetSize/batchSize;
device_vector<double> data[numBatches];
device_vector<double> labels[numBatches];
//float *temp;
//double *tempd;
//ULLI len=pData[0].size();
//ULLI llen=pLabels[0].size();
/*for(int i=0;i<dataSetSize;++i) {
temp=&pData[i][0];
dataTemp[i]=device_vector<float>(temp, temp+len);
tempd=&pLabels[i][0];
labelsTemp[i]=device_vector<double>(tempd, tempd+llen);
}*/
//Creating pre-made batches so I can simply copy them to layer[0]
cout << "Making batches in video memory...\n";
int whichBatch=0;
for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
batchStart=0;
batchEnd=0;
data[whichBatch]=device_vector<double>(itemSize*batchSize);
memTracker(itemSize*batchSize*8,false);
labels[whichBatch]=device_vector<double>(batchSize*numOutputs);
memTracker(numOutputs*batchSize*8,false);
for(int b=0;b<batchSize;++b) {
//temp=&pData[itemNum+b][0];
//dataTemp=device_vector<float>(temp, temp+len);
//tempd=&pLabels[itemNum+b][0];
//labelsTemp=device_vector<double>(tempd, tempd+llen);
//thrust::transform(dataTemp[itemNum+b].begin(),dataTemp[itemNum+b].end(),dataTransposeTemp.begin()+batchStart,floatToDoubleFunctor());
//thrust::transform(dataTemp.begin(),dataTemp.end(),data[whichBatch].begin()+batchStart,floatToDoubleFunctor());
//thrust::transform((device_vector<float>(temp, temp+len)).begin(),(device_vector<float>(temp, temp+len)).end(),data[whichBatch].begin()+batchStart,floatToDoubleFunctor());
thrust::copy(pData[itemNum+b].begin(),pData[itemNum+b].end(),data[whichBatch].begin()+batchStart);//,floatToDoubleFunctor());
//thrust::copy(dataTemp[itemNum+b].begin(),dataTemp[itemNum+b].end(),dataTransposeTemp.begin()+batchStart);
//thrust::copy(labelsTemp[itemNum+b].begin(),labelsTemp[itemNum+b].end(),batchLabels.begin()+batchEnd);
//thrust::copy(labelsTemp.begin(),labelsTemp.end(),labels[whichBatch].begin()+batchEnd);
thrust::copy(pLabels[itemNum+b].begin(),pLabels[itemNum+b].end(),labels[whichBatch].begin()+batchEnd);
batchStart+=itemSize;
batchEnd+=numOutputs;
}
//cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, batchSize, numOutputs, alpha, batchLabels.data().get(), numOutputs, beta, batchLabels.data().get(), numOutputs, labels[whichBatch].data().get(), batchSize);
//cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, batchSize, itemSize, alpha, dataTransposeTemp.data().get(), itemSize, beta, dataTransposeTemp.data().get(), itemSize, data[whichBatch].data().get(), batchSize);
++whichBatch;
}
whichBatch=0;
for(int i=0;i<testSetSize;i+=batchSize) {
testData[whichBatch]=device_vector<double>(itemSize*batchSize);
memTracker(itemSize*batchSize*8,false);
batchStart=0;
for(int j=0;j<batchSize;++j) {
//temp=&pTestData[i+j][0];
//dataTemp=device_vector<float>(temp, temp+len);
//tempd=&pTestLabels[i][0];
//labelsTemp=device_vector<double>(tempd, tempd+llen);
//thrust::transform(dataTemp.begin(),dataTemp.end(),testData[whichBatch].begin()+batchStart,floatToDoubleFunctor());
//thrust::transform((device_vector<float>(temp, temp+len)).begin(),(device_vector<float>(temp, temp+len)).end(),testData[i].begin(),floatToDoubleFunctor());
thrust::copy(pTestData[i+j].begin(),pTestData[i+j].end(),testData[whichBatch].begin()+batchStart);
//thrust::copy(labelsTemp.begin(),labelsTemp.end(),testLabels[i].begin());
batchStart+=itemSize;
}
++whichBatch;
}
int mOut=outputsIndex-2;
/*zipIterator begin2[outputsIndex];
zipIterator end2[outputsIndex];
zipIterator begin1[outputsIndex];
zipIterator end1[outputsIndex];
for(int i=outputsIndex-1;i;--i) {
begin2[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronOutputs.begin(), innerDeltaB[mOut].begin()));
end2[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronOutputs.end(), innerDeltaB[mOut].end()));
begin1[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronInputs.begin(), innerDeltaB[mOut].begin()));
end1[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronInputs.end(), innerDeltaB[mOut--].end()));
}
backProp_helper2 backProp2;
backProp_helper backProp;
//zipIterator fBegin[layers];
//zipIterator fEnd[layers];
forwardFeed_helper forwardFeed[layers];
for(int i=1;i<layers;++i) {
forwardFeed[i]=forwardFeed_helper(NNlayers[i].atNeuronInputs.data().get(),NNlayers[i].biases.data().get());
//fBegin[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronInputs.begin(),NNlayers[i].biases.begin()));
//fEnd[i]=zipIterator(thrust::make_tuple(NNlayers[i].atNeuronInputs.end(),NNlayers[i].biases.end()));
}*/
//forwardFeed_helper forwardFeed;
cout << "Starting training...\n";
memTracker(0,true);
//cudaFree(&dataTemp);
//cudaFree(&labelsTemp);
//cudaFree(&dataTransposeTemp);
//cudaFree(&batchLabels);
thrust::device_vector<double>::iterator iter;
int position;
int gotRight=0, prevSize;
//toDivideRMS=learningRate;
//toDivideRMS=learningRate/((double)numBatches*(double)batchSize);
toDivideRMS=learningRate/(double)batchSize;
//toDivideRMS=learningRate/(double)numBatches;
int maxGotRight=0, maxTestRight=-1, ii;
device_vector<double> *which;
double origLearningRate=learningRate, seconds, totalTime=0.0;
high_resolution_clock::time_point startTime, endTime;
int showIntervalCountDown=showInterval;
double lastNoShowTime=0.0;
int timeEstCountDown=10, mPlus;
for(int epochNum=0;!threadExit && epochNum<maxEpochs && maxGotRight!=dataSetSize && maxTestRight!=testSetSize;++epochNum) {
whichBatch=0;
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::fill(NNlayers[ii].outerDeltaB.begin(),NNlayers[ii].outerDeltaB.end(),0.0);
thrust::fill(NNlayers[i].outerDeltaW.begin(),NNlayers[i].outerDeltaW.end(),0.0);
}//*/
if(!showIntervalCountDown) {
gotRight=0;
}
startTime=high_resolution_clock::now();
for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
//forward propagation
which=&data[whichBatch];
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[ii];
//cublasXtDgemm(handlex, CUBLAS_OP_N, CUBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayers[ii].atNeuronInputs.data().get(), nextSize);
//cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayers[ii].atNeuronInputs.data().get(), nextSize);
//thrust::transform(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[ii].allN),NNlayers[ii].atNeuronOutputs.begin(),forwardFeed_helper(NNlayers[ii].atNeuronInputs.data().get(),NNlayers[ii].biases.data().get()));
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayers[ii].atNeuronInputs.data().get(), nextSize);
thrust::transform(NNlayers[ii].counterN.begin(),NNlayers[ii].counterN.end(),NNlayers[ii].atNeuronOutputs.begin(),forwardFeed_helper(NNlayers[ii].atNeuronInputs.data().get(),NNlayers[ii].biases.data().get()));
which=&NNlayers[ii].atNeuronOutputs;
}
//first check how many we got right
if(!showIntervalCountDown) {
batchStart=0;
batchEnd=numOutputs;
//printf("\nbatch starting at: %d\n",itemNum);
for(int b=0;b<batchSize;++b) {
iter = thrust::max_element(NNlayers[outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayers[outputsIndex].atNeuronOutputs.begin()+batchEnd);
position = iter - NNlayers[outputsIndex].atNeuronOutputs.begin();
position -= batchStart;
//printf("output: %d expected: %d\n",position,bLabels[itemNum+b]);
//for(int ot=batchStart;ot<batchEnd;++ot) {
// double oo=NNlayers[outputsIndex].atNeuronOutputs[ot];
// printf("%.5f ",oo);
//}
//printf("\n");
if(position==bLabels[itemNum+b]) {
++gotRight;
}
batchStart=batchEnd;
batchEnd+=numOutputs;
}
}
//Backward propagation
mOut=outputsIndex-1;
mPlus=outputsIndex;
prevSize=hiddenMatrix[mOut];
//which=&innerDeltaB[mOut];
thrust::transform(NNlayers[outputsIndex].counterN.begin(),NNlayers[outputsIndex].counterN.end(),NNlayers[outputsIndex].innerDeltaB.begin(),output_helper(NNlayers[outputsIndex].atNeuronOutputs.data().get(),NNlayers[outputsIndex].atNeuronInputs.data().get(),NNlayers[outputsIndex].innerDeltaB.data().get(),labels[whichBatch].data().get()));
//thrust::transform(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[outputsIndex].allN),(*which).begin(),output_helper(NNlayers[outputsIndex].atNeuronOutputs.data().get(),NNlayers[outputsIndex].atNeuronInputs.data().get(),(*which).data().get(),labels[whichBatch].data().get()));
//thrust::transform(counterBegin,nodesCounterEnd[outputsIndex],innerDeltaB[mOut].begin(),output_helper(NNlayers[outputsIndex].atNeuronOutputs.data().get(),NNlayers[outputsIndex].atNeuronInputs.data().get(),innerDeltaB[mOut].data().get(),labels[whichBatch].data().get()));
//cublasXtDgemm(handlex, CUBLAS_OP_N, CUBLAS_OP_T, numOutputs, prevSize, batchSize, alpha, innerDeltaB[mOut].data().get(), numOutputs, NNlayers[mOut].atNeuronOutputs.data().get(), prevSize, beta, innerDeltaW[mOut].data().get(), numOutputs);
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, numOutputs, prevSize, batchSize, alpha, NNlayers[outputsIndex].innerDeltaB.data().get(), numOutputs, NNlayers[mOut].atNeuronOutputs.data().get(), prevSize, beta, NNlayers[mOut].innerDeltaW.data().get(), numOutputs);
//cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, numOutputs, prevSize, batchSize, alpha, (*which).data().get(), numOutputs, NNlayers[mOut].atNeuronOutputs.data().get(), prevSize, beta, innerDeltaW[mOut].data().get(), numOutputs);
--mOut;
for(int i=outputsIndex-1;i;--i) {
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[i+1];
prevSize=hiddenMatrix[i-1];
//which=&innerDeltaB[mOut];
//cublasXtDgemm(handlex, CUBLAS_OP_T, CUBLAS_OP_N, thisSize, batchSize, nextSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, innerDeltaB[mOut+1].data().get(), nextSize, beta, innerDeltaB[mOut].data().get(), thisSize);
cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, thisSize, batchSize, nextSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, NNlayers[i+1].innerDeltaB.data().get(), nextSize, beta, NNlayers[i].innerDeltaB.data().get(), thisSize);
//cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, thisSize, batchSize, nextSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, innerDeltaB[mOut+1].data().get(), nextSize, beta, (*which).data().get(), thisSize);
//thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[i].allW),update_w(NNlayers[i].weightsMatrix.data().get(),innerDeltaW[i].data().get(),toDivideRMS));
////thrust::for_each(counterBegin,weightsCounterEnd[i],update_w(NNlayers[i].weightsMatrix.data().get(),innerDeltaW[i].data().get(),toDivideRMS));
//thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[mPlus].allN),update_b(NNlayers[mPlus].biases.data().get(),innerDeltaB[i].data().get(),toDivideRMS));
////thrust::for_each(counterBegin,nodesCounterEnd[mPlus],update_b(NNlayers[mPlus].biases.data().get(),innerDeltaB[i].data().get(),toDivideRMS));
//zipIterator begin(thrust::make_tuple(NNlayers[i].atNeuronOutputs.begin(), innerDeltaB[mOut].begin()));
//zipIterator end(thrust::make_tuple(NNlayers[i].atNeuronOutputs.end(), innerDeltaB[mOut].end()));
//thrust::transform(begin,end,innerDeltaB[mOut].begin(),backProp_helper2());
//thrust::transform(begin2[i],end2[i],innerDeltaB[mOut].begin(),backProp2);
if(i!=1) {
thrust::transform(NNlayers[i].counterN.begin(),NNlayers[i].counterN.end(),NNlayers[i].innerDeltaB.begin(),backProp_helper2(NNlayers[i].atNeuronOutputs.data().get(),NNlayers[i].innerDeltaB.data().get()));
//thrust::transform(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[i].allN),(*which).begin(),backProp_helper2(NNlayers[i].atNeuronOutputs.data().get(),(*which).data().get()));
//thrust::transform(counterBegin,nodesCounterEnd[i],innerDeltaB[mOut].begin(),backProp_helper2(NNlayers[i].atNeuronOutputs.data().get(),innerDeltaB[mOut].data().get()));
//cublasXtDgemm(handlex, CUBLAS_OP_N, CUBLAS_OP_T, thisSize, prevSize, batchSize, alpha, innerDeltaB[mOut].data().get(), thisSize, NNlayers[i-1].atNeuronOutputs.data().get(), prevSize, beta, innerDeltaW[mOut].data().get(), thisSize);
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, thisSize, prevSize, batchSize, alpha, NNlayers[i].innerDeltaB.data().get(), thisSize, NNlayers[i-1].atNeuronOutputs.data().get(), prevSize, beta, NNlayers[mOut].innerDeltaW.data().get(), thisSize);
//cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, thisSize, prevSize, batchSize, alpha, (*which).data().get(), thisSize, NNlayers[i-1].atNeuronOutputs.data().get(), prevSize, beta, innerDeltaW[mOut].data().get(), thisSize);
} else {
//zipIterator begin(thrust::make_tuple(NNlayers[i].atNeuronInput.begin(), innerDeltaB[mOut].begin()));
//zipIterator end(thrust::make_tuple(NNlayers[i].atNeuronInputs.end(), innerDeltaB[mOut].end()));
//thrust::transform(begin,end,innerDeltaB[mOut].begin(),backProp_helper());
//thrust::transform(begin1[i],end1[i],innerDeltaB[mOut].begin(),backProp);
thrust::transform(NNlayers[i].counterN.begin(),NNlayers[i].counterN.end(),NNlayers[i].innerDeltaB.begin(),backProp_helper(NNlayers[i].innerDeltaB.data().get(),NNlayers[i].atNeuronInputs.data().get()));
//thrust::transform(counterBegin,nodesCounterEnd[i],innerDeltaB[mOut].begin(),backProp_helper(innerDeltaB[mOut].data().get(),NNlayers[i].atNeuronInputs.data().get()));
//cublasXtDgemm(handlex, CUBLAS_OP_N, CUBLAS_OP_T, thisSize, prevSize, batchSize, alpha, innerDeltaB[mOut].data().get(), thisSize, data[whichBatch].data().get(), prevSize, beta, innerDeltaW[mOut].data().get(), thisSize);
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, thisSize, prevSize, batchSize, alpha, NNlayers[i].innerDeltaB.data().get(), thisSize, data[whichBatch].data().get(), prevSize, beta, NNlayers[mOut].innerDeltaW.data().get(), thisSize);
}
--mOut;
--mPlus;
}
//thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[0].allW),update_w(NNlayers[0].weightsMatrix.data().get(),innerDeltaW[0].data().get(),toDivideRMS));
////thrust::for_each(counterBegin,weightsCounterEnd[0],update_w(NNlayers[0].weightsMatrix.data().get(),innerDeltaW[0].data().get(),toDivideRMS));
//thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[1].allN),update_b(NNlayers[1].biases.data().get(),innerDeltaB[0].data().get(),toDivideRMS));
////thrust::for_each(counterBegin,nodesCounterEnd[1],update_b(NNlayers[1].biases.data().get(),innerDeltaB[0].data().get(),toDivideRMS));
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::transform(NNlayers[ii].innerDeltaB.begin(),NNlayers[ii].innerDeltaB.end(),NNlayers[ii].outerDeltaB.begin(),NNlayers[ii].outerDeltaB.begin(),thrust::plus<double>());
thrust::transform(NNlayers[i].innerDeltaW.begin(),NNlayers[i].innerDeltaW.end(),NNlayers[i].outerDeltaW.begin(),NNlayers[i].outerDeltaW.begin(),thrust::plus<double>());
}//*/
/*for(int i=0;i<outputsIndex;++i) {
thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[i].allW),update_w(NNlayers[i].weightsMatrix.data().get(),innerDeltaW[i].data().get(),toDivideRMS));
thrust::for_each(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[i+1].allN),update_b(NNlayers[i+1].biases.data().get(),innerDeltaB[i].data().get(),toDivideRMS));
}//*/
++whichBatch;
}
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thrust::for_each(NNlayers[i].counterW.begin(),NNlayers[i].counterW.end(),update_w(NNlayers[i].weightsMatrix.data().get(),NNlayers[i].outerDeltaW.data().get(),toDivideRMS));
thrust::for_each(NNlayers[ii].counterN.begin(),NNlayers[ii].counterN.end(),update_b(NNlayers[ii].biases.data().get(),NNlayers[ii].outerDeltaB.data().get(),toDivideRMS));
}//*/
if(!showIntervalCountDown) {
if(gotRight>maxGotRight){maxGotRight=gotRight;}
printf("Epoch: %d-Got %d of %d-max right: %d-lRate: %.5f",epochNum,gotRight,dataSetSize,maxGotRight,learningRate);
printf("-");
gotRight=0;
whichBatch=0;
for(int t=0;t<testSetSize;t+=batchSize) {
which=&testData[whichBatch];
for(int i=0;i<outputsIndex;++i) {
ii=i+1;
thisSize=hiddenMatrix[i];
nextSize=hiddenMatrix[ii];
//cublasXtDgemm(handlex, CUBLAS_OP_N, CUBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayers[ii].atNeuronInputs.data().get(), nextSize);
cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayers[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayers[ii].atNeuronInputs.data().get(), nextSize);
thrust::transform(NNlayers[ii].counterN.begin(),NNlayers[ii].counterN.end(),NNlayers[ii].atNeuronOutputs.begin(),forwardFeed_helper(NNlayers[ii].atNeuronInputs.data().get(),NNlayers[ii].biases.data().get()));
//thrust::transform(thrust::make_counting_iterator(0),thrust::make_counting_iterator(NNlayers[ii].allN),NNlayers[ii].atNeuronOutputs.begin(),forwardFeed[ii]);
which=&NNlayers[ii].atNeuronOutputs;
}
if(!showIntervalCountDown) {
batchStart=0;
batchEnd=numOutputs;
//printf("\nbatch starting at: %d\n",t);
for(int b=0;b<batchSize;++b) {
iter = thrust::max_element(NNlayers[outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayers[outputsIndex].atNeuronOutputs.begin()+batchEnd);
position = iter - NNlayers[outputsIndex].atNeuronOutputs.begin();
position -= batchStart;
/*printf("output: %d expected: %d\n",position,btLabels[t+b]);
for(int ot=batchStart;ot<batchEnd;++ot) {
double oo=NNlayers[outputsIndex].atNeuronOutputs[ot];
printf("%.5f ",oo);
}
printf("\n");//*/
if(position==btLabels[t+b]) {
++gotRight;
}
batchStart=batchEnd;
batchEnd+=numOutputs;
}
}
++whichBatch;
}
if(gotRight>maxTestRight && testSetSize){maxTestRight=gotRight;}
}
if(vlRate) {
//if(epochNum>1) {
double cutOff=0.92;
double percLearned=(double)gotRight/(double)testSetSize;
if(percLearned<0.99 && percLearned>cutOff) {
percLearned=1.0-percLearned;
//percLearned=(1.0-percLearned)*2.0;
//percLearned=(1.0-percLearned)/2.0;
//percLearned=pow(1.0-percLearned,(double)layers);
//percLearned=pow(1.0-percLearned,2.0);
//alfLearn=-(percLearned*(learningRate/2.0)+(learningRate/2.0));
learningRate=(cutOff*origLearningRate)+percLearned;//-(percLearned*origLearningRate);
toDivideRMS=learningRate/(double)batchSize;
//toDivideRMS=learningRate/(double)numBatches;
} else {
if(percLearned<0.99) {
learningRate=origLearningRate;
toDivideRMS=learningRate/(double)batchSize;
//toDivideRMS=learningRate/(double)numBatches;
}
}
//}
}
endTime=high_resolution_clock::now();
seconds=duration_cast<microseconds>(endTime-startTime).count()/1000000.0;
totalTime+=seconds;
if(!showIntervalCountDown) {
double errRate=(1.0-((double)gotRight/(double)testSetSize))*100.0;
printf("Test-Got %d of %d-max right: %d-sec: %.5f-totTime: %.5f-errRate: %.5f\n",gotRight,testSetSize,maxTestRight,lastNoShowTime,totalTime,errRate);
showIntervalCountDown=showInterval;
/*if(maxTestRight!=gotRight) {
pthread_mutex_lock(&crmutex);
counterGo=true;
pthread_mutex_unlock(&crmutex);
}*/
} else {
lastNoShowTime=seconds;
--showIntervalCountDown;
if(timeEstCountDown) {
--timeEstCountDown;
if(!timeEstCountDown) {
//printf("(yes it's running...)\n");
printf("Update time interval approximately %.5f seconds apart\n",(lastNoShowTime*(double)showInterval)+5.0);
/*waitTime=(int)(lastNoShowTime*(double)showInterval);
pthread_mutex_lock(&crmutex);
counterGo=true;
pthread_mutex_unlock(&crmutex);*/
}
}
}
}
/*if(showInterval) {
pthread_mutex_lock(&crmutex);
counterExit=true;
pthread_mutex_unlock(&crmutex);
//void *result;
//pthread_join(counter, &result);
}*/
cublasDestroy(handle);
//cublasXtDestroy(handlex);
//saveState("MPIv-Cuda-");
//sleep(5);
}
// Single-precision network constructor: records the topology, selects the GPU,
// creates the shared cuBLAS handle, and builds one NN_layerf per layer.
// The two trailing bool parameters only select this overload; their values are unused.
neuralNet(int _numInputs, int _numOutputs, vector<int> &_hiddenMatrix, int pBatchSize, bool floata, bool floatb) :
	hiddenMatrix(_hiddenMatrix), RMS(DBL_MAX), minRMS(DBL_MAX), batchSize(pBatchSize) {
	cout << "in float\n";
	cudaSetDevice(3); // NOTE(review): GPU index 3 is hard-coded — confirm this is intentional
	if (batchSize < 100) {
		// Reject tiny batches; fall back to a large default batch size.
		batchSize = 10000;
	}
	cublasCreate(&handle);
	numInputs = _numInputs;
	numOutputs = _numOutputs;
	// Bracket the hidden-layer widths with the input and output widths so
	// hiddenMatrix describes the full network layout.
	hiddenMatrix.insert(hiddenMatrix.begin(), numInputs);
	hiddenMatrix.push_back(numOutputs);
	layers = hiddenMatrix.size();
	outputsIndex = layers - 1;
	NNlayersf = vector<NN_layerf>(layers);
	cout << "Setting up network...\n";
	cout << "Layers: ";
	for (auto width : hiddenMatrix) {
		cout << width << " ";
	}
	cout << "Batch size: " << batchSize << endl << endl;
	// Input layer, then the interior hidden layers, then the output layer.
	NNlayersf[0] = NN_layerf(hiddenMatrix[0], hiddenMatrix[1], batchSize, INPUT);
	for (int layer = 1; layer < outputsIndex; ++layer) {
		NNlayersf[layer] = NN_layerf(hiddenMatrix[layer], hiddenMatrix[layer + 1], batchSize, HIDDEN);
	}
	NNlayersf[outputsIndex] = NN_layerf(hiddenMatrix[outputsIndex], 0, batchSize, OUTPUT);
}
// Single-precision mini-batch trainer (cuBLAS Sgemm + thrust transforms),
// mirroring the double-precision train_MatMul path: all training and test
// batches are pre-staged in device memory, then each epoch runs forward and
// backward passes per batch, accumulating weight/bias deltas and applying
// them once at the end of the epoch. Progress is printed every
// `showInterval` epochs, at which point the test set is also evaluated.
// NOTE(review): unlike the double-precision path, `vlRate` is accepted but
// never used here — confirm whether variable learning rate was meant to be
// ported to this version.
void train_MatMulf(vector<vector<float>> &pData, vector<vector<float>> &pLabels, ULLI maxIter,
	float RMSwant, int doDataSetSize, float lRate, vector<vector<float>> &pTestData, vector<vector<float>> &pTestLabels, bool vlRate) {
	cout << "in other float\n";
	if(!showInterval) {
		showInterval=10;
	}
	// Collapse one-hot training labels to class indices for fast scoring.
	vector<UNCHAR> bLabels;
	for(auto p:pLabels) {
		bLabels.push_back((UNCHAR)(thrust::max_element(p.begin(), p.end())-p.begin()));
	}
	if(lRate<0.0f) {
		learningRatef=0.05f;	// negative means "use the default"
	} else {
		learningRatef=lRate;
	}
	if(!doDataSetSize) {
		doDataSetSize=60000;	// default: full MNIST training set
	}
	dataSetSize=doDataSetSize;
	// GEMM coefficients: C = 1*A*B + 0*C.
	const float alf = 1;
	const float bet = 0;
	const float *alpha = &alf;
	const float *beta = &bet;
	int batchStart,batchEnd, thisSize, nextSize;
	RMSwanted=RMSwant;
	maxEpochs=maxIter;
	itemSize=pData[0].size();
	int testSetSize=pTestData.size();
	vector<UNCHAR> btLabels;
	device_vector<float> testData[testSetSize/batchSize];
	if(testSetSize) {
		// Collapse one-hot test labels to class indices as well.
		for(auto p:pTestLabels) {
			btLabels.push_back((UNCHAR)(thrust::max_element(p.begin(), p.end())-p.begin()));
		}
	}
	// BUGFIX: the previous empty-test-set branch called cudaFree(&testData).
	// testData is a host-side array of thrust::device_vectors, not a pointer
	// returned by cudaMalloc, so freeing its address was invalid; the vectors
	// release their own device memory when they go out of scope.
	int numBatches=dataSetSize/batchSize;
	device_vector<float> data[numBatches];
	device_vector<float> labels[numBatches];
	//Creating pre-made batches so I can simply copy them to layer[0]
	cout << "Making batches in video memory...\n";
	int whichBatch=0;
	for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
		batchStart=0;
		batchEnd=0;
		data[whichBatch]=device_vector<float>(itemSize*batchSize);
		// BUGFIX: the accounting previously used *8 (sizeof(double)) copied
		// from the double-precision path; these allocations hold floats.
		memTracker(itemSize*batchSize*sizeof(float),false);
		labels[whichBatch]=device_vector<float>(batchSize*numOutputs);
		memTracker(numOutputs*batchSize*sizeof(float),false);
		for(int b=0;b<batchSize;++b) {
			thrust::copy(pData[itemNum+b].begin(),pData[itemNum+b].end(),data[whichBatch].begin()+batchStart);
			thrust::copy(pLabels[itemNum+b].begin(),pLabels[itemNum+b].end(),labels[whichBatch].begin()+batchEnd);
			batchStart+=itemSize;
			batchEnd+=numOutputs;
		}
		++whichBatch;
	}
	// Pre-stage the test batches (inputs only; labels stay on the host).
	whichBatch=0;
	for(int i=0;i<testSetSize;i+=batchSize) {
		testData[whichBatch]=device_vector<float>(itemSize*batchSize);
		memTracker(itemSize*batchSize*sizeof(float),false);
		batchStart=0;
		for(int j=0;j<batchSize;++j) {
			thrust::copy(pTestData[i+j].begin(),pTestData[i+j].end(),testData[whichBatch].begin()+batchStart);
			batchStart+=itemSize;
		}
		++whichBatch;
	}
	int mOut=outputsIndex-2;
	cout << "Starting training...\n";
	memTracker(0,true);
	thrust::device_vector<float>::iterator iter;
	int position;
	int gotRight=0, prevSize;
	toDivideRMSf=learningRatef/(float)batchSize;
	int maxGotRight=0, maxTestRight=-1, ii;
	device_vector<float> *which;
	float seconds, totalTime=0.0f;
	high_resolution_clock::time_point startTime, endTime;
	int showIntervalCountDown=showInterval;
	float lastNoShowTime=0.0f;
	int timeEstCountDown=10, mPlus;
	// Train until interrupted, out of epochs, or every item is classified.
	for(int epochNum=0;!threadExit && epochNum<maxEpochs && maxGotRight!=dataSetSize && maxTestRight!=testSetSize;++epochNum) {
		whichBatch=0;
		// Zero the per-epoch accumulated deltas.
		for(int i=0;i<outputsIndex;++i) {
			ii=i+1;
			thrust::fill(NNlayersf[ii].outerDeltaB.begin(),NNlayersf[ii].outerDeltaB.end(),0.0f);
			thrust::fill(NNlayersf[i].outerDeltaW.begin(),NNlayersf[i].outerDeltaW.end(),0.0f);
		}
		if(!showIntervalCountDown) {
			gotRight=0;
		}
		startTime=high_resolution_clock::now();
		for(int itemNum=0;itemNum<dataSetSize;itemNum+=batchSize) {
			//forward propagation: inputs = W * activations, outputs = activation(inputs + bias)
			which=&data[whichBatch];
			for(int i=0;i<outputsIndex;++i) {
				ii=i+1;
				thisSize=hiddenMatrix[i];
				nextSize=hiddenMatrix[ii];
				cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayersf[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayersf[ii].atNeuronInputs.data().get(), nextSize);
				thrust::transform(NNlayersf[ii].counterN.begin(),NNlayersf[ii].counterN.end(),NNlayersf[ii].atNeuronOutputs.begin(),forwardFeed_helperf(NNlayersf[ii].atNeuronInputs.data().get(),NNlayersf[ii].biases.data().get()));
				which=&NNlayersf[ii].atNeuronOutputs;
			}
			//first check how many we got right (only on reporting epochs)
			if(!showIntervalCountDown) {
				batchStart=0;
				batchEnd=numOutputs;
				for(int b=0;b<batchSize;++b) {
					// Predicted class = argmax over this item's output slice.
					iter = thrust::max_element(NNlayersf[outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayersf[outputsIndex].atNeuronOutputs.begin()+batchEnd);
					position = iter - NNlayersf[outputsIndex].atNeuronOutputs.begin();
					position -= batchStart;
					if(position==bLabels[itemNum+b]) {
						++gotRight;
					}
					batchStart=batchEnd;
					batchEnd+=numOutputs;
				}
			}
			//Backward propagation
			mOut=outputsIndex-1;
			mPlus=outputsIndex;
			prevSize=hiddenMatrix[mOut];
			// Output-layer delta, then the gradient for the last weight matrix.
			thrust::transform(NNlayersf[outputsIndex].counterN.begin(),NNlayersf[outputsIndex].counterN.end(),NNlayersf[outputsIndex].innerDeltaB.begin(),output_helperf(NNlayersf[outputsIndex].atNeuronOutputs.data().get(),NNlayersf[outputsIndex].atNeuronInputs.data().get(),NNlayersf[outputsIndex].innerDeltaB.data().get(),labels[whichBatch].data().get()));
			cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, numOutputs, prevSize, batchSize, alpha, NNlayersf[outputsIndex].innerDeltaB.data().get(), numOutputs, NNlayersf[mOut].atNeuronOutputs.data().get(), prevSize, beta, NNlayersf[mOut].innerDeltaW.data().get(), numOutputs);
			--mOut;
			// Walk the hidden layers from back to front.
			for(int i=outputsIndex-1;i;--i) {
				thisSize=hiddenMatrix[i];
				nextSize=hiddenMatrix[i+1];
				prevSize=hiddenMatrix[i-1];
				// delta_i = W_i^T * delta_{i+1}
				cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, thisSize, batchSize, nextSize, alpha, NNlayersf[i].weightsMatrix.data().get(), nextSize, NNlayersf[i+1].innerDeltaB.data().get(), nextSize, beta, NNlayersf[i].innerDeltaB.data().get(), thisSize);
				if(i!=1) {
					// Interior hidden layer: derivative from stored outputs,
					// gradient vs. the previous layer's activations.
					thrust::transform(NNlayersf[i].counterN.begin(),NNlayersf[i].counterN.end(),NNlayersf[i].innerDeltaB.begin(),backProp_helper2f(NNlayersf[i].atNeuronOutputs.data().get(),NNlayersf[i].innerDeltaB.data().get()));
					cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, thisSize, prevSize, batchSize, alpha, NNlayersf[i].innerDeltaB.data().get(), thisSize, NNlayersf[i-1].atNeuronOutputs.data().get(), prevSize, beta, NNlayersf[mOut].innerDeltaW.data().get(), thisSize);
				} else {
					// First hidden layer: gradient vs. the raw input batch.
					thrust::transform(NNlayersf[i].counterN.begin(),NNlayersf[i].counterN.end(),NNlayersf[i].innerDeltaB.begin(),backProp_helperf(NNlayersf[i].innerDeltaB.data().get(),NNlayersf[i].atNeuronInputs.data().get()));
					cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, thisSize, prevSize, batchSize, alpha, NNlayersf[i].innerDeltaB.data().get(), thisSize, data[whichBatch].data().get(), prevSize, beta, NNlayersf[mOut].innerDeltaW.data().get(), thisSize);
				}
				--mOut;
				--mPlus;
			}
			// Accumulate this batch's deltas into the per-epoch totals.
			for(int i=0;i<outputsIndex;++i) {
				ii=i+1;
				thrust::transform(NNlayersf[ii].innerDeltaB.begin(),NNlayersf[ii].innerDeltaB.end(),NNlayersf[ii].outerDeltaB.begin(),NNlayersf[ii].outerDeltaB.begin(),thrust::plus<float>());
				thrust::transform(NNlayersf[i].innerDeltaW.begin(),NNlayersf[i].innerDeltaW.end(),NNlayersf[i].outerDeltaW.begin(),NNlayersf[i].outerDeltaW.begin(),thrust::plus<float>());
			}
			++whichBatch;
		}
		// Apply the accumulated deltas once per epoch, scaled by toDivideRMSf.
		for(int i=0;i<outputsIndex;++i) {
			ii=i+1;
			thrust::for_each(NNlayersf[i].counterW.begin(),NNlayersf[i].counterW.end(),update_wf(NNlayersf[i].weightsMatrix.data().get(),NNlayersf[i].outerDeltaW.data().get(),toDivideRMSf));
			thrust::for_each(NNlayersf[ii].counterN.begin(),NNlayersf[ii].counterN.end(),update_bf(NNlayersf[ii].biases.data().get(),NNlayersf[ii].outerDeltaB.data().get(),toDivideRMSf));
		}
		if(!showIntervalCountDown) {
			// Reporting epoch: print training accuracy, then evaluate the test set.
			if(gotRight>maxGotRight){maxGotRight=gotRight;}
			printf("Epoch: %d-Got %d of %d-max right: %d-lRate: %.5f",epochNum,gotRight,dataSetSize,maxGotRight,learningRatef);
			printf("-");
			gotRight=0;
			whichBatch=0;
			for(int t=0;t<testSetSize;t+=batchSize) {
				// Forward pass only for the test batches.
				which=&testData[whichBatch];
				for(int i=0;i<outputsIndex;++i) {
					ii=i+1;
					thisSize=hiddenMatrix[i];
					nextSize=hiddenMatrix[ii];
					cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, nextSize, batchSize, thisSize, alpha, NNlayersf[i].weightsMatrix.data().get(), nextSize, (*which).data().get(), thisSize, beta, NNlayersf[ii].atNeuronInputs.data().get(), nextSize);
					thrust::transform(NNlayersf[ii].counterN.begin(),NNlayersf[ii].counterN.end(),NNlayersf[ii].atNeuronOutputs.begin(),forwardFeed_helperf(NNlayersf[ii].atNeuronInputs.data().get(),NNlayersf[ii].biases.data().get()));
					which=&NNlayersf[ii].atNeuronOutputs;
				}
				if(!showIntervalCountDown) {
					batchStart=0;
					batchEnd=numOutputs;
					for(int b=0;b<batchSize;++b) {
						iter = thrust::max_element(NNlayersf[outputsIndex].atNeuronOutputs.begin()+batchStart, NNlayersf[outputsIndex].atNeuronOutputs.begin()+batchEnd);
						position = iter - NNlayersf[outputsIndex].atNeuronOutputs.begin();
						position -= batchStart;
						if(position==btLabels[t+b]) {
							++gotRight;
						}
						batchStart=batchEnd;
						batchEnd+=numOutputs;
					}
				}
				++whichBatch;
			}
			if(gotRight>maxTestRight && testSetSize){maxTestRight=gotRight;}
		}
		endTime=high_resolution_clock::now();
		seconds=duration_cast<microseconds>(endTime-startTime).count()/1000000.0f;
		totalTime+=seconds;
		if(!showIntervalCountDown) {
			float errRate=(1.0f-((float)gotRight/(float)testSetSize))*100.0f;
			printf("Test-Got %d of %d-max right: %d-sec: %.5f-totTime: %.5f-errRate: %.5f\n",gotRight,testSetSize,maxTestRight,lastNoShowTime,totalTime,errRate);
			showIntervalCountDown=showInterval;
		} else {
			lastNoShowTime=seconds;
			--showIntervalCountDown;
			// After ten quiet epochs, estimate how far apart the reports will be.
			if(timeEstCountDown) {
				--timeEstCountDown;
				if(!timeEstCountDown) {
					printf("Update time interval approximately %.5f seconds apart\n",(lastNoShowTime*(float)showInterval)+5.0f);
				}
			}
		}
	}
	cublasDestroy(handle);
}
// Serialize the double-precision network to "<outFile>-<dataSetSize>".
// Binary layout: epoch (ULLI), layers (ULLI), one int per layer width,
// batchSize (int), learningRate (double), then every weight and every bias
// as raw doubles. loadState() reads this exact layout back.
void saveState(string outFile) {
	outFile += "-" + to_string(dataSetSize);
	cout << "Writing weights to file: " << outFile << endl;
	ofstream out(outFile, ios::binary|ios::out);
	if(out.is_open()) {
		// Small helper so each device-side value is pulled to the host once
		// and written as a raw 8-byte double.
		auto writeDouble = [&out](double v) {
			out.write((char*)&v, sizeof(double));
		};
		out.write((char*)&epoch, sizeof(ULLI));
		out.write((char*)&layers, sizeof(ULLI));
		for(int &width : hiddenMatrix) {
			out.write((char*)&width, sizeof(int));
		}
		out.write((char*)&batchSize, sizeof(int));
		out.write((char*)&learningRate, sizeof(double));
		// Weights for every layer that feeds a next layer.
		for(int layer = 0; layer < outputsIndex; ++layer) {
			for(int w = 0; w < NNlayers[layer].allW; ++w) {
				writeDouble(NNlayers[layer].weightsMatrix[w]);
			}
		}
		// Biases for every layer except the input layer.
		for(int layer = 1; layer < layers; ++layer) {
			for(int b = 0; b < NNlayers[layer].allN; ++b) {
				writeDouble(NNlayers[layer].biases[b]);
			}
		}
		out.close();
	}
	cout << "Done\n";
}
// Restore a double-precision network from the binary file named by the
// `inFile` member. The layout must match what saveState() writes:
// epoch (ULLI), layers (ULLI), one int per layer width, batchSize (int),
// learningRate (double), then all weights and all biases as raw doubles.
// Rebuilds NNlayers and calls setupLayer(false) on each layer.
void loadState() {
	cout << "Reading weights from file: " << inFile << endl;
	ifstream oFile(inFile, ios::binary|ios::in);
	if(oFile.is_open()) {
		oFile.read((char*)&epoch,sizeof(ULLI));
		oFile.read((char*)&layers,sizeof(ULLI));
		// Replace the in-memory topology with the one stored in the file.
		hiddenMatrix.clear();
		for(int i=0;i<layers;++i) {
			int l=0;
			oFile.read((char*)&l,sizeof(int));
			hiddenMatrix.push_back(l);
		}
		oFile.read((char*)&batchSize,sizeof(int));
		oFile.read((char*)&learningRate,sizeof(double));
		outputsIndex=layers-1;
		// NOTE(review): the -1 here disagrees with the float constructor,
		// which inserts numInputs (not numInputs+1) at hiddenMatrix[0] —
		// possible off-by-one; confirm against the file format/other
		// constructors before relying on numInputs.
		numInputs=hiddenMatrix[0]-1;
		numOutputs=hiddenMatrix[outputsIndex];
		NNlayers.clear();
		int type=INPUT;
		for(int i=0;i<outputsIndex;++i) {
			if(i){type=HIDDEN;}	// only layer 0 is the INPUT layer
			NNlayers.push_back(NN_layer(hiddenMatrix[i],hiddenMatrix[i+1],batchSize,type));
			// Append the stored weights, then finish device-side setup
			// without re-randomizing (setupLayer(false)).
			for(int j=0;j<NNlayers[i].allW;++j) {
				double o=0.0;
				oFile.read((char*)&o,sizeof(double));
				NNlayers[i].weightsMatrix.push_back(o);
			}
			NNlayers[i].setupLayer(false);
		}
		NNlayers.push_back(NN_layer(hiddenMatrix[outputsIndex],0,batchSize,OUTPUT));
		NNlayers.back().setupLayer(false);
		// Biases exist for every layer except the input layer.
		for(int i=1;i<layers;++i) {
			for(int j=0;j<NNlayers[i].allN;++j) {
				double o=0.0;
				oFile.read((char*)&o,sizeof(double));
				NNlayers[i].biases.push_back(o);
			}
		}
		oFile.close();
	}
	cout << "Done\n";
}
// ---- network storage -------------------------------------------------
vector<NN_layer> NNlayers;	// double-precision layers (train_MatMul / saveState / loadState)
vector<NN_layerf> NNlayersf;	// single-precision layers (train_MatMulf)
vector<NN_layer> NNlayersQ[2];	// two layer sets — presumably for the multi-device "Quad" path; TODO confirm
private:
// ---- bookkeeping and hyper-parameters --------------------------------
ULLI epoch, maxElement, layers, maxEpochs;//, maxWeightsMatrix, maxDeltaMatrix;
int outputsIndex, dataSetSize, numInputs, numOutputs, batchSize;
double RMS, minRMS, toDivideRMS, RMSwanted, learningRate;	// toDivideRMS = learningRate/batchSize
float toDivideRMSf, learningRatef;	// single-precision twins used by train_MatMulf
vector<int> hiddenMatrix;	// full layer widths, input and output layers included
cublasHandle_t handle;	// shared cuBLAS handle, created in the constructor
ULLI itemSize;	// number of features per training item
string inFile;	// weights file consumed by loadState()
ULLI neededEpochs;
vector<vector<double>> neuralNet_weights_host;
};
// Program driver. When doMNISTprob is set: loads the MNIST image and label
// files (both double and float copies), expands the labels to one-hot
// vectors, builds a neuralNet (fresh or from inFile), and trains it —
// currently via the single-precision train_MatMulf path. When doBinaryProb
// is set: builds a BITS-bit "count to the next integer" toy problem and
// trains on it with train_Quad. Command-line options arrive pre-parsed
// from main().
void doMain(vector<int> &inputHiddenLayers, int batchSize, int doDataSetSize, double lRate, string inFile, string outFile, bool vlRate, int numDevs) {
	if(doMNISTprob) {
		// Hidden-layer layout: user-supplied, or the 200/100 default.
		vector<int> hiddenMatrix;
		if(!inputHiddenLayers.size()) {
			hiddenMatrix.push_back(200);
			hiddenMatrix.push_back(100);
			//hiddenMatrix.push_back(10);
			//hiddenMatrix.push_back(784+(784/2));
			//hiddenMatrix.push_back(784+(784/2));
			//hiddenMatrix.push_back(784);
			//hiddenMatrix.push_back(784);
		} else {
			for(auto h:inputHiddenLayers) {
				hiddenMatrix.push_back(h);
			}
		}
		// MNIST images, read twice: once as doubles, once as floats.
		vector<vector<double>> testData(10000);
		ReadMNIST_double("t10k-images.idx3-ubyte",10000,784,testData);
		vector<vector<double>> trainData(60000);
		ReadMNIST_double("train-images.idx3-ubyte",60000,784,trainData);//*/
		vector<vector<float>> testDataf(10000);
		ReadMNIST_float("t10k-images.idx3-ubyte",10000,784,testDataf);
		vector<vector<float>> trainDataf(60000);
		ReadMNIST_float("train-images.idx3-ubyte",60000,784,trainDataf);//*/
		vector<vector<double>> testLabels(10000);
		vector<vector<double>> trainLabels(60000);
		vector<vector<float>> testLabelsf(10000);
		vector<vector<float>> trainLabelsf(60000);
		//vector<UNCHAR> testLabels2;//(10000);
		//vector<UNCHAR> trainLabels2;//(60000);
		// Test labels: skip the two 4-byte IDX header fields, then expand
		// each byte label into a 10-way one-hot vector.
		ifstream file("t10k-labels.idx1-ubyte",ios::binary);
		if(file.is_open()) {
			int placeHolder=0;
			file.read((char*)&placeHolder,sizeof(placeHolder));
			file.read((char*)&placeHolder,sizeof(placeHolder));
			for(int i=0;i<10000;++i) {
				testLabels[i]=vector<double>(10,0.0);
				testLabelsf[i]=vector<float>(10,0.0f);
				//testLabels[i]=vector<float>(10,0.0f);
				UNCHAR temp=0;
				file.read((char*)&temp,1);
				for(UNCHAR j=0;j<10;++j) {
					if(j==temp) {
						//testLabels[i].push_back(1.0);
						//testLabels[i][j]=1.0f;
						testLabels[i][j]=1.0;
						testLabelsf[i][j]=1.0f;
						//testLabels2.push_back(temp);
					} /*else {
						//testLabels[i].push_back(0.0);
						testLabels[i][j]=0.0;
					}*/
				}
			}
			file.close();
		}
		//cout << "testLabels2 size: " << testLabels2.size() << endl;
		// Training labels: same format and expansion as above.
		ifstream file2("train-labels.idx1-ubyte",ios::binary);
		if(file2.is_open()) {
			int placeHolder=0;
			file2.read((char*)&placeHolder,sizeof(placeHolder));
			file2.read((char*)&placeHolder,sizeof(placeHolder));
			for(int i=0;i<60000;++i) {
				trainLabels[i]=vector<double>(10,0.0);
				trainLabelsf[i]=vector<float>(10,0.0f);
				//trainLabels[i]=vector<float>(10,0.0f);
				UNCHAR temp=0;
				file2.read((char*)&temp,1);
				for(UNCHAR j=0;j<10;++j) {
					if(j==temp) {
						//trainLabels[i].push_back(1.0);
						//trainLabels[i][j]=1.0f;
						trainLabels[i][j]=1.0;
						trainLabelsf[i][j]=1.0f;
						//trainLabels2.push_back(temp);
					} /*else {
						//trainLabels[i].push_back(0.0);
						trainLabels[i][j]=0.0;
					}*/
				}
			}
			file2.close();
		}
		//cout << "trainLabels2 size: " << trainLabels2.size() << endl;
		//vector<UNCHAR> temp;
		//for(auto p:trainData[1]) {
		//	temp.push_back((UNCHAR)(p*255.0f));
		//	cout << (int)temp.back() << endl;
		//}
		//UNCHAR* t=&temp[0];
		//intarray2bmp::intarray2bmp("outputtest.bmp",t,(UNCHAR)28,(UNCHAR)28,(UNCHAR)0,(UNCHAR)255);
		// Build the network: fresh (float variant) or restored from a file.
		neuralNet go;
		if(inFile=="") {
			//go=neuralNet(784,10,hiddenMatrix,batchSize,numDevs);
			//go=neuralNet(784,10,hiddenMatrix,batchSize);
			go=neuralNet(784,10,hiddenMatrix,batchSize,true,true);
		} else {
			go=neuralNet(inFile);
		}
		auto start = high_resolution_clock::now();
		//go.train_floats(trainData,trainLabels,1000000,0.0001,trainLabels2);
		//go.train(trainData,trainLabels,1000000,0.0001,trainLabels2, doDataSetSize);//*/
		//go.train_Quad(trainData,trainLabels, 1000000, 0.0001, doDataSetSize, lRate, testData, testLabels, vlRate);//*/
		go.train_MatMulf(trainDataf,trainLabelsf, 1000000, 0.0001, doDataSetSize, lRate, testDataf, testLabelsf, vlRate);//*/
		//go.train_MatMul(trainData,trainLabels, 1000000, 0.0001, doDataSetSize, lRate, testData, testLabels, vlRate);//*/
		//go.evaluate(testData,testLabels,testLabels2, doDataSetSize);
		auto endTime = high_resolution_clock::now();
		printTime(start,endTime);
	}
	if(doBinaryProb) {
		// Toy problem: learn to map each BITS-bit number i to (i+1) mod 2^BITS.
		vector<int> hiddenMatrix;
		hiddenMatrix.push_back(BITS+(BITS/2));
		//hiddenMatrix.push_back(BITS+(BITS/2));
		for(int i=0;i<1;++i) {
			hiddenMatrix.push_back(BITS+(BITS/2));
			//hiddenMatrix.push_back(12);
		}
		//vector<vector<neuron_t>> countingTest;
		//vector<vector<double>> countingLabels;
		int size=pow(2,BITS);
		neuralNet test(BITS,BITS,hiddenMatrix,batchSize,numDevs);
		vector<vector<double>> countingTest;
		vector<vector<double>> countingLabels;
		for(int i=0;i<size;++i) {
			countingTest.push_back(vector<double>(BITS));
			countingLabels.push_back(vector<double>(BITS,0.0));
			//countingLabels[i]=vector<double>(BITS,0.0);
			//countingTest[i]=vector<neuron_t>(BITS);
			for(int j=0;j<BITS;++j) {
				// Most-significant bit first: bitset bit (BITS-1)-j.
				//countingTest.back()[j].output=(double)bitset<BITS>(i)[(BITS-1)-j];
				//countingLabels.back()[j]=(double)bitset<BITS>((i+1)%size)[(BITS-1)-j];
				countingTest[i][j]=(double)bitset<BITS>(i)[(BITS-1)-j];
				countingLabels[i][j]=(double)bitset<BITS>((i+1)%size)[(BITS-1)-j];
			}
		}
		// Train and test on the same (exhaustive) data set.
		test.train_Quad(countingTest,countingLabels,1000000,0.00001,size,lRate,countingTest,countingLabels,vlRate);
	}
}
int main(int argc, char *argv[]) {
/*cudaSetDevice(1);
cudaDeviceReset();
cudaSetDevice(2);
cudaDeviceReset();
return 0;*/
struct sigaction ctrlc;
ctrlc.sa_handler=ctrlchandler;
ctrlc.sa_flags=0;
sigemptyset(&ctrlc.sa_mask);
sigaction(SIGQUIT,&ctrlc,NULL);
string inFile="";
string outFile="";
int doDataSetSize=0;
int batchSize=5;
if(doBinaryProb) {
batchSize=4;
}
double lRate=-1.0;
bool vlRate=false;
if(!vlRate){}
vector<int> inputHiddenLayers;
showInterval=0;
for(int i=1;i<argc;++i) {
string temp=string(argv[i]);
if(temp.find("showInterval=")!=string::npos) {
sscanf(argv[i],"showInterval=%d",&showInterval);
continue;
}
if(temp.find("showTrain")!=string::npos) {
showCorrectNumTrain=true;
continue;
}
if(temp.find("vlRate")!=string::npos) {
vlRate=true;
}
if(temp.find("outWeights=")!=string::npos) {
outFile=temp.substr(11,temp.size());
continue;
}
if(temp.find("inWeights=")!=string::npos) {
inFile=temp.substr(10,temp.size());
continue;
}
if(temp.find("setSize=")!=string::npos) {
sscanf(argv[i],"setSize=%d",&doDataSetSize);
continue;
}
if(temp.find("batchSize=")!=string::npos) {
sscanf(argv[i],"batchSize=%d",&batchSize);
continue;
}
if(temp.find("learningRate=")!=string::npos) {
sscanf(argv[i],"learningRate=%lf",&lRate);
continue;
}
if(temp.find("layers=")!=string::npos) {
temp.erase(0,7);
int where;
int what=1;
while(what) {
if(temp.find(",")!=string::npos) {
where=temp.find(",");
string temp2=string(temp.begin(),temp.begin()+where);
sscanf(temp2.c_str(),"%d",&what);
inputHiddenLayers.push_back(what);
temp.erase(0,where+1);
} else {
what=0;
}
}
sscanf(temp.c_str(),"%d",&what);
inputHiddenLayers.push_back(what);
}
}
//Cuda doesn't like this first one
//srandom(time_point_cast<nanoSec>(high_resolution_clock::now()).time_since_epoch().count());
srand((unsigned int)time_point_cast<nanoSec>(high_resolution_clock::now()).time_since_epoch().count());
/*int my_rank, num_nodes;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &num_nodes);
char my_host[100];
gethostname(my_host, 100);
string hostname=string(my_host);
printf("%s\n",hostname.c_str());*/
/*int deviceCount = 0;
size_t mem_tot_0 = 0;
size_t mem_free_0 = 0;*/
int numDevs=2;
pthread_barrier_init(&barrier, NULL, numDevs+1);
pthread_barrier_init(&barrier2, NULL, numDevs+1);
/*ULLI totalCudaMem=0;
size_t totalFreeCudaMem;
int device_num;
//This code is from deviceQuery.cpp as seen in /usr/local/cuda-8.0/samples
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if(deviceCount) {
cudaGetDevice(&device_num);
cudaMemGetInfo(&mem_free_0, & mem_tot_0);
totalFreeCudaMem=mem_free_0;
ULLI dmask=1;
ULLI maxDiv=1;
for(int i=0;i<sizeof(ULLI)*8;++i) {
if(dmask&totalFreeCudaMem) {
maxDiv=dmask/2;
}
dmask<<=1;
}
maxDiv/=8;
}
int dev=0;
for (dev = 0; dev < deviceCount; ++dev) {
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
/*printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
if(!dev) {
char msg[256];
sprintf(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem/1048576.0f,
(ULLI) deviceProp.totalGlobalMem);
totalCudaMem=(ULLI)deviceProp.totalGlobalMem;
printf("%s", msg);
printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
printf(" GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n\n",
deviceProp.clockRate * 1e-3f,
deviceProp.clockRate * 1e-6f);
printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
printf(" Warp size: %d\n", deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
}
cudaMemGetInfo(&mem_free_0, &mem_tot_0);
cout << "Total free cuda memory: " << mem_free_0 << endl;*/
//}
//if(totalCudaMem) {
//cout << string(my_host) << ": total Cuda Memory: " << totalCudaMem << endl;
//cout << "Total Cuda Memory: " << totalCudaMem << endl;
//}
//}
cudaSetDevice(0);
//cuda thread test
/*
double test=(double)doDataSetSize/4.0;
int test2=(int)test;
if((double)test2!=test) {
cout << "setSize must be divisible by four\n";
MPI_Finalize();
return 0;
}
device_vector<double> data(25,0.0);
device_vector<double> labels(25,0.0);
cudaMemGetInfo(&mem_free_0, &mem_tot_0);
//totalFreeCudaMem=mem_free_0;
cout << "Total free cuda memory: " << mem_free_0 << endl;
//random_doubles(thrust::raw_pointer_cast(&data[0]),5,5);
//random_doubles(thrust::raw_pointer_cast(&labels[0]),5,5);
for(int i=0;i<25;++i) {
data[i]=(double)i;
labels[i]=(double)i;
}
int index=0;
int index2=0;
int testI=2500;
vector<pthread_t> threads;
for(int j=0;j<deviceCount;++j) {
threads.push_back(pthread_t());
idLink *arg = (idLink*)malloc(sizeof(*arg));
(*arg).whichThread=j;
(*arg).batchStart=index;
index+=test2;
(*arg).batchEnd=index;
(*arg).testStart=index2;
index2+=testI;
(*arg).testEnd=index2;
(*arg).data=&data;
(*arg).labels=&labels;
pthread_create (&threads.at(j), NULL, cudaThread, arg);
}
cudaMemGetInfo(&mem_free_0, &mem_tot_0);
//totalFreeCudaMem=mem_free_0;
cout << "Total free cuda memory: " << mem_free_0 << endl;
int status;
void * result;
for (int i=0; i < deviceCount; ++i) {
if ((status = pthread_join(threads.at(i), &result)) != 0) {
fprintf (stderr, "join error %d: %s\n", status, strerror(status));
exit (1);
}
}*/
doMain(inputHiddenLayers, batchSize, doDataSetSize, lRate, inFile, outFile, vlRate, numDevs);
//MPI_Finalize();
//doMain(0,"",0);
return 0;
}
// Byte-swaps a 32-bit integer: MNIST/IDX file headers are stored
// big-endian, so each header word must be reversed on little-endian hosts.
int ReverseInt(int i) {
	int b0 = i & 255;
	int b1 = (i >> 8) & 255;
	int b2 = (i >> 16) & 255;
	int b3 = (i >> 24) & 255;
	return (b0 << 24) + (b1 << 16) + (b2 << 8) + b3;
}
// Reads an MNIST/IDX3 image file into `arr` as doubles.
// File layout: four big-endian int32 header words (magic, #images, rows,
// cols) followed by rows*cols unsigned bytes per image; each pixel is
// scaled to [0,1) as byte/256.  `arr` is sized to NumberOfImages up front;
// entries beyond the images actually read keep their zero-filled
// DataOfAnImage default.  A file whose header declares more images than
// NumberOfImages is clamped (previously caused an out-of-bounds write).
void ReadMNIST_double(string filename, int NumberOfImages, int DataOfAnImage, vector<vector<double>> &arr) {
	arr.resize(NumberOfImages,vector<double>(DataOfAnImage));
	ifstream file(filename,ios::binary);
	if (file.is_open()) {
		int magic_number=0;
		int number_of_images=0;
		int n_rows=0;
		int n_cols=0;
		// Header words are big-endian on disk; swap each to host order.
		file.read((char*)&magic_number,sizeof(magic_number));
		magic_number= ReverseInt(magic_number);
		file.read((char*)&number_of_images,sizeof(number_of_images));
		number_of_images= ReverseInt(number_of_images);
		file.read((char*)&n_rows,sizeof(n_rows));
		n_rows= ReverseInt(n_rows);
		file.read((char*)&n_cols,sizeof(n_cols));
		n_cols= ReverseInt(n_cols);
		// Clamp to caller-provided capacity so arr[i] below cannot overrun.
		if (number_of_images>NumberOfImages) {
			number_of_images=NumberOfImages;
		}
		for(int i=0;i<number_of_images;++i) {
			// Reuse the preallocated vector instead of replacing it with a
			// fresh one (the old code discarded the reserved storage).
			arr[i].clear();
			arr[i].reserve(n_rows*n_cols);
			for(int r=0;r<n_rows;++r) {
				for(int c=0;c<n_cols;++c) {
					UNCHAR temp=0;
					file.read((char*)&temp,sizeof(temp));
					arr[i].push_back(((double)temp)/256.0);
				}
			}
		}
	}
	file.close();
}
// Reads an MNIST/IDX3 image file into `arr` as floats.
// Same layout and semantics as ReadMNIST_double: four big-endian int32
// header words then rows*cols unsigned bytes per image, each pixel scaled
// to [0,1) as byte/256.  A file whose header declares more images than
// NumberOfImages is clamped (previously caused an out-of-bounds write).
void ReadMNIST_float(string filename, int NumberOfImages, int DataOfAnImage, vector<vector<float>> &arr) {
	arr.resize(NumberOfImages,vector<float>(DataOfAnImage));
	ifstream file(filename,ios::binary);
	if (file.is_open()) {
		int magic_number=0;
		int number_of_images=0;
		int n_rows=0;
		int n_cols=0;
		// Header words are big-endian on disk; swap each to host order.
		file.read((char*)&magic_number,sizeof(magic_number));
		magic_number= ReverseInt(magic_number);
		file.read((char*)&number_of_images,sizeof(number_of_images));
		number_of_images= ReverseInt(number_of_images);
		file.read((char*)&n_rows,sizeof(n_rows));
		n_rows= ReverseInt(n_rows);
		file.read((char*)&n_cols,sizeof(n_cols));
		n_cols= ReverseInt(n_cols);
		// Clamp to caller-provided capacity so arr[i] below cannot overrun.
		if (number_of_images>NumberOfImages) {
			number_of_images=NumberOfImages;
		}
		for(int i=0;i<number_of_images;++i) {
			// Reuse the preallocated vector instead of replacing it with a
			// fresh one (the old code discarded the reserved storage).
			arr[i].clear();
			arr[i].reserve(n_rows*n_cols);
			for(int r=0;r<n_rows;++r) {
				for(int c=0;c<n_cols;++c) {
					UNCHAR temp=0;
					file.read((char*)&temp,sizeof(temp));
					arr[i].push_back(((float)temp)/256.0f);
				}
			}
		}
	}
	file.close();
}
// Prints the elapsed wall time between `start` and `end` in four units.
// Relies on file-level `using` directives: duration_cast, milliseconds,
// cout, etc. are unqualified here.
void printTime(high_resolution_clock::time_point start, high_resolution_clock::time_point end) {
	// Seconds are derived from the microsecond count for sub-second precision.
	double seconds=duration_cast<microseconds>(end-start).count()/1000000.0;
	cout << "Processing time (milliseconds): " << duration_cast<milliseconds>(end - start).count() << endl;
	cout << "Processing time (microseconds): " << duration_cast<microseconds>(end - start).count() << endl;
	cout << "Processing time (nanoseconds): " << duration_cast<nanoseconds>(end - start).count() << endl;
	printf("Processing time (seconds): %.04f\n",seconds);
}
// Debug helper: prints an nr_rows_A x nr_cols_A matrix stored column-major
// in a thrust device_vector.  Each A[...] read goes through thrust's
// element proxy (a device-to-host transfer per element), so this is for
// debugging only, not hot paths.
void print_matrix(device_vector<double> &A, int nr_rows_A, int nr_cols_A) {
	for(int i = 0; i < nr_rows_A; ++i){
		for(int j = 0; j < nr_cols_A; ++j){
			//cout << A[j * nr_rows_A + i] << " ";
			// Column-major indexing: element (i,j) lives at j*rows + i.
			double o=A[j*nr_rows_A+i];
			printf("%.4f ",o);
			//printf("%.10f ",A[j*nr_rows_A+i]);
		}
		cout << endl;
	}
	//cout << endl;
}
|
224759ed6d006c911ac22ee0a03d699073b70b13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reorganize_gplex.h"
#include <stdio.h>
#include "FitterCU.h"
#include "accessors_cu.h"
#include "Track.h"
#include "gpu_utils.h"
// Accessors for the raw device-side position/error arrays of a Hit.
__device__ float *get_posArray(Hit &hit) {
  return hit.posArrayCU();
}
__device__ float *get_errArray(Hit &hit) {
  return hit.errArrayCU();
}
// Accessors for the raw device-side position/error arrays of a Track.
__device__ float *get_posArray(Track &track) {
  return track.posArrayCU();
}
__device__ float *get_errArray(Track &track) {
  return track.errArrayCU();
}
// Gather: thread j copies the to.kSize elements of the Hit selected by
// vi[j] (an index into the Hit array that starts at `arr`) into column j
// of GPlex `to`.  One thread per matriplex lane; j >= N lanes do nothing.
template <typename GPlexObj>
__device__ void SlurpIn_fn(GPlexObj to, // float *fArray, int stride, int kSize,
    const char *arr, const int *vi, const int N) {
  int j = threadIdx.x + blockDim.x * blockIdx.x;
  if (j<N) {
    const int *XHitPos = vi;
    // Byte offset of lane j's source object inside the Hit array.
    const int off = XHitPos[j] * sizeof(Hit);
    for (int i = 0; i < to.kSize; ++i) { // plex_size
      to[j + to.stride*i] = *(decltype(to.ptr)) (arr + i*sizeof(decltype(*to.ptr)) + off);
    }
  }
}
// Gather with a caller-supplied byte offset `idx` from `arr`.  The lane
// index j is recomputed from the thread id; calling code passes a
// per-thread idx so each lane fills its own column from its own object.
template <typename GPlexObj>
__device__ void SlurpInIdx_fn(GPlexObj to,
    const char *arr, const int idx, const int N) {
  int j = threadIdx.x + blockDim.x * blockIdx.x;
  if (j<N) {
    for (int i = 0; i < to.kSize; ++i) { // plex_size
      auto tmp = *(decltype(to.ptr)) (arr + i*sizeof(decltype(*to.ptr)) + idx);
      to[j + to.stride*i] = tmp;
    }
  }
}
// Scatter: inverse of SlurpInIdx_fn.  Writes column j of `from` back into
// the object located `idx` bytes past `arr`.
template <typename GPlexObj>
__device__ void SlurpOutIdx_fn(GPlexObj from, // float *fArray, int stride, int kSize,
    const char *arr, const int idx, const int N) {
  int j = threadIdx.x + blockDim.x * blockIdx.x;
  if (j<N) {
    for (int i = 0; i < from.kSize; ++i) { // plex_size
      * (decltype(from.ptr)) (arr + i*sizeof(decltype(*from.ptr)) + idx) = from[j + from.stride*i];
    }
  }
}
// Thin wrappers loading one hit's error/position arrays starting `beg`
// bytes into `array`.  NOTE(review): `end` is forwarded as the lane count
// N of SlurpInIdx_fn -- confirm callers pass a count, not an end index.
__device__
void GetHitErr(GPlexHS& msErr, const char* array, const int beg, const int end)
{
  SlurpInIdx_fn(msErr, array, beg, end);
}
__device__
void GetHitPar(GPlexHV& msPar, const char* array, const int beg, const int end)
{
  SlurpInIdx_fn(msPar, array, beg, end);
}
// Loads, for track lane `itrack`, its hit_cnt-th candidate hit (error and
// position arrays) into msErr/msPar.  When hit_cnt >= XHitSize[itrack] the
// previously stored byte offset in HitsIdx is reused, so padding lanes
// re-read their last selected hit.  NOTE(review): this assumes HitsIdx was
// initialized before the first call -- confirm with callers.
__device__ void HitToMs_fn(GPlexHS &msErr, GPlexHV &msPar,
    Hit *hits, const GPlexQI &XHitSize,
    const GPlexHitIdx &XHitArr,
    GPlexQI &HitsIdx, const int hit_cnt,
    const int itrack, const int N) {
  if (itrack < N) {
    const char *varr = (char*) hits;
    // Byte offsets of the error/position arrays inside a Hit object.
    const int off_error = (char*) hits[0].errArrayCU() - varr;
    const int off_param = (char*) hits[0].posArrayCU() - varr;
    if (hit_cnt < XHitSize[itrack]) {
      // Remember this lane's hit as a byte offset into the hit array.
      HitsIdx[itrack] = XHitArr(itrack, hit_cnt, 0) * sizeof(Hit);
    }
    SlurpInIdx_fn(msErr, varr + off_error, HitsIdx[itrack], N);
    SlurpInIdx_fn(msPar, varr + off_param, HitsIdx[itrack], N);
  }
}
// One-thread-per-track kernel front end for HitToMs_fn.
__global__ void HitToMs_kernel(GPlexHS msErr, GPlexHV msPar, Hit *hits,
    const GPlexQI XHitSize, const GPlexHitIdx XHitArr,
    GPlexQI HitsIdx, const int hit_cnt, const int N) {
  int itrack = threadIdx.x + blockDim.x * blockIdx.x;
  HitToMs_fn(msErr, msPar, hits, XHitSize, XHitArr, HitsIdx, hit_cnt, itrack, N);
}
// Host launcher.  NOTE(review): the grid is capped at max_blocks_x and the
// kernel has no grid-stride loop, so tracks beyond
// max_blocks_x*BLOCK_SIZE_X would be silently skipped.
void HitToMs_wrapper(const hipStream_t& stream,
    GPlexHS &msErr, GPlexHV &msPar, LayerOfHitsCU &layer,
    const GPlexQI &XHitSize, const GPlexHitIdx &XHitArr,
    GPlexQI &HitsIdx, int hit_cnt, const int N) {
  int gridx = ::min((N-1)/BLOCK_SIZE_X + 1,
                    max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  hipLaunchKernelGGL(( HitToMs_kernel) , dim3(grid), dim3(block), 0 , stream ,
     msErr, msPar, layer.m_hits.data(), XHitSize, XHitArr, HitsIdx, hit_cnt, N);
  /*hipDeviceSynchronize();*/
}
// Copies track i = beg+itrack's label/charge/chi2 scalars and its
// error/parameter arrays into the matriplex inputs.  One thread per lane;
// lanes past end-beg or N do nothing.
__device__ void InputTracksCU_fn (Track *tracks,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2,
    GPlexQI &Label, GPlexQI *HitsIdx,
    const int beg, const int end,
    const int itrack, const int N) {
  if (itrack < (end-beg) && itrack < N) {
    Track &trk = tracks[beg];
    const char *varr = (char*) &trk;
    // Byte offsets of the error/parameter arrays inside a Track object.
    int off_error = (char*) trk.errArrayCU() - varr;
    int off_param = (char*) trk.posArrayCU() - varr;
    int i= itrack + beg;
    const Track &trk_i = tracks[i];
    int idx = (char*) &trk_i - varr;  // byte offset of this lane's track
    Label(itrack, 0, 0) = tracks[i].label();
    Chg(itrack, 0, 0) = tracks[i].charge();
    Chi2(itrack, 0, 0) = tracks[i].chi2();
    SlurpInIdx_fn(Err_iP, varr + off_error, idx, N);
    SlurpInIdx_fn(Par_iP, varr + off_param, idx, N);
    for (int hi = 0; hi < 3; ++hi)
      HitsIdx[hi](itrack, 0, 0) = tracks[i].getHitIdx(hi);//dummy value for now
  }
}
// One-thread-per-track kernel front end for InputTracksCU_fn.
__global__ void InputTracksCU_kernel(Track *tracks,
    GPlexLS Err_iP, GPlexLV Par_iP,
    GPlexQI Chg, GPlexQF Chi2, GPlexQI Label,
    GPlexQI *HitsIdx,
    int beg, int end, int N) {
  int itrack = threadIdx.x + blockDim.x*blockIdx.x;
  InputTracksCU_fn(tracks, Err_iP, Par_iP, Chg, Chi2, Label, HitsIdx, beg, end, itrack, N);
}
// Host launcher over the candidates of one eta bin.
// NOTE(review): `inputProp` is accepted but unused here.
void InputTracksCU_wrapper(const hipStream_t &stream,
    const EtaBinOfCandidatesCU &etaBin,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2, GPlexQI &Label,
    GPlexQI *HitsIdx,
    const int beg, const int end, const bool inputProp, int N) {
  int gridx = ::min((N-1)/BLOCK_SIZE_X + 1,
                    max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  hipLaunchKernelGGL(( InputTracksCU_kernel) , dim3(grid), dim3(block), 0, stream ,
      etaBin.m_candidates, Err_iP, Par_iP, Chg, Chi2, Label, HitsIdx,
      beg, end, N);
}
// Copies track i = beg+itrack's scalars and error/parameter arrays into
// the matriplex inputs, plus (for every layer) the assigned hit's error
// and position arrays.  One thread per lane.
// Iterates Config::nLayers -- suitable for fitting, not for building.
__device__ void InputTracksAndHitsCU_fn (Track *tracks, LayerOfHitsCU *layerHits,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexHS *msErr_arr, GPlexHV *msPar_arr,
    GPlexQI &Chg, GPlexQF &Chi2,
    GPlexQI &Label, GPlexQI *HitsIdx,
    const int beg, const int end,
    const int itrack, const int N) {
  if (itrack < (end-beg) && itrack < N) {
    Track &trk = tracks[beg];
    const char *varr = (char*) &trk;
    // Byte offsets of the error/parameter arrays inside a Track object.
    int off_error = (char*) trk.errArrayCU() - varr;
    int off_param = (char*) trk.posArrayCU() - varr;
    int i= itrack + beg;
    const Track &trk_i = tracks[i];
    int idx = (char*) &trk_i - varr;  // byte offset of this lane's track
    Label(itrack, 0, 0) = tracks[i].label();
    Chg(itrack, 0, 0) = tracks[i].charge();
    Chi2(itrack, 0, 0) = tracks[i].chi2();
    SlurpInIdx_fn(Err_iP, varr + off_error, idx, N);
    SlurpInIdx_fn(Par_iP, varr + off_param, idx, N);
    // Note Config::nLayers -- not suitable for building
    for (int hi = 0; hi < Config::nLayers; ++hi) {
      int hidx = tracks[i].getHitIdx(hi);
      // NOTE(review): stores the track byte offset `idx`, not `hidx`, into
      // HitsIdx -- confirm that is intended (InputTracksCU_fn stores hidx).
      HitsIdx[hi](itrack, 0, 0) = idx;
      // Skip missing hits *before* touching m_hits: the previous code bound
      // a reference to m_hits[hidx] while hidx could be negative.
      if (hidx < 0) continue;
      Hit &hit = layerHits[hi].m_hits[hidx];
      SlurpInIdx_fn(msErr_arr[hi], (char *)hit.errArrayCU(), 0, N);
      SlurpInIdx_fn(msPar_arr[hi], (char *)hit.posArrayCU(), 0, N);
    }
  }
}
// One-thread-per-track kernel front end for InputTracksAndHitsCU_fn.
__global__ void InputTracksAndHitsCU_kernel(Track *tracks, LayerOfHitsCU *layers,
    GPlexLS Err_iP, GPlexLV Par_iP,
    GPlexHS *msErr_arr, GPlexHV *msPar_arr,
    GPlexQI Chg, GPlexQF Chi2, GPlexQI Label,
    GPlexQI *HitsIdx,
    int beg, int end, int N) {
  int itrack = threadIdx.x + blockDim.x*blockIdx.x;
  InputTracksAndHitsCU_fn(tracks, layers, Err_iP, Par_iP, msErr_arr, msPar_arr,
                          Chg, Chi2, Label, HitsIdx, beg, end, itrack, N);
}
// Host launcher: feeds tracks plus their per-layer hits into the plexes.
// NOTE(review): `inputProp` is accepted but unused here.
void InputTracksAndHitsCU_wrapper(const hipStream_t &stream,
    Track *tracks, EventOfHitsCU &event_of_hits,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexHS *msErr_arr, GPlexHV *msPar_arr,
    GPlexQI &Chg, GPlexQF &Chi2, GPlexQI &Label,
    GPlexQI *HitsIdx,
    const int beg, const int end,
    const bool inputProp, int N) {
  int gridx = ::min((N-1)/BLOCK_SIZE_X + 1,
                    max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  hipLaunchKernelGGL(( InputTracksAndHitsCU_kernel) , dim3(grid), dim3(block), 0, stream ,
      tracks, event_of_hits.m_layers_of_hits.data(),
      Err_iP, Par_iP,
      msErr_arr, msPar_arr,
      Chg, Chi2, Label, HitsIdx,
      beg, end, N);
}
// Writes the plex error/parameter columns back into track i = beg+itrack_plex.
// NOTE(review): unlike the other *_fn helpers there is no itrack_plex bounds
// guard here -- callers must guarantee itrack_plex < min(end-beg, N).
__device__ void OutputParErrCU_fn(Track *tracks,
    const GPlexLS &Err, const GPlexLV &Par,
    const int beg, const int end,
    const int itrack_plex, const int N) {
  Track &trk = tracks[beg];
  const char *varr = (char*) &trk;
  // Byte offsets of the error/parameter arrays inside a Track object.
  int off_error = (char*) trk.errArrayCU() - varr;
  int off_param = (char*) trk.posArrayCU() - varr;
  int i= itrack_plex + beg;
  const Track &trk_i = tracks[i];
  int idx = (char*) &trk_i - varr;  // byte offset of this lane's track
  SlurpOutIdx_fn(Err, varr + off_error, idx, N);
  SlurpOutIdx_fn(Par, varr + off_param, idx, N);
}
// Seed-addressed variant: the target track is located by
// (iseed_ev, icand_ev) within a maxCandsPerSeed-wide layout.
__device__ void OutputParErrCU_fn_seed(Track *tracks,
    const GPlexLS &Err, const GPlexLV &Par,
    const int iseed_ev,
    const int icand_ev,
    int N) {
  Track &trk = tracks[0];
  const char *varr = (char*) &trk;
  int off_error = (char*) trk.errArrayCU() - varr;
  int off_param = (char*) trk.posArrayCU() - varr;
  int i= iseed_ev * Config::maxCandsPerSeed + icand_ev;
  const Track &trk_i = tracks[i];
  int idx = (char*) &trk_i - varr;
  SlurpOutIdx_fn(Err, varr + off_error, idx, N);
  SlurpOutIdx_fn(Par, varr + off_param, idx, N);
}
// Writes one lane's fitted error/parameter columns and scalar results back
// into track i = beg+itrack.  When update_hit_idx is set, the track's hit
// list is rebuilt from the HitsIdx plexes (Config::nLayers entries).
__device__ void OutputTracksCU_fn(Track *tracks,
    const GPlexLS &Err_iP, const GPlexLV &Par_iP,
    const GPlexQI &Chg, const GPlexQF &Chi2,
    const GPlexQI &Label, const GPlexQI *HitsIdx,
    const int beg, const int end,
    const int itrack, const int N,
    const bool update_hit_idx) {
  if (itrack < (end-beg) && itrack < N) {
    Track &trk = tracks[beg];
    const char *varr = (char*) &trk;
    // Byte offsets of the error/parameter arrays inside a Track object.
    int off_error = (char*) trk.errArrayCU() - varr;
    int off_param = (char*) trk.posArrayCU() - varr;
    int i= itrack + beg;
    const Track &trk_i = tracks[i];
    int idx = (char*) &trk_i - varr;  // byte offset of this lane's track
    SlurpOutIdx_fn(Err_iP, varr + off_error, idx, N);
    SlurpOutIdx_fn(Par_iP, varr + off_param, idx, N);
    tracks[i].setCharge(Chg(itrack, 0, 0));
    tracks[i].setChi2(Chi2(itrack, 0, 0));
    tracks[i].setLabel(Label(itrack, 0, 0));
    if (update_hit_idx) {
      tracks[i].resetHits();
      /*int nGoodItIdx = 0;*/
      for (int hi = 0; hi < Config::nLayers; ++hi) {
        tracks[i].addHitIdx(HitsIdx[hi](itrack, 0, 0),0.);
        // FIXME: We probably want to use registers instead of going for gmem class members:
        /*int hit_idx = HitsIdx[hi](itrack, 0, 0);*/
        /*tracks[i].setHitIdx(hi, hit_idx);*/
        /*if (hit_idx >= 0) {*/
          /*nGoodItIdx++; */
        /*}*/
      }
      /*tracks[i].setNGoodHitIdx(nGoodItIdx);*/
      /*tracks[i].setChi2(0.);*/
    }
  }
}
// One-thread-per-track kernel front end for OutputTracksCU_fn.
__global__ void OutputTracksCU_kernel(Track *tracks,
    GPlexLS Err_iP, GPlexLV Par_iP,
    GPlexQI Chg, GPlexQF Chi2, GPlexQI Label,
    GPlexQI *HitsIdx,
    int beg, int end, int N,
    const bool update_hit_idx=true) {
  int itrack = threadIdx.x + blockDim.x*blockIdx.x;
  OutputTracksCU_fn(tracks, Err_iP, Par_iP, Chg, Chi2, Label, HitsIdx,
                    beg, end, itrack, N, update_hit_idx);
}
// Host launcher: copies results (including hit indices) back into the eta
// bin's candidate tracks.  NOTE(review): `outputProp` is unused here.
void OutputTracksCU_wrapper(const hipStream_t &stream,
    EtaBinOfCandidatesCU &etaBin,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2, GPlexQI &Label,
    GPlexQI *HitsIdx,
    const int beg, const int end, const bool outputProp, int N) {
  int gridx = ::min((N-1)/BLOCK_SIZE_X + 1,
                    max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  hipLaunchKernelGGL(( OutputTracksCU_kernel) , dim3(grid), dim3(block), 0, stream ,
      etaBin.m_candidates, Err_iP, Par_iP, Chg, Chi2, Label, HitsIdx, beg, end, N);
}
// Host launcher for the fitting path: same copy-back but without touching
// the hit lists (HitsIdx = nullptr, update_hit_idx = false).
void OutputFittedTracksCU_wrapper(const hipStream_t &stream,
    Track *tracks_cu,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2, GPlexQI &Label,
    const int beg, const int end, int N) {
  int gridx = ::min((N-1)/BLOCK_SIZE_X + 1,
                    max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  hipLaunchKernelGGL(( OutputTracksCU_kernel) , dim3(grid), dim3(block), 0, stream ,
      tracks_cu, Err_iP, Par_iP, Chg, Chi2, Label, nullptr, beg, end, N, false);
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// m_tracks_per_seed: play the same role than seed_cand_idx in the cpu code
// Loads the combinatorial-building inputs for matriplex lane itrack_plex:
// maps the lane to (seed, candidate) coordinates within the event, flags
// lanes whose candidate slot exceeds m_tracks_per_seed[iseed] as invalid,
// and copies the track's scalars, error/parameter arrays and first Nhits
// hit indices.  m_tracks_per_seed plays the role of seed_cand_idx in the
// CPU code.
__device__ void InputTracksAndHitIdxComb_fn(Track *tracks, int *m_tracks_per_seed,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2,
    GPlexQI &Label, GPlexQI *HitsIdx,
    GPlexQI &SeedIdx, GPlexQI &CandIdx,
    GPlexQB &Valid,
    const int Nhits,
    const int beg, const int end,
    const int itrack_plex, const int N)
{
  if (itrack_plex < N) {
    int itrack_ev = beg + itrack_plex;
    // TODO:: make sure that the width of the FitterCU is a multiple of
    // Config::maxCandsPerSeed;
    int iseed_ev = itrack_ev / Config::maxCandsPerSeed;
    int icand_ev = itrack_ev % Config::maxCandsPerSeed;
    // |  o  :  o  :  x  :  x  :  x  |
    // iseed
    // <----> m_tracks_per_seed[iseed]
    // <------------------> maxCandsPerSeed
    Valid(itrack_plex, 0, 0) = icand_ev < m_tracks_per_seed[iseed_ev]
                            && m_tracks_per_seed[iseed_ev] != 0;
    if (!Valid(itrack_plex, 0, 0)) {
      return;
    }
    Track &trk = tracks[beg];
    const char *varr = (char*) &trk;
    // Byte offsets of the error/parameter arrays inside a Track object.
    int off_error = (char*) trk.errArrayCU() - varr;
    int off_param = (char*) trk.posArrayCU() - varr;
    int i= itrack_plex + beg;  // == itrack_ev
    const Track &trk_i = tracks[i];
    int idx = (char*) &trk_i - varr;  // byte offset of this lane's track
    Label(itrack_plex, 0, 0) = tracks[i].label();
    SeedIdx(itrack_plex, 0, 0) = iseed_ev;
    CandIdx(itrack_plex, 0, 0) = icand_ev;
    SlurpInIdx_fn(Err_iP, varr + off_error, idx, N);
    SlurpInIdx_fn(Par_iP, varr + off_param, idx, N);
    Chg(itrack_plex, 0, 0) = tracks[i].charge();
    Chi2(itrack_plex, 0, 0) = tracks[i].chi2();
    // Copy the first Nhits hit indices (the old unused `hit_idx` local
    // was removed).
    for (int hi = 0; hi < Nhits; ++hi) {
      HitsIdx[hi][itrack_plex] = tracks[i].getHitIdx(hi);
    }
  }
}
// One-thread-per-lane kernel front end for InputTracksAndHitIdxComb_fn.
__global__
void InputTracksAndHitIdxComb_kernel(Track *tracks, int *m_tracks_per_seed,
    GPlexLS Err_iP, GPlexLV Par_iP,
    GPlexQI Chg, GPlexQF Chi2,
    GPlexQI Label, GPlexQI *HitsIdx,
    GPlexQI SeedIdx, GPlexQI CandIdx,
    GPlexQB Valid, const int Nhits,
    const int beg, const int end,
    const int N)
{
  int itrack = threadIdx.x + blockDim.x*blockIdx.x;
  InputTracksAndHitIdxComb_fn(tracks, m_tracks_per_seed,
      Err_iP, Par_iP,
      Chg, Chi2, Label, HitsIdx,
      SeedIdx, CandIdx, Valid, Nhits ,
      beg, end, itrack, N);
}
// Host launcher over one eta bin's combinatorial candidates.
// NOTE(review): `inputProp` is accepted but unused here.
void InputTracksAndHitIdxComb_wrapper(const hipStream_t &stream,
    const EtaBinOfCombCandidatesCU &etaBin,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2,
    GPlexQI &Label, GPlexQI *HitsIdx,
    GPlexQI &SeedIdx, GPlexQI &CandIdx,
    GPlexQB &Valid, const int Nhits,
    const int beg, const int end,
    const bool inputProp, int N) {
  int gridx = ::min((N-1)/BLOCK_SIZE_X + 1,
                    max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  hipLaunchKernelGGL(( InputTracksAndHitIdxComb_kernel), dim3(grid), dim3(block), 0, stream ,
      etaBin.m_candidates.data(), etaBin.m_ntracks_per_seed.data(),
      Err_iP, Par_iP,
      Chg, Chi2, Label, HitsIdx,
      SeedIdx, CandIdx, Valid, Nhits,
      beg, end, N);
}
///////////////////////////////////////////////////////////////////////////////
// Seed-addressed variant of InputTracksAndHitIdxComb_fn: lane iseed_plex
// loads the candidate located by (iseed_ev, icand_ev) in the
// maxCandsPerSeed-wide layout.  Valid is unconditionally 1 because the
// seed-based algorithm does not use the validity mask.
__device__ void InputTracksAndHitIdxComb_fn_seed(Track *tracks, int *m_tracks_per_seed,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2,
    GPlexQI &Label, GPlexQI *HitsIdx,
    GPlexQI &SeedIdx, GPlexQI &CandIdx,
    GPlexQB &Valid,
    const int Nhits,
    const int iseed_ev,
    const int icand_ev,
    const int iseed_plex,
    const int N)
{
  if (iseed_plex < N) {
    // seed-based algorithm do not depend on Valid
    Valid(iseed_plex, 0, 0) = 1;
    Track &trk = tracks[0];
    const char *varr = (char*) &trk;
    // Byte offsets of the error/parameter arrays inside a Track object.
    int off_error = (char*) trk.errArrayCU() - varr;
    int off_param = (char*) trk.posArrayCU() - varr;
    int i = iseed_ev * Config::maxCandsPerSeed + icand_ev;
    const Track &trk_i = tracks[i];
    int idx = (char*) &trk_i - varr;  // byte offset of the selected track
    Label(iseed_plex, 0, 0) = tracks[i].label();
    SeedIdx(iseed_plex, 0, 0) = iseed_ev;
    CandIdx(iseed_plex, 0, 0) = icand_ev;
    SlurpInIdx_fn(Err_iP, varr + off_error, idx, N);
    SlurpInIdx_fn(Par_iP, varr + off_param, idx, N);
    Chg(iseed_plex, 0, 0) = tracks[i].charge();
    Chi2(iseed_plex, 0, 0) = tracks[i].chi2();
    // Note Config::nLayers -- not suitable for building
    for (int hi = 0; hi < Nhits; ++hi) {
      HitsIdx[hi][iseed_plex] = tracks[i].getHitIdx(hi);
    }
  }
}
| 224759ed6d006c911ac22ee0a03d699073b70b13.cu | #include "reorganize_gplex.h"
#include <stdio.h>
#include "FitterCU.h"
#include "accessors_cu.h"
#include "Track.h"
#include "gpu_utils.h"
// Accessors for the raw device-side position/error arrays of a Hit.
__device__ float *get_posArray(Hit &hit) {
  return hit.posArrayCU();
}
__device__ float *get_errArray(Hit &hit) {
  return hit.errArrayCU();
}
// Accessors for the raw device-side position/error arrays of a Track.
__device__ float *get_posArray(Track &track) {
  return track.posArrayCU();
}
__device__ float *get_errArray(Track &track) {
  return track.errArrayCU();
}
// Gather: thread j copies the to.kSize elements of the Hit selected by
// vi[j] (an index into the Hit array that starts at `arr`) into column j
// of GPlex `to`.  One thread per matriplex lane; j >= N lanes do nothing.
template <typename GPlexObj>
__device__ void SlurpIn_fn(GPlexObj to, // float *fArray, int stride, int kSize,
    const char *arr, const int *vi, const int N) {
  int j = threadIdx.x + blockDim.x * blockIdx.x;
  if (j<N) {
    const int *XHitPos = vi;
    // Byte offset of lane j's source object inside the Hit array.
    const int off = XHitPos[j] * sizeof(Hit);
    for (int i = 0; i < to.kSize; ++i) { // plex_size
      to[j + to.stride*i] = *(decltype(to.ptr)) (arr + i*sizeof(decltype(*to.ptr)) + off);
    }
  }
}
// Gather with a caller-supplied byte offset `idx` from `arr`.  The lane
// index j is recomputed from the thread id; calling code passes a
// per-thread idx so each lane fills its own column from its own object.
template <typename GPlexObj>
__device__ void SlurpInIdx_fn(GPlexObj to,
    const char *arr, const int idx, const int N) {
  int j = threadIdx.x + blockDim.x * blockIdx.x;
  if (j<N) {
    for (int i = 0; i < to.kSize; ++i) { // plex_size
      auto tmp = *(decltype(to.ptr)) (arr + i*sizeof(decltype(*to.ptr)) + idx);
      to[j + to.stride*i] = tmp;
    }
  }
}
// Scatter: inverse of SlurpInIdx_fn.  Writes column j of `from` back into
// the object located `idx` bytes past `arr`.
template <typename GPlexObj>
__device__ void SlurpOutIdx_fn(GPlexObj from, // float *fArray, int stride, int kSize,
    const char *arr, const int idx, const int N) {
  int j = threadIdx.x + blockDim.x * blockIdx.x;
  if (j<N) {
    for (int i = 0; i < from.kSize; ++i) { // plex_size
      * (decltype(from.ptr)) (arr + i*sizeof(decltype(*from.ptr)) + idx) = from[j + from.stride*i];
    }
  }
}
// Thin wrappers loading one hit's error/position arrays starting `beg`
// bytes into `array`.  NOTE(review): `end` is forwarded as the lane count
// N of SlurpInIdx_fn -- confirm callers pass a count, not an end index.
__device__
void GetHitErr(GPlexHS& msErr, const char* array, const int beg, const int end)
{
  SlurpInIdx_fn(msErr, array, beg, end);
}
__device__
void GetHitPar(GPlexHV& msPar, const char* array, const int beg, const int end)
{
  SlurpInIdx_fn(msPar, array, beg, end);
}
// Loads, for track lane `itrack`, its hit_cnt-th candidate hit (error and
// position arrays) into msErr/msPar.  When hit_cnt >= XHitSize[itrack] the
// previously stored byte offset in HitsIdx is reused, so padding lanes
// re-read their last selected hit.  NOTE(review): this assumes HitsIdx was
// initialized before the first call -- confirm with callers.
__device__ void HitToMs_fn(GPlexHS &msErr, GPlexHV &msPar,
    Hit *hits, const GPlexQI &XHitSize,
    const GPlexHitIdx &XHitArr,
    GPlexQI &HitsIdx, const int hit_cnt,
    const int itrack, const int N) {
  if (itrack < N) {
    const char *varr = (char*) hits;
    // Byte offsets of the error/position arrays inside a Hit object.
    const int off_error = (char*) hits[0].errArrayCU() - varr;
    const int off_param = (char*) hits[0].posArrayCU() - varr;
    if (hit_cnt < XHitSize[itrack]) {
      // Remember this lane's hit as a byte offset into the hit array.
      HitsIdx[itrack] = XHitArr(itrack, hit_cnt, 0) * sizeof(Hit);
    }
    SlurpInIdx_fn(msErr, varr + off_error, HitsIdx[itrack], N);
    SlurpInIdx_fn(msPar, varr + off_param, HitsIdx[itrack], N);
  }
}
// One-thread-per-track kernel front end for HitToMs_fn.
__global__ void HitToMs_kernel(GPlexHS msErr, GPlexHV msPar, Hit *hits,
    const GPlexQI XHitSize, const GPlexHitIdx XHitArr,
    GPlexQI HitsIdx, const int hit_cnt, const int N) {
  int itrack = threadIdx.x + blockDim.x * blockIdx.x;
  HitToMs_fn(msErr, msPar, hits, XHitSize, XHitArr, HitsIdx, hit_cnt, itrack, N);
}
// Host launcher.  NOTE(review): the grid is capped at max_blocks_x and the
// kernel has no grid-stride loop, so tracks beyond
// max_blocks_x*BLOCK_SIZE_X would be silently skipped.
void HitToMs_wrapper(const cudaStream_t& stream,
    GPlexHS &msErr, GPlexHV &msPar, LayerOfHitsCU &layer,
    const GPlexQI &XHitSize, const GPlexHitIdx &XHitArr,
    GPlexQI &HitsIdx, int hit_cnt, const int N) {
  int gridx = std::min((N-1)/BLOCK_SIZE_X + 1,
                       max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  HitToMs_kernel <<< grid, block, 0 , stream >>>
    (msErr, msPar, layer.m_hits.data(), XHitSize, XHitArr, HitsIdx, hit_cnt, N);
  /*cudaDeviceSynchronize();*/
}
// Copies track i = beg+itrack's label/charge/chi2 scalars and its
// error/parameter arrays into the matriplex inputs.  One thread per lane;
// lanes past end-beg or N do nothing.
__device__ void InputTracksCU_fn (Track *tracks,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2,
    GPlexQI &Label, GPlexQI *HitsIdx,
    const int beg, const int end,
    const int itrack, const int N) {
  if (itrack < (end-beg) && itrack < N) {
    Track &trk = tracks[beg];
    const char *varr = (char*) &trk;
    // Byte offsets of the error/parameter arrays inside a Track object.
    int off_error = (char*) trk.errArrayCU() - varr;
    int off_param = (char*) trk.posArrayCU() - varr;
    int i= itrack + beg;
    const Track &trk_i = tracks[i];
    int idx = (char*) &trk_i - varr;  // byte offset of this lane's track
    Label(itrack, 0, 0) = tracks[i].label();
    Chg(itrack, 0, 0) = tracks[i].charge();
    Chi2(itrack, 0, 0) = tracks[i].chi2();
    SlurpInIdx_fn(Err_iP, varr + off_error, idx, N);
    SlurpInIdx_fn(Par_iP, varr + off_param, idx, N);
    for (int hi = 0; hi < 3; ++hi)
      HitsIdx[hi](itrack, 0, 0) = tracks[i].getHitIdx(hi);//dummy value for now
  }
}
// One-thread-per-track kernel front end for InputTracksCU_fn.
__global__ void InputTracksCU_kernel(Track *tracks,
    GPlexLS Err_iP, GPlexLV Par_iP,
    GPlexQI Chg, GPlexQF Chi2, GPlexQI Label,
    GPlexQI *HitsIdx,
    int beg, int end, int N) {
  int itrack = threadIdx.x + blockDim.x*blockIdx.x;
  InputTracksCU_fn(tracks, Err_iP, Par_iP, Chg, Chi2, Label, HitsIdx, beg, end, itrack, N);
}
// Host launcher over the candidates of one eta bin.
// NOTE(review): `inputProp` is accepted but unused here.
void InputTracksCU_wrapper(const cudaStream_t &stream,
    const EtaBinOfCandidatesCU &etaBin,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2, GPlexQI &Label,
    GPlexQI *HitsIdx,
    const int beg, const int end, const bool inputProp, int N) {
  int gridx = std::min((N-1)/BLOCK_SIZE_X + 1,
                       max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  InputTracksCU_kernel <<< grid, block, 0, stream >>>
    (etaBin.m_candidates, Err_iP, Par_iP, Chg, Chi2, Label, HitsIdx,
     beg, end, N);
}
// Copies track i = beg+itrack's scalars and error/parameter arrays into
// the matriplex inputs, plus (for every layer) the assigned hit's error
// and position arrays.  One thread per lane.
// Iterates Config::nLayers -- suitable for fitting, not for building.
__device__ void InputTracksAndHitsCU_fn (Track *tracks, LayerOfHitsCU *layerHits,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexHS *msErr_arr, GPlexHV *msPar_arr,
    GPlexQI &Chg, GPlexQF &Chi2,
    GPlexQI &Label, GPlexQI *HitsIdx,
    const int beg, const int end,
    const int itrack, const int N) {
  if (itrack < (end-beg) && itrack < N) {
    Track &trk = tracks[beg];
    const char *varr = (char*) &trk;
    // Byte offsets of the error/parameter arrays inside a Track object.
    int off_error = (char*) trk.errArrayCU() - varr;
    int off_param = (char*) trk.posArrayCU() - varr;
    int i= itrack + beg;
    const Track &trk_i = tracks[i];
    int idx = (char*) &trk_i - varr;  // byte offset of this lane's track
    Label(itrack, 0, 0) = tracks[i].label();
    Chg(itrack, 0, 0) = tracks[i].charge();
    Chi2(itrack, 0, 0) = tracks[i].chi2();
    SlurpInIdx_fn(Err_iP, varr + off_error, idx, N);
    SlurpInIdx_fn(Par_iP, varr + off_param, idx, N);
    // Note Config::nLayers -- not suitable for building
    for (int hi = 0; hi < Config::nLayers; ++hi) {
      int hidx = tracks[i].getHitIdx(hi);
      // NOTE(review): stores the track byte offset `idx`, not `hidx`, into
      // HitsIdx -- confirm that is intended (InputTracksCU_fn stores hidx).
      HitsIdx[hi](itrack, 0, 0) = idx;
      // Skip missing hits *before* touching m_hits: the previous code bound
      // a reference to m_hits[hidx] while hidx could be negative.
      if (hidx < 0) continue;
      Hit &hit = layerHits[hi].m_hits[hidx];
      SlurpInIdx_fn(msErr_arr[hi], (char *)hit.errArrayCU(), 0, N);
      SlurpInIdx_fn(msPar_arr[hi], (char *)hit.posArrayCU(), 0, N);
    }
  }
}
// One-thread-per-track kernel front end for InputTracksAndHitsCU_fn.
__global__ void InputTracksAndHitsCU_kernel(Track *tracks, LayerOfHitsCU *layers,
    GPlexLS Err_iP, GPlexLV Par_iP,
    GPlexHS *msErr_arr, GPlexHV *msPar_arr,
    GPlexQI Chg, GPlexQF Chi2, GPlexQI Label,
    GPlexQI *HitsIdx,
    int beg, int end, int N) {
  int itrack = threadIdx.x + blockDim.x*blockIdx.x;
  InputTracksAndHitsCU_fn(tracks, layers, Err_iP, Par_iP, msErr_arr, msPar_arr,
                          Chg, Chi2, Label, HitsIdx, beg, end, itrack, N);
}
// Host launcher: feeds tracks plus their per-layer hits into the plexes.
// NOTE(review): `inputProp` is accepted but unused here.
void InputTracksAndHitsCU_wrapper(const cudaStream_t &stream,
    Track *tracks, EventOfHitsCU &event_of_hits,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexHS *msErr_arr, GPlexHV *msPar_arr,
    GPlexQI &Chg, GPlexQF &Chi2, GPlexQI &Label,
    GPlexQI *HitsIdx,
    const int beg, const int end,
    const bool inputProp, int N) {
  int gridx = std::min((N-1)/BLOCK_SIZE_X + 1,
                       max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  InputTracksAndHitsCU_kernel <<< grid, block, 0, stream >>>
    (tracks, event_of_hits.m_layers_of_hits.data(),
     Err_iP, Par_iP,
     msErr_arr, msPar_arr,
     Chg, Chi2, Label, HitsIdx,
     beg, end, N);
}
// Writes the plex error/parameter columns back into track i = beg+itrack_plex.
// NOTE(review): unlike the other *_fn helpers there is no itrack_plex bounds
// guard here -- callers must guarantee itrack_plex < min(end-beg, N).
__device__ void OutputParErrCU_fn(Track *tracks,
    const GPlexLS &Err, const GPlexLV &Par,
    const int beg, const int end,
    const int itrack_plex, const int N) {
  Track &trk = tracks[beg];
  const char *varr = (char*) &trk;
  // Byte offsets of the error/parameter arrays inside a Track object.
  int off_error = (char*) trk.errArrayCU() - varr;
  int off_param = (char*) trk.posArrayCU() - varr;
  int i= itrack_plex + beg;
  const Track &trk_i = tracks[i];
  int idx = (char*) &trk_i - varr;  // byte offset of this lane's track
  SlurpOutIdx_fn(Err, varr + off_error, idx, N);
  SlurpOutIdx_fn(Par, varr + off_param, idx, N);
}
// Seed-addressed variant: the target track is located by
// (iseed_ev, icand_ev) within a maxCandsPerSeed-wide layout.
__device__ void OutputParErrCU_fn_seed(Track *tracks,
    const GPlexLS &Err, const GPlexLV &Par,
    const int iseed_ev,
    const int icand_ev,
    int N) {
  Track &trk = tracks[0];
  const char *varr = (char*) &trk;
  int off_error = (char*) trk.errArrayCU() - varr;
  int off_param = (char*) trk.posArrayCU() - varr;
  int i= iseed_ev * Config::maxCandsPerSeed + icand_ev;
  const Track &trk_i = tracks[i];
  int idx = (char*) &trk_i - varr;
  SlurpOutIdx_fn(Err, varr + off_error, idx, N);
  SlurpOutIdx_fn(Par, varr + off_param, idx, N);
}
// Writes one lane's fitted error/parameter columns and scalar results back
// into track i = beg+itrack.  When update_hit_idx is set, the track's hit
// list is rebuilt from the HitsIdx plexes (Config::nLayers entries).
__device__ void OutputTracksCU_fn(Track *tracks,
    const GPlexLS &Err_iP, const GPlexLV &Par_iP,
    const GPlexQI &Chg, const GPlexQF &Chi2,
    const GPlexQI &Label, const GPlexQI *HitsIdx,
    const int beg, const int end,
    const int itrack, const int N,
    const bool update_hit_idx) {
  if (itrack < (end-beg) && itrack < N) {
    Track &trk = tracks[beg];
    const char *varr = (char*) &trk;
    // Byte offsets of the error/parameter arrays inside a Track object.
    int off_error = (char*) trk.errArrayCU() - varr;
    int off_param = (char*) trk.posArrayCU() - varr;
    int i= itrack + beg;
    const Track &trk_i = tracks[i];
    int idx = (char*) &trk_i - varr;  // byte offset of this lane's track
    SlurpOutIdx_fn(Err_iP, varr + off_error, idx, N);
    SlurpOutIdx_fn(Par_iP, varr + off_param, idx, N);
    tracks[i].setCharge(Chg(itrack, 0, 0));
    tracks[i].setChi2(Chi2(itrack, 0, 0));
    tracks[i].setLabel(Label(itrack, 0, 0));
    if (update_hit_idx) {
      tracks[i].resetHits();
      /*int nGoodItIdx = 0;*/
      for (int hi = 0; hi < Config::nLayers; ++hi) {
        tracks[i].addHitIdx(HitsIdx[hi](itrack, 0, 0),0.);
        // FIXME: We probably want to use registers instead of going for gmem class members:
        /*int hit_idx = HitsIdx[hi](itrack, 0, 0);*/
        /*tracks[i].setHitIdx(hi, hit_idx);*/
        /*if (hit_idx >= 0) {*/
          /*nGoodItIdx++; */
        /*}*/
      }
      /*tracks[i].setNGoodHitIdx(nGoodItIdx);*/
      /*tracks[i].setChi2(0.);*/
    }
  }
}
// One-thread-per-track kernel front end for OutputTracksCU_fn.
__global__ void OutputTracksCU_kernel(Track *tracks,
    GPlexLS Err_iP, GPlexLV Par_iP,
    GPlexQI Chg, GPlexQF Chi2, GPlexQI Label,
    GPlexQI *HitsIdx,
    int beg, int end, int N,
    const bool update_hit_idx=true) {
  int itrack = threadIdx.x + blockDim.x*blockIdx.x;
  OutputTracksCU_fn(tracks, Err_iP, Par_iP, Chg, Chi2, Label, HitsIdx,
                    beg, end, itrack, N, update_hit_idx);
}
// Host launcher: copies results (including hit indices) back into the eta
// bin's candidate tracks.  NOTE(review): `outputProp` is unused here.
void OutputTracksCU_wrapper(const cudaStream_t &stream,
    EtaBinOfCandidatesCU &etaBin,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2, GPlexQI &Label,
    GPlexQI *HitsIdx,
    const int beg, const int end, const bool outputProp, int N) {
  int gridx = std::min((N-1)/BLOCK_SIZE_X + 1,
                       max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  OutputTracksCU_kernel <<< grid, block, 0, stream >>>
    (etaBin.m_candidates, Err_iP, Par_iP, Chg, Chi2, Label, HitsIdx, beg, end, N);
}
// Host launcher for the fitting path: same copy-back but without touching
// the hit lists (HitsIdx = nullptr, update_hit_idx = false).
void OutputFittedTracksCU_wrapper(const cudaStream_t &stream,
    Track *tracks_cu,
    GPlexLS &Err_iP, GPlexLV &Par_iP,
    GPlexQI &Chg, GPlexQF &Chi2, GPlexQI &Label,
    const int beg, const int end, int N) {
  int gridx = std::min((N-1)/BLOCK_SIZE_X + 1,
                       max_blocks_x);
  dim3 grid(gridx, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  OutputTracksCU_kernel <<< grid, block, 0, stream >>>
    (tracks_cu, Err_iP, Par_iP, Chg, Chi2, Label, nullptr, beg, end, N, false);
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// m_tracks_per_seed: plays the same role as seed_cand_idx in the CPU code
// Load candidate tracks (combinatorial building) from the Track array into
// the matriplex structures, one plex slot per thread.
// m_tracks_per_seed plays the same role as seed_cand_idx in the CPU code:
// plex slots beyond a seed's number of live candidates are flagged invalid
// and skipped.
__device__ void InputTracksAndHitIdxComb_fn(Track *tracks, int *m_tracks_per_seed,
                                            GPlexLS &Err_iP, GPlexLV &Par_iP,
                                            GPlexQI &Chg, GPlexQF &Chi2,
                                            GPlexQI &Label, GPlexQI *HitsIdx,
                                            GPlexQI &SeedIdx, GPlexQI &CandIdx,
                                            GPlexQB &Valid,
                                            const int Nhits,
                                            const int beg, const int end,
                                            const int itrack_plex, const int N)
{
  if (itrack_plex < N) {
    int itrack_ev = beg + itrack_plex;
    // TODO:: make sure that the width of the FitterCU is a multiple of
    // Config::maxCandsPerSeed;
    int iseed_ev = itrack_ev / Config::maxCandsPerSeed;
    int icand_ev = itrack_ev % Config::maxCandsPerSeed;
    // A slot is valid only if its candidate index falls inside this seed's
    // live candidates:
    //    | o : o : x : x : x |
    //    iseed
    //    <----> m_tracks_per_seed[iseed]
    //    <------------------> maxCandsPerSeed
    Valid(itrack_plex, 0, 0) = icand_ev < m_tracks_per_seed[iseed_ev]
                            && m_tracks_per_seed[iseed_ev] != 0;
    if (!Valid(itrack_plex, 0, 0)) {
      return;
    }
    // Byte offsets of the error / parameter arrays within a Track, relative
    // to the first track of the chunk; SlurpInIdx_fn gathers via base + idx.
    Track &trk = tracks[beg];
    const char *varr = (char*) &trk;
    int off_error = (char*) trk.errArrayCU() - varr;
    int off_param = (char*) trk.posArrayCU() - varr;
    int i = itrack_plex + beg;  // TODO: i == itrack_ev
    const Track &trk_i = tracks[i];
    int idx = (char*) &trk_i - varr;
    Label(itrack_plex, 0, 0) = tracks[i].label();
    SeedIdx(itrack_plex, 0, 0) = iseed_ev;
    CandIdx(itrack_plex, 0, 0) = icand_ev;
    SlurpInIdx_fn(Err_iP, varr + off_error, idx, N);
    SlurpInIdx_fn(Par_iP, varr + off_param, idx, N);
    Chg(itrack_plex, 0, 0) = tracks[i].charge();
    Chi2(itrack_plex, 0, 0) = tracks[i].chi2();
    // Note Config::nLayers -- not suitable for building
    for (int hi = 0; hi < Nhits; ++hi) {
      // Fix: removed an unused local copy (`int hit_idx = ...`) of the
      // value just stored.
      HitsIdx[hi][itrack_plex] = tracks[i].getHitIdx(hi);
    }
  }
}
// Kernel entry point: one thread per matriplex slot; computes the global
// thread index and delegates to InputTracksAndHitIdxComb_fn, which
// bounds-checks itrack against N.
__global__
void InputTracksAndHitIdxComb_kernel(Track *tracks, int *m_tracks_per_seed,
                                     GPlexLS Err_iP, GPlexLV Par_iP,
                                     GPlexQI Chg, GPlexQF Chi2,
                                     GPlexQI Label, GPlexQI *HitsIdx,
                                     GPlexQI SeedIdx, GPlexQI CandIdx,
                                     GPlexQB Valid, const int Nhits,
                                     const int beg, const int end,
                                     const int N)
{
  int itrack = threadIdx.x + blockDim.x*blockIdx.x;
  InputTracksAndHitIdxComb_fn(tracks, m_tracks_per_seed,
                              Err_iP, Par_iP,
                              Chg, Chi2, Label, HitsIdx,
                              SeedIdx, CandIdx, Valid, Nhits ,
                              beg, end, itrack, N);
}
// Host-side launcher: load candidate tracks [beg, end) of the combinatorial
// eta bin into the GPlex structures on `stream`.
// NOTE(review): `inputProp` is accepted but not used here — confirm intent.
void InputTracksAndHitIdxComb_wrapper(const cudaStream_t &stream,
                                      const EtaBinOfCombCandidatesCU &etaBin,
                                      GPlexLS &Err_iP, GPlexLV &Par_iP,
                                      GPlexQI &Chg, GPlexQF &Chi2,
                                      GPlexQI &Label, GPlexQI *HitsIdx,
                                      GPlexQI &SeedIdx, GPlexQI &CandIdx,
                                      GPlexQB &Valid, const int Nhits,
                                      const int beg, const int end,
                                      const bool inputProp, int N) {
  // ceil(N / BLOCK_SIZE_X), capped at the device's maximum grid width.
  const int num_blocks = std::min((N - 1) / BLOCK_SIZE_X + 1, max_blocks_x);
  dim3 grid(num_blocks, 1, 1);
  dim3 block(BLOCK_SIZE_X, 1, 1);
  InputTracksAndHitIdxComb_kernel<<< grid, block, 0, stream >>>
      (etaBin.m_candidates.data(), etaBin.m_ntracks_per_seed.data(),
       Err_iP, Par_iP,
       Chg, Chi2, Label, HitsIdx,
       SeedIdx, CandIdx, Valid, Nhits,
       beg, end, N);
}
///////////////////////////////////////////////////////////////////////////////
// Seed-parallel variant of InputTracksAndHitIdxComb_fn: each plex slot
// iseed_plex loads the single candidate (iseed_ev, icand_ev).  Unlike the
// candidate-parallel version, Valid is set unconditionally — seed-based
// code paths do not mask on it.
__device__ void InputTracksAndHitIdxComb_fn_seed(Track *tracks, int *m_tracks_per_seed,
                                                 GPlexLS &Err_iP, GPlexLV &Par_iP,
                                                 GPlexQI &Chg, GPlexQF &Chi2,
                                                 GPlexQI &Label, GPlexQI *HitsIdx,
                                                 GPlexQI &SeedIdx, GPlexQI &CandIdx,
                                                 GPlexQB &Valid,
                                                 const int Nhits,
                                                 const int iseed_ev,
                                                 const int icand_ev,
                                                 const int iseed_plex,
                                                 const int N)
{
  if (iseed_plex < N) {
    // seed-based algorithm do not depend on Valid
    Valid(iseed_plex, 0, 0) = 1;
    // Byte offsets of the error / parameter arrays within a Track, measured
    // from tracks[0]; SlurpInIdx_fn gathers via base pointer + idx.
    Track &trk = tracks[0];
    const char *varr = (char*) &trk;
    int off_error = (char*) trk.errArrayCU() - varr;
    int off_param = (char*) trk.posArrayCU() - varr;
    // Flat index of this candidate in the tracks array.
    int i = iseed_ev * Config::maxCandsPerSeed + icand_ev;
    const Track &trk_i = tracks[i];
    int idx = (char*) &trk_i - varr;
    Label(iseed_plex, 0, 0) = tracks[i].label();
    SeedIdx(iseed_plex, 0, 0) = iseed_ev;
    CandIdx(iseed_plex, 0, 0) = icand_ev;
    SlurpInIdx_fn(Err_iP, varr + off_error, idx, N);
    SlurpInIdx_fn(Par_iP, varr + off_param, idx, N);
    Chg(iseed_plex, 0, 0) = tracks[i].charge();
    Chi2(iseed_plex, 0, 0) = tracks[i].chi2();
    // Note Config::nLayers -- not suitable for building
    for (int hi = 0; hi < Nhits; ++hi) {
      HitsIdx[hi][iseed_plex] = tracks[i].getHitIdx(hi);
    }
  }
}
|
2342073d953d0418205efa29683ee31bdf04cff8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "psc_cuda2.h"
#define BLOCKSIZE_X 1
#define BLOCKSIZE_Y 16
#define BLOCKSIZE_Z 16
#include "psc_fields_cuda2.h"
// ----------------------------------------------------------------------
// FIXME
#include "cuda_wrap.h"
#define BND (2)
#define X3_DEV_OFF_YZ(fldnr, jy,jz) \
((((fldnr) \
*mz + ((jz)+2)) \
*my + ((jy)+2)) \
*1 + (0))
#undef F3_DEV
#define F3_DEV(fldnr,ix,jy,jz) \
(d_flds)[X3_DEV_OFF_YZ(fldnr, jy,jz)]
// FIXME end
// ----------------------------------------------------------------------
// Advance the E field on a 2-D (y,z) slab: E += difference terms of H
// minus the current density J, with half-step factors cny = .5*dt/dy and
// cnz = .5*dt/dz (set by the caller).  blockIdx.y packs both the per-patch
// y-block index and the patch number p (gridy = y-blocks per patch), so a
// single launch covers all patches of d_flds0.
__global__ static void
push_fields_E_yz(real *d_flds0, real dt, real cny, real cnz, int my, int mz,
		 unsigned int size, int gridy)
{
  int bidx_y = blockIdx.y % gridy; // y-block index within this patch
  int p = blockIdx.y / gridy;      // patch index
  int iy = blockIdx.x * blockDim.x + threadIdx.x;
  int iz = bidx_y * blockDim.y + threadIdx.y;
  // Bounds guard (grid is rounded up); with BND == 2 this reduces to
  // iy < my && iz < mz.
  if (!(iy < my - 2 * (2-BND) && iz < mz - 2 * (2-BND)))
    return;
  // Shift into the ghost-cell coordinate system expected by F3_DEV.
  iy -= BND;
  iz -= BND;
  real *d_flds = d_flds0 + p * size; // base of this patch's field data
  F3_DEV(EX, 0,iy,iz) +=
    cny * (F3_DEV(HZ, 0,iy,iz) - F3_DEV(HZ, 0,iy-1,iz)) -
    cnz * (F3_DEV(HY, 0,iy,iz) - F3_DEV(HY, 0,iy,iz-1)) -
    .5f * dt * F3_DEV(JXI, 0,iy,iz);
  F3_DEV(EY, 0,iy,iz) +=
    cnz * (F3_DEV(HX, 0,iy,iz) - F3_DEV(HX, 0,iy,iz-1)) -
    0.f -
    .5f * dt * F3_DEV(JYI, 0,iy,iz);
  F3_DEV(EZ, 0,iy,iz) +=
    0.f -
    cny * (F3_DEV(HX, 0,iy,iz) - F3_DEV(HX, 0,iy-1,iz)) -
    .5f * dt * F3_DEV(JZI, 0,iy,iz);
}
// Advance the H field on a 2-D (y,z) slab using forward differences of E
// (iy+1 / iz+1), with the same half-step factors and the same
// blockIdx.y = (patch, y-block) packing as push_fields_E_yz.
__global__ static void
push_fields_H_yz(real *d_flds0, real cny, real cnz, int my, int mz,
		 unsigned int size, int gridy)
{
  int bidx_y = blockIdx.y % gridy; // y-block index within this patch
  int p = blockIdx.y / gridy;      // patch index
  int iy = blockIdx.x * blockDim.x + threadIdx.x;
  int iz = bidx_y * blockDim.y + threadIdx.y;
  // Bounds guard (grid is rounded up); with BND == 2 this reduces to
  // iy < my && iz < mz.
  if (!(iy < my - 2 * (2-BND) && iz < mz - 2 * (2-BND)))
    return;
  // Shift into the ghost-cell coordinate system expected by F3_DEV.
  iy -= BND;
  iz -= BND;
  real *d_flds = d_flds0 + p * size; // base of this patch's field data
  F3_DEV(HX, 0,iy,iz) -=
    cny * (F3_DEV(EZ, 0,iy+1,iz) - F3_DEV(EZ, 0,iy,iz)) -
    cnz * (F3_DEV(EY, 0,iy,iz+1) - F3_DEV(EY, 0,iy,iz));
  F3_DEV(HY, 0,iy,iz) -=
    cnz * (F3_DEV(EX, 0,iy,iz+1) - F3_DEV(EX, 0,iy,iz)) -
    0.f;
  F3_DEV(HZ, 0,iy,iz) -=
    0.f -
    cny * (F3_DEV(EX, 0,iy+1,iz) - F3_DEV(EX, 0,iy,iz));
}
// ----------------------------------------------------------------------
// cuda2_push_mflds_E_yz
// Advance E on all patches of mflds.  The launch grid's y dimension is
// (y-blocks per patch) * (number of patches); the kernel decodes the patch
// index from blockIdx.y.  Requires a 2-D (y,z) setup (x extent of 1).
void
cuda2_push_mflds_E_yz(struct psc_mfields *mflds)
{
  struct psc_mfields_cuda2 *sub = psc_mfields_cuda2(mflds);
  if (mflds->nr_patches == 0) {
    return;
  }
  struct psc_patch *patch = &ppsc->patch[0];
  real dt = ppsc->dt;
  // Half-step factors: .5 * dt / dy and .5 * dt / dz.
  real cny = .5f * ppsc->dt / patch->dx[1];
  real cnz = .5f * ppsc->dt / patch->dx[2];
  assert(patch->ldims[0] == 1); // 2-D (y,z) only
  // Number of reals per patch (all field components, incl. ghost cells).
  unsigned int size = mflds->nr_fields *
    sub->im[0] * sub->im[1] * sub->im[2];
  int my = sub->im[1];
  int mz = sub->im[2];
  int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
  // Ceil-divide the patch extent (interior + 2*BND ghosts) by the block size.
  int grid[2]  = { (patch->ldims[1] + 2*BND + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
		   (patch->ldims[2] + 2*BND + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
  int dimGrid[2] = { grid[0], grid[1] * mflds->nr_patches };
  RUN_KERNEL(dimGrid, dimBlock,
	     push_fields_E_yz, (sub->d_flds, dt, cny, cnz, my, mz,
				size, grid[1]));
}
// ----------------------------------------------------------------------
// cuda2_push_mflds_H_yz
// Advance H on all patches of mflds.  Same launch layout as
// cuda2_push_mflds_E_yz; dt itself is not needed by the H kernel.
void
cuda2_push_mflds_H_yz(struct psc_mfields *mflds)
{
  struct psc_mfields_cuda2 *sub = psc_mfields_cuda2(mflds);
  if (mflds->nr_patches == 0) {
    return;
  }
  struct psc_patch *patch = &ppsc->patch[0];
  // Half-step factors: .5 * dt / dy and .5 * dt / dz.
  real cny = .5f * ppsc->dt / patch->dx[1];
  real cnz = .5f * ppsc->dt / patch->dx[2];
  assert(patch->ldims[0] == 1); // 2-D (y,z) only
  // Number of reals per patch (all field components, incl. ghost cells).
  unsigned int size = mflds->nr_fields *
    sub->im[0] * sub->im[1] * sub->im[2];
  int my = sub->im[1];
  int mz = sub->im[2];
  int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
  // Ceil-divide the patch extent (interior + 2*BND ghosts) by the block size.
  int grid[2]  = { (patch->ldims[1] + 2*BND + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
		   (patch->ldims[2] + 2*BND + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
  int dimGrid[2] = { grid[0], grid[1] * mflds->nr_patches };
  RUN_KERNEL(dimGrid, dimBlock,
	     push_fields_H_yz, (sub->d_flds, cny, cnz, my, mz,
				size, grid[1]));
}
| 2342073d953d0418205efa29683ee31bdf04cff8.cu |
#include "psc_cuda2.h"
#define BLOCKSIZE_X 1
#define BLOCKSIZE_Y 16
#define BLOCKSIZE_Z 16
#include "psc_fields_cuda2.h"
// ----------------------------------------------------------------------
// FIXME
#include "cuda_wrap.h"
#define BND (2)
#define X3_DEV_OFF_YZ(fldnr, jy,jz) \
((((fldnr) \
*mz + ((jz)+2)) \
*my + ((jy)+2)) \
*1 + (0))
#undef F3_DEV
#define F3_DEV(fldnr,ix,jy,jz) \
(d_flds)[X3_DEV_OFF_YZ(fldnr, jy,jz)]
// FIXME end
// ----------------------------------------------------------------------
// Advance the E field on a 2-D (y,z) slab: E += difference terms of H
// minus the current density J, with half-step factors cny = .5*dt/dy and
// cnz = .5*dt/dz (set by the caller).  blockIdx.y packs both the per-patch
// y-block index and the patch number p (gridy = y-blocks per patch), so a
// single launch covers all patches of d_flds0.
__global__ static void
push_fields_E_yz(real *d_flds0, real dt, real cny, real cnz, int my, int mz,
		 unsigned int size, int gridy)
{
  int bidx_y = blockIdx.y % gridy; // y-block index within this patch
  int p = blockIdx.y / gridy;      // patch index
  int iy = blockIdx.x * blockDim.x + threadIdx.x;
  int iz = bidx_y * blockDim.y + threadIdx.y;
  // Bounds guard (grid is rounded up); with BND == 2 this reduces to
  // iy < my && iz < mz.
  if (!(iy < my - 2 * (2-BND) && iz < mz - 2 * (2-BND)))
    return;
  // Shift into the ghost-cell coordinate system expected by F3_DEV.
  iy -= BND;
  iz -= BND;
  real *d_flds = d_flds0 + p * size; // base of this patch's field data
  F3_DEV(EX, 0,iy,iz) +=
    cny * (F3_DEV(HZ, 0,iy,iz) - F3_DEV(HZ, 0,iy-1,iz)) -
    cnz * (F3_DEV(HY, 0,iy,iz) - F3_DEV(HY, 0,iy,iz-1)) -
    .5f * dt * F3_DEV(JXI, 0,iy,iz);
  F3_DEV(EY, 0,iy,iz) +=
    cnz * (F3_DEV(HX, 0,iy,iz) - F3_DEV(HX, 0,iy,iz-1)) -
    0.f -
    .5f * dt * F3_DEV(JYI, 0,iy,iz);
  F3_DEV(EZ, 0,iy,iz) +=
    0.f -
    cny * (F3_DEV(HX, 0,iy,iz) - F3_DEV(HX, 0,iy-1,iz)) -
    .5f * dt * F3_DEV(JZI, 0,iy,iz);
}
// Advance the H field on a 2-D (y,z) slab using forward differences of E
// (iy+1 / iz+1), with the same half-step factors and the same
// blockIdx.y = (patch, y-block) packing as push_fields_E_yz.
__global__ static void
push_fields_H_yz(real *d_flds0, real cny, real cnz, int my, int mz,
		 unsigned int size, int gridy)
{
  int bidx_y = blockIdx.y % gridy; // y-block index within this patch
  int p = blockIdx.y / gridy;      // patch index
  int iy = blockIdx.x * blockDim.x + threadIdx.x;
  int iz = bidx_y * blockDim.y + threadIdx.y;
  // Bounds guard (grid is rounded up); with BND == 2 this reduces to
  // iy < my && iz < mz.
  if (!(iy < my - 2 * (2-BND) && iz < mz - 2 * (2-BND)))
    return;
  // Shift into the ghost-cell coordinate system expected by F3_DEV.
  iy -= BND;
  iz -= BND;
  real *d_flds = d_flds0 + p * size; // base of this patch's field data
  F3_DEV(HX, 0,iy,iz) -=
    cny * (F3_DEV(EZ, 0,iy+1,iz) - F3_DEV(EZ, 0,iy,iz)) -
    cnz * (F3_DEV(EY, 0,iy,iz+1) - F3_DEV(EY, 0,iy,iz));
  F3_DEV(HY, 0,iy,iz) -=
    cnz * (F3_DEV(EX, 0,iy,iz+1) - F3_DEV(EX, 0,iy,iz)) -
    0.f;
  F3_DEV(HZ, 0,iy,iz) -=
    0.f -
    cny * (F3_DEV(EX, 0,iy+1,iz) - F3_DEV(EX, 0,iy,iz));
}
// ----------------------------------------------------------------------
// cuda2_push_mflds_E_yz
// Advance E on all patches of mflds.  The launch grid's y dimension is
// (y-blocks per patch) * (number of patches); the kernel decodes the patch
// index from blockIdx.y.  Requires a 2-D (y,z) setup (x extent of 1).
void
cuda2_push_mflds_E_yz(struct psc_mfields *mflds)
{
  struct psc_mfields_cuda2 *sub = psc_mfields_cuda2(mflds);
  if (mflds->nr_patches == 0) {
    return;
  }
  struct psc_patch *patch = &ppsc->patch[0];
  real dt = ppsc->dt;
  // Half-step factors: .5 * dt / dy and .5 * dt / dz.
  real cny = .5f * ppsc->dt / patch->dx[1];
  real cnz = .5f * ppsc->dt / patch->dx[2];
  assert(patch->ldims[0] == 1); // 2-D (y,z) only
  // Number of reals per patch (all field components, incl. ghost cells).
  unsigned int size = mflds->nr_fields *
    sub->im[0] * sub->im[1] * sub->im[2];
  int my = sub->im[1];
  int mz = sub->im[2];
  int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
  // Ceil-divide the patch extent (interior + 2*BND ghosts) by the block size.
  int grid[2]  = { (patch->ldims[1] + 2*BND + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
		   (patch->ldims[2] + 2*BND + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
  int dimGrid[2] = { grid[0], grid[1] * mflds->nr_patches };
  RUN_KERNEL(dimGrid, dimBlock,
	     push_fields_E_yz, (sub->d_flds, dt, cny, cnz, my, mz,
				size, grid[1]));
}
// ----------------------------------------------------------------------
// cuda2_push_mflds_H_yz
// Advance H on all patches of mflds.  Same launch layout as
// cuda2_push_mflds_E_yz; dt itself is not needed by the H kernel.
void
cuda2_push_mflds_H_yz(struct psc_mfields *mflds)
{
  struct psc_mfields_cuda2 *sub = psc_mfields_cuda2(mflds);
  if (mflds->nr_patches == 0) {
    return;
  }
  struct psc_patch *patch = &ppsc->patch[0];
  // Half-step factors: .5 * dt / dy and .5 * dt / dz.
  real cny = .5f * ppsc->dt / patch->dx[1];
  real cnz = .5f * ppsc->dt / patch->dx[2];
  assert(patch->ldims[0] == 1); // 2-D (y,z) only
  // Number of reals per patch (all field components, incl. ghost cells).
  unsigned int size = mflds->nr_fields *
    sub->im[0] * sub->im[1] * sub->im[2];
  int my = sub->im[1];
  int mz = sub->im[2];
  int dimBlock[2] = { BLOCKSIZE_Y, BLOCKSIZE_Z };
  // Ceil-divide the patch extent (interior + 2*BND ghosts) by the block size.
  int grid[2]  = { (patch->ldims[1] + 2*BND + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y,
		   (patch->ldims[2] + 2*BND + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z };
  int dimGrid[2] = { grid[0], grid[1] * mflds->nr_patches };
  RUN_KERNEL(dimGrid, dimBlock,
	     push_fields_H_yz, (sub->d_flds, cny, cnz, my, mz,
				size, grid[1]));
}
|
616dae17bde31692152100f4d80504fce793d9c8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <time.h>
#include "VectorKernel.cu"
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <functional>
#define MAXLINE 100000
extern "C"
void computeGold(float* C, const float* A, unsigned int N);
Vector AllocateDeviceVector(const Vector V);
Vector AllocateVector(int length, int init, float initvalue);
void CopyToDeviceVector(Vector Vdevice, const Vector Vhost);
void CopyFromDeviceVector(Vector Vhost, const Vector Vdevice);
const char* getfield(char* line, int num);
void ReadFile(float* stock_data, char* file_name, int nums);
void WriteFile(Vector V, char* file_name);
//-----------------------------Parallel---------------------------------
float VectorVariance(const Vector A, float mean); // Variance
// void VectorStDeviation(Vector A, Vector E, float mean); // Standard deviation
void PrintVector(Vector M);
float* UniformNumberGenerator(int size,char * pickRNG);
void VectorPercentChange(const Vector A, Vector C);
//----------------------------------------------------------------------
float computeGold_percentChange(float* C, const float* A, unsigned int N);
float computeGold_stdDev(float* C, float average_daily, unsigned int N);
float* computeGold_randVal(float std_dev, int size, float* input);
float* computeGold_NextVal(float* today, float drift, float* rand_val);
//----------------------------------------------------------------------
void PrintData(float* data, int size);
void VectorDrift(Vector E, float variance);
float* RandomValueAndNextDayPrice(Vector today, float* randVector, float std_deviation,float drift);
void VectorNextDay(float today, float drift, float* randomValue );
// Monte-Carlo stock-price simulation driver (HIP build).
// argv[1] (optional): CSV file of historical prices; argv[2] (optional):
// hipRAND generator selector (mt1/mt2/lfsr/mrg/phi/sobol).
// Runs the GPU pipeline, then a sequential CPU reference, timing both.
int main(int argc, char** argv){
    srand(time(0));
    Vector StockData;      // input historical prices
    Vector PercentChange;  // GPU daily log-returns
    Vector PercentChangeS; // CPU (sequential) daily log-returns
    printf("argc %d\n", argc);
    if (argc > 1) {
        // Fix: argv[1] is NULL when no argument was given; printing it
        // through %s was undefined behavior.
        printf("argv %s\n", argv[1]);
    }
    char RNG[10] = "mt1";
    if (argc == 1)
    {
        // No inputs provided: synthesize random prices.
        StockData = AllocateVector(VSIZE, 1, 0.0f);
        PercentChange = AllocateVector(VSIZE, 0, 0.0f);
        PercentChangeS = AllocateVector(VSIZE, 0, 0.0f);
    }
    else if (argc == 2)
    {
        // Input file provided.
        StockData = AllocateVector(VSIZE, 0, 0.0f);
        PercentChange = AllocateVector(VSIZE, 0, 0.0f);
        PercentChangeS = AllocateVector(VSIZE, 0, 0.0f);
        ReadFile(StockData.elements, argv[1], VSIZE);
    }
    else if (argc == 3)
    {
        // Input file and RNG selector provided.
        StockData = AllocateVector(VSIZE, 0, 0.0f);
        PercentChange = AllocateVector(VSIZE, 0, 0.0f);
        PercentChangeS = AllocateVector(VSIZE, 0, 0.0f);
        ReadFile(StockData.elements, argv[1], VSIZE);
        strcpy(RNG, argv[2]);
    }
    mkdir("output", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
    //-----------------------------Parallel---------------------------------
    hipEvent_t start_GPU, stop_GPU;
    float GPU_time = 0;
    hipEventCreate(&start_GPU);
    hipEventCreate(&stop_GPU);
    hipEventRecord(start_GPU);
    VectorPercentChange(StockData, PercentChange);
    // Mean / variance / drift / volatility of the daily log-returns.
    float result = thrust::reduce(thrust::host, PercentChange.elements, PercentChange.elements + PercentChange.length, 0.0f, thrust::plus<float>());
    float AVG = result/(PercentChange.length);
    float VAR = VectorVariance(PercentChange, AVG);
    float DRIFT = AVG - (VAR/2.0f);
    float STD = sqrt(VAR);
    // Every simulated path starts from the last observed price.
    Vector Today = AllocateVector(SIM_SIZE, 0, StockData.elements[StockData.length-1]);
    Vector next_day;
    Vector RANDSTORE;
    RANDSTORE.elements = (float*)malloc(SIM_SIZE*DAYS*sizeof(float));
    RANDSTORE.length = SIM_SIZE*DAYS;
    char Buffer[30];
    for(int i = 0; i < DAYS; i++){
        sprintf(Buffer,"output/output%d.txt",i);
        //WriteFile(Today,Buffer);
        float * randVector = UniformNumberGenerator(SIM_SIZE,(char*)RNG);
        memcpy(RANDSTORE.elements + (i*SIM_SIZE), randVector, SIM_SIZE*sizeof(float));
        next_day.length = Today.length;
        next_day.elements = RandomValueAndNextDayPrice(Today, randVector, STD, DRIFT);
        memcpy(Today.elements, next_day.elements, SIM_SIZE*sizeof(float));
        free(next_day.elements); // fix: this buffer was leaked once per day
        next_day.elements = NULL;
        free(randVector);
    }
    hipEventRecord(stop_GPU);
    hipEventSynchronize(stop_GPU);
    hipEventElapsedTime(&GPU_time, start_GPU, stop_GPU);
    printf("GPU time: %f ms\n", GPU_time);
    hipEventDestroy(start_GPU);
    hipEventDestroy(stop_GPU);
    //------------------------------------------------------------------
    WriteFile(RANDSTORE,(char*)"random.txt");
    free(PercentChange.elements);
    PercentChange.elements = NULL;
    //----------------------------Sequential----------------------------
    hipEvent_t start, stop;
    float CPU_time = 0.0;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    float average_daily = computeGold_percentChange(PercentChangeS.elements, StockData.elements, VSIZE);
    float std_dev = computeGold_stdDev(PercentChangeS.elements, average_daily, VSIZE);
    float drift = average_daily - (pow(std_dev,2.0) / 2 );
    float* input = (float*)malloc(SIM_SIZE * DAYS * sizeof(float));
    for(int i = 0 ; i < DAYS * SIM_SIZE ; i ++){
        input[i] = (double)rand() / (RAND_MAX);
    }
    float* today = (float*)malloc(SIM_SIZE * DAYS * sizeof(float));
    for(int i = 0 ; i < SIM_SIZE * DAYS ; i ++){
        today[i] = StockData.elements[VSIZE-1];
    }
    // Fix: computeGold_randVal allocates its own result, so the previous
    // pre-malloc of rand_num leaked on reassignment.
    float* rand_num = computeGold_randVal(std_dev, SIM_SIZE * DAYS, input);
    // computeGold_NextVal updates `today` in place and returns that same
    // pointer, so Next_dayS aliases `today` (free it exactly once below).
    // The previous pre-malloc of Next_dayS was likewise leaked.
    float* Next_dayS = computeGold_NextVal(today, drift, rand_num);
    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&CPU_time, start, stop);
    printf("CPU time: %f ms\n", CPU_time);
    // Fix: the original destroyed start_GPU/stop_GPU here a second time
    // (double destroy) and leaked the CPU-timer events start/stop.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    mkdir("outputS", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
    Vector CHUNK;
    CHUNK.elements = (float*)malloc(SIM_SIZE*sizeof(float));
    CHUNK.length = SIM_SIZE;
    memcpy(CHUNK.elements, Next_dayS, SIM_SIZE*sizeof(float));
    for(int i = 0; i < DAYS; i++){
        sprintf(Buffer, "outputS/output%d.txt", i);
        WriteFile(CHUNK, Buffer);
        memcpy(CHUNK.elements, Next_dayS+(i*SIM_SIZE), SIM_SIZE*sizeof(float));
    }
    free(CHUNK.elements); // fix: was leaked
    free(PercentChangeS.elements);
    PercentChangeS.elements = NULL;
    free(rand_num);
    free(input);
    free(Next_dayS); // also releases `today` (same buffer)
    rand_num = NULL;
    input = NULL;
    Next_dayS = NULL;
    today = NULL;
    free(StockData.elements);
    StockData.elements = NULL;
    free(Today.elements);     // fix: was leaked
    free(RANDSTORE.elements); // fix: was leaked
    return 0;
}
// Run one day of the Monte-Carlo walk on the GPU: for each of the
// today.length simulations, combine today's price, a uniform draw from
// randVector, the volatility (std_deviation) and the drift into the next
// day's price.  Returns a freshly calloc'ed host buffer of today.length
// floats; ownership passes to the caller, who must free() it.
float* RandomValueAndNextDayPrice(Vector today, float* randVector, float std_deviation,float drift){
  //---------------------------input----------------------------------
  Vector today_dev = AllocateDeviceVector(today);
  CopyToDeviceVector(today_dev, today);
  float* randVector_dev;
  hipMalloc((void**) &randVector_dev, today.length*sizeof(float));
  hipMemcpy(randVector_dev, randVector, today.length * sizeof(float), hipMemcpyHostToDevice);
  //---------------------------output---------------------------------
  float* next_day_dev;
  float* next_day_host;
  next_day_host = (float *)calloc(today.length, sizeof(float));
  hipMalloc((void**) &next_day_dev, today.length*sizeof(float));
  //--------------------------kernel----------------------------------
  dim3 dim_grid, dim_block;
  // NOTE(review): this rounds up to length/BLOCK_SIZE + 1 blocks, i.e. one
  // extra block when length is an exact multiple -- the kernel must
  // bounds-check.
  dim_grid.x = (today.length + BLOCK_SIZE ) / (BLOCK_SIZE);
  dim_grid.y = 1;
  dim_grid.z = 1;
  dim_block.x = BLOCK_SIZE;
  dim_block.y = 1;
  dim_block.z = 1;
  hipLaunchKernelGGL(( VectorRandomValueAndNextDayPriceKernel), dim3(dim_grid),dim3(dim_block), 0, 0, today_dev,randVector_dev,std_deviation,drift,next_day_dev);
  //------------------------------------------------------------------
  hipMemcpy(next_day_host, next_day_dev, today.length * sizeof(float), hipMemcpyDeviceToHost);
  hipFree(today_dev.elements);
  hipFree(next_day_dev);
  hipFree(randVector_dev);
  return next_day_host;
}
// Compute per-element "percent change" of A on the GPU and store it in C
// (the kernel computes C[i] from A[i] and A[i-1]; see
// VectorPercentChangeKernel).  C must already be allocated with A.length
// elements.
void VectorPercentChange(const Vector A, Vector C){
  //Interface host call to the device kernel code and invoke the kernel
  //hipError_t cuda_ret;
  Vector d_A, d_C;
  // steps:
  // 1. allocate device vectors d_A and d_C with length same as input vector
  d_A = AllocateDeviceVector(A);
  d_C = AllocateDeviceVector(C);
  // 2. copy A to d_A,
  CopyToDeviceVector(d_A, A);
  //CopyToDeviceVector(d_C, C);
  // 3. launch kernel to compute d_C = d_An[1]/d_An[x-1]
  dim3 dim_grid, dim_block;
  dim_block.x = BLOCK_SIZE; dim_block.y = dim_block.z = 1;
  // Ceil-divide the element count by the block size.
  dim_grid.x = A.length / dim_block.x;
  if(A.length % dim_block.x != 0) dim_grid.x++;
  dim_grid.y = 1;
  dim_grid.z = 1;
  hipLaunchKernelGGL(( VectorPercentChangeKernel), dim3(dim_grid), dim3(dim_block), 0, 0, d_A, d_C);
  // 4. copy d_C back to host vector C
  CopyFromDeviceVector(C,d_C);
  // 5. free device vectors d_A, d_B, d_C
  hipFree(d_A.elements);
  hipFree(d_C.elements);
}
// Generate `size` uniform floats on the device with hipRAND and copy them
// into a newly calloc'ed host buffer (caller frees).  pickRNG selects the
// generator: "mt1"/"mt2" (Mersenne Twister variants), "lfsr" (XORWOW),
// "mrg" (MRG32k3a), "phi" (Philox), "sobol" (quasi-random).
float* UniformNumberGenerator(int size,char * pickRNG){
  hiprandGenerator_t gen;
  float *devData, *host;
  host = (float *)calloc(size, sizeof(float));
  hipMalloc((void**) &devData, size*sizeof(float));
  if(strcmp(pickRNG,"mt1") == 0){
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MTGP32); //Mersenne Twister family number 1
  }else if(strcmp(pickRNG,"mt2")== 0){
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MT19937); //Mersenne Twister family number 2
  }else if(strcmp(pickRNG,"lfsr")== 0){
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_XORWOW); //linear feedback shift register
  }else if(strcmp(pickRNG,"mrg")== 0){
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MRG32K3A); //multiple recursive generator
  }else if(strcmp(pickRNG,"phi")== 0){
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_PHILOX4_32_10); //Philox
  }else if(strcmp(pickRNG,"sobol")== 0){
    hiprandCreateGenerator(&gen, HIPRAND_RNG_QUASI_SOBOL32); //SOBOL
  }else{
    // Fix: an unrecognized selector previously left `gen` uninitialized
    // and the calls below operated on garbage (undefined behavior).
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
  }
  long randvalue = (long)rand();
  // NOTE(review): seeding is not meaningful for the quasi-random SOBOL
  // generator; the returned status is not checked here.
  hiprandSetPseudoRandomGeneratorSeed(gen, randvalue);
  hiprandGenerateUniform(gen, devData, size);
  hipMemcpy(host, devData, size * sizeof(float), hipMemcpyDeviceToHost);
  hiprandDestroyGenerator(gen);
  hipFree(devData);
  return host;
}
//Vector A: Input
//Vector D: output
//mean: C.elements[0]
// Population variance of A around `mean`, computed on the GPU: a kernel
// writes (A[i]-mean)^2 into D, which is then reduced on the host with
// thrust and divided by the element count.
// Vector A: input; mean: precomputed average of A's elements.
float VectorVariance(const Vector A,float mean){
  Vector d_A;
  Vector d_D;
  Vector D = AllocateVector(A.length, 0, 0.0f);
  d_A = AllocateDeviceVector(A);
  d_D = AllocateDeviceVector(D);
  CopyToDeviceVector(d_A, A);
  dim3 dimGrid, dimBlock;
  dimBlock.x = BLOCK_SIZE;
  dimBlock.y = 1;
  dimBlock.z = 1;
  // Ceil-divide VSIZE by the block size.
  dimGrid.x = (VSIZE ) /(BLOCK_SIZE);
  if(VSIZE % BLOCK_SIZE != 0) {dimGrid.x++;}
  dimGrid.y = 1;
  dimGrid.z = 1;
  hipLaunchKernelGGL(( VectorVarianceKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_A, d_D,mean);
  CopyFromDeviceVector(D,d_D);
  hipFree(d_A.elements); // fix: the device input buffer was leaked
  hipFree(d_D.elements);
  float result = thrust::reduce(thrust::host, D.elements, D.elements + D.length,0.0f,thrust::plus<float>());
  float VAR = result/D.length;
  free(D.elements);
  return VAR;
}
// Allocate a host Vector of `length` floats.
// init == 0: every element is set to initvalue;
// init == 1: every element is a uniform random value in [0, 1]
//            (initvalue is ignored);
// any other init value leaves the elements uninitialized.
Vector AllocateVector(int length, int init, float initvalue)
{
    Vector V;
    V.length = length;
    V.elements = (float*) malloc(length * sizeof(float));
    for (int idx = 0; idx < length; idx++)
    {
        if (init == 1) {
            V.elements[idx] = (rand()/(float)RAND_MAX);
        } else if (init == 0) {
            V.elements[idx] = initvalue;
        }
    }
    return V;
}
// Allocate a device-side buffer for a Vector with the same length as V.
// Exits the process on allocation failure.
Vector AllocateDeviceVector(const Vector V)
{
    Vector Vdevice = V;
    int size = V.length * sizeof(float);
    hipError_t cuda_ret = hipMalloc((void**)&Vdevice.elements, size);
    if(cuda_ret != hipSuccess) {
        // Fix: report on stderr with details, and exit with a failure
        // status (the original printed to stdout and exit(0) signalled
        // success to the caller's shell).
        fprintf(stderr, "Unable to allocate device memory: %s\n",
                hipGetErrorString(cuda_ret));
        exit(EXIT_FAILURE);
    }
    return Vdevice;
}
// Copy a host vector's elements into a previously-allocated device vector.
void CopyToDeviceVector(Vector Vdevice, const Vector Vhost)
{
    int size = Vhost.length * sizeof(float);
    // NOTE(review): Vdevice is passed by value, so this assignment only
    // changes the local copy -- it has no effect for the caller.
    Vdevice.length = Vhost.length;
    hipMemcpy(Vdevice.elements, Vhost.elements, size,
              hipMemcpyHostToDevice);
}
// Copy a device vector to a host vector.
// Copy a device vector's elements back into a host vector
// (Vhost must already have Vdevice.length elements allocated).
void CopyFromDeviceVector(Vector Vhost, const Vector Vdevice)
{
    int size = Vdevice.length * sizeof(float);
    hipMemcpy(Vhost.elements, Vdevice.elements, size,
              hipMemcpyDeviceToHost);
}
// Return the num-th (1-based) comma-separated field of `line`, or NULL if
// there are fewer fields.  NOTE: mutates `line` in place (strtok) and, like
// strtok, skips empty fields.
const char* getfield(char* line, int num)
{
    const char* field = strtok(line, ",");
    while (field != NULL && *field != '\0')
    {
        num--;
        if (num == 0)
            return field;
        field = strtok(NULL, ",\n");
    }
    return NULL;
}
// Read up to `nums` prices from a CSV file: the header row is skipped and
// the second field of each following row is parsed into stock_data[i].
// Exits on a missing file; stops early (leaving the remaining entries
// untouched) if the file has fewer rows than requested.
void ReadFile(float* stock_data, char* file_name, int nums)
{
    FILE* stream = fopen(file_name, "r");
    if (stream == NULL) {
        // Fix: the original dereferenced a NULL stream on a missing file.
        fprintf(stderr, "ReadFile: cannot open %s\n", file_name);
        exit(EXIT_FAILURE);
    }
    char line[1024];
    fgets(line, 1024, stream); // skip the header row
    for(int i = 0; i < nums; i++) {
        if (fgets(line, 1024, stream) == NULL) {
            break; // fewer rows than requested
        }
        char* tmp = strdup(line);
        const char* field = getfield(tmp, 2);
        // Fix: guard against a malformed row (atof(NULL) was undefined).
        stock_data[i] = (field != NULL) ? atof(field) : 0.0f;
        free(tmp);
    }
    fclose(stream); // fix: the stream was never closed (resource leak)
}
// Write a floating point vector to file
void WriteFile(Vector V, char* file_name)
{
FILE* output = fopen(file_name, "w");
for (unsigned i = 0; i < V.length; i++) {
fprintf(output, "%f\n", V.elements[i]);
}
fclose(output);
}
// print float array
void PrintData(float* data, int size){
//Print out the result
for(int i = 0; i < size ; i++){
printf(" %9.6f ", data[i]);
}
printf("\n");
}
// Debug helper: print every element of M, separated by '|',
// under a dashed header line.
void PrintVector(Vector M){
    printf("---------------------------------\n");
    int count = M.length;
    for (int idx = 0; idx < count; idx++) {
        printf("%f|", M.elements[idx]);
    }
    printf("\n");
}
//------------------------------sequential code-------------------------
// CPU reference: fill C[1..N-1] with daily log-returns log(A[i]/A[i-1])
// (C[0] is set to 0) and return their arithmetic mean over the N-1 returns.
float computeGold_percentChange(float* C, const float* A, unsigned int N){
    C[0] = 0.0f;
    float sum = 0.0f;
    // Single pass: compute each return and accumulate the total in the
    // same left-to-right order as the original two-loop version.
    for (unsigned int day = 1; day < N; day++){
        const float ret = logf(A[day] / A[day - 1]);
        C[day] = ret;
        sum += ret;
    }
    return sum / (N - 1);
}
// CPU reference: standard deviation of the N-1 daily returns C[1..N-1]
// around `average_daily` (divides the squared-deviation sum by N-1).
float computeGold_stdDev(float* C, float average_daily, unsigned int N){
    float sum1 = 0.0f;
    for (unsigned int i = 1; i < N; i++)
    {
        // Fix: use a plain float multiply instead of double-precision
        // pow(x, 2) -- same result for a square, without the promotion.
        const float diff = C[i] - average_daily;
        sum1 += diff * diff;
    }
    const float variance = sum1 / (float)(N - 1);
    // sqrtf keeps the computation in single precision (was sqrt).
    return sqrtf(variance);
}
// CPU reference: map uniform samples in `input` to normally distributed
// increments scaled by std_dev, via the inverse normal CDF (normcdfinvf,
// a CUDA math function usable in host code under nvcc).
// Allocates and returns a new buffer of `size` floats; caller frees.
// NOTE(review): the loops iterate DAYS*SIM_SIZE regardless of `size`, so
// callers must pass size == DAYS * SIM_SIZE -- confirm at call sites.
float* computeGold_randVal(float std_dev, int size, float* input){
    float* rand_num;
    rand_num = (float*)malloc(size * sizeof(float));
    for(int i = 0; i < DAYS; i++){
        for(int j = 0; j < SIM_SIZE; j++){
            //rand_num[j* DAYS + i] = std_dev * normcdfinvf(input[j* DAYS + i]);
            rand_num[i* SIM_SIZE + j] = std_dev * normcdfinvf(input[i* SIM_SIZE + j]);
        }
    }
    return rand_num;
}
// CPU reference: geometric-Brownian-motion step for each simulation/day:
//   day 0:   price = start_price      * exp(drift + rand_val)
//   day i>0: price = previous day's   * exp(drift + rand_val)
// IMPORTANT: updates `today` IN PLACE and returns that same pointer --
// no new allocation is made, so the caller must not free both aliases.
float* computeGold_NextVal(float* today, float drift, float* rand_val){
    for(int i = 0; i < DAYS; i++){
        for(int j = 0; j < SIM_SIZE; j++){
            if(i == 0){
                today[i* SIM_SIZE + j] = today[i* SIM_SIZE + j]* exp( drift +rand_val[i* SIM_SIZE + j]);
            }
            else{
                today[i* SIM_SIZE + j] = today[(i-1)* SIM_SIZE + j]* exp( drift +rand_val[i* SIM_SIZE + j]);
            }
        }
    }
    return today;
}
| 616dae17bde31692152100f4d80504fce793d9c8.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <math.h>
#include <cuda.h>
#include <curand.h>
#include <time.h>
#include "VectorKernel.cu"
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <functional>
#define MAXLINE 100000
extern "C"
void computeGold(float* C, const float* A, unsigned int N);
Vector AllocateDeviceVector(const Vector V);
Vector AllocateVector(int length, int init, float initvalue);
void CopyToDeviceVector(Vector Vdevice, const Vector Vhost);
void CopyFromDeviceVector(Vector Vhost, const Vector Vdevice);
const char* getfield(char* line, int num);
void ReadFile(float* stock_data, char* file_name, int nums);
void WriteFile(Vector V, char* file_name);
//-----------------------------Parallel---------------------------------
float VectorVariance(const Vector A, float mean); // Variance
// void VectorStDeviation(Vector A, Vector E, float mean); // Standard deviation
void PrintVector(Vector M);
float* UniformNumberGenerator(int size,char * pickRNG);
void VectorPercentChange(const Vector A, Vector C);
//----------------------------------------------------------------------
float computeGold_percentChange(float* C, const float* A, unsigned int N);
float computeGold_stdDev(float* C, float average_daily, unsigned int N);
float* computeGold_randVal(float std_dev, int size, float* input);
float* computeGold_NextVal(float* today, float drift, float* rand_val);
//----------------------------------------------------------------------
void PrintData(float* data, int size);
void VectorDrift(Vector E, float variance);
float* RandomValueAndNextDayPrice(Vector today, float* randVector, float std_deviation,float drift);
void VectorNextDay(float today, float drift, float* randomValue );
// Monte-Carlo stock-price simulation driver (CUDA build).
// argv[1] (optional): CSV file of historical prices; argv[2] (optional):
// cuRAND generator selector (mt1/mt2/lfsr/mrg/phi/sobol).
// Runs the GPU pipeline, then a sequential CPU reference, timing both.
int main(int argc, char** argv){
	srand(time(0));
	Vector StockData;      // input historical prices
	Vector PercentChange;  // GPU daily log-returns
	Vector PercentChangeS; // CPU (sequential) daily log-returns
	printf("argc %d\n", argc);
	if (argc > 1) {
		// Fix: argv[1] is NULL when no argument was given; printing it
		// through %s was undefined behavior.
		printf("argv %s\n", argv[1]);
	}
	char RNG[10] = "mt1";
	if (argc == 1)
	{
		// No inputs provided: synthesize random prices.
		StockData = AllocateVector(VSIZE, 1, 0.0f);
		PercentChange = AllocateVector(VSIZE, 0, 0.0f);
		PercentChangeS = AllocateVector(VSIZE, 0, 0.0f);
	}
	else if (argc == 2)
	{
		// Input file provided.
		StockData = AllocateVector(VSIZE, 0, 0.0f);
		PercentChange = AllocateVector(VSIZE, 0, 0.0f);
		PercentChangeS = AllocateVector(VSIZE, 0, 0.0f);
		ReadFile(StockData.elements, argv[1], VSIZE);
	}
	else if (argc == 3)
	{
		// Input file and RNG selector provided.
		StockData = AllocateVector(VSIZE, 0, 0.0f);
		PercentChange = AllocateVector(VSIZE, 0, 0.0f);
		PercentChangeS = AllocateVector(VSIZE, 0, 0.0f);
		ReadFile(StockData.elements, argv[1], VSIZE);
		strcpy(RNG, argv[2]);
	}
	mkdir("output", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
	//-----------------------------Parallel---------------------------------
	cudaEvent_t start_GPU, stop_GPU;
	float GPU_time = 0;
	cudaEventCreate(&start_GPU);
	cudaEventCreate(&stop_GPU);
	cudaEventRecord(start_GPU);
	VectorPercentChange(StockData, PercentChange);
	// Mean / variance / drift / volatility of the daily log-returns.
	float result = thrust::reduce(thrust::host, PercentChange.elements, PercentChange.elements + PercentChange.length, 0.0f, thrust::plus<float>());
	float AVG = result/(PercentChange.length);
	float VAR = VectorVariance(PercentChange, AVG);
	float DRIFT = AVG - (VAR/2.0f);
	float STD = sqrt(VAR);
	// Every simulated path starts from the last observed price.
	Vector Today = AllocateVector(SIM_SIZE, 0, StockData.elements[StockData.length-1]);
	Vector next_day;
	Vector RANDSTORE;
	RANDSTORE.elements = (float*)malloc(SIM_SIZE*DAYS*sizeof(float));
	RANDSTORE.length = SIM_SIZE*DAYS;
	char Buffer[30];
	for(int i = 0; i < DAYS; i++){
		sprintf(Buffer,"output/output%d.txt",i);
		//WriteFile(Today,Buffer);
		float * randVector = UniformNumberGenerator(SIM_SIZE,(char*)RNG);
		memcpy(RANDSTORE.elements + (i*SIM_SIZE), randVector, SIM_SIZE*sizeof(float));
		next_day.length = Today.length;
		next_day.elements = RandomValueAndNextDayPrice(Today, randVector, STD, DRIFT);
		memcpy(Today.elements, next_day.elements, SIM_SIZE*sizeof(float));
		free(next_day.elements); // fix: this buffer was leaked once per day
		next_day.elements = NULL;
		free(randVector);
	}
	cudaEventRecord(stop_GPU);
	cudaEventSynchronize(stop_GPU);
	cudaEventElapsedTime(&GPU_time, start_GPU, stop_GPU);
	printf("GPU time: %f ms\n", GPU_time);
	cudaEventDestroy(start_GPU);
	cudaEventDestroy(stop_GPU);
	//------------------------------------------------------------------
	WriteFile(RANDSTORE,(char*)"random.txt");
	free(PercentChange.elements);
	PercentChange.elements = NULL;
	//----------------------------Sequential----------------------------
	cudaEvent_t start, stop;
	float CPU_time = 0.0;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	float average_daily = computeGold_percentChange(PercentChangeS.elements, StockData.elements, VSIZE);
	float std_dev = computeGold_stdDev(PercentChangeS.elements, average_daily, VSIZE);
	float drift = average_daily - (pow(std_dev,2.0) / 2 );
	float* input = (float*)malloc(SIM_SIZE * DAYS * sizeof(float));
	for(int i = 0 ; i < DAYS * SIM_SIZE ; i ++){
		input[i] = (double)rand() / (RAND_MAX);
	}
	float* today = (float*)malloc(SIM_SIZE * DAYS * sizeof(float));
	for(int i = 0 ; i < SIM_SIZE * DAYS ; i ++){
		today[i] = StockData.elements[VSIZE-1];
	}
	// Fix: computeGold_randVal allocates its own result, so the previous
	// pre-malloc of rand_num leaked on reassignment.
	float* rand_num = computeGold_randVal(std_dev, SIM_SIZE * DAYS, input);
	// computeGold_NextVal updates `today` in place and returns that same
	// pointer, so Next_dayS aliases `today` (free it exactly once below).
	// The previous pre-malloc of Next_dayS was likewise leaked.
	float* Next_dayS = computeGold_NextVal(today, drift, rand_num);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&CPU_time, start, stop);
	printf("CPU time: %f ms\n", CPU_time);
	// Fix: the original destroyed start_GPU/stop_GPU here a second time
	// (double destroy) and leaked the CPU-timer events start/stop.
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	mkdir("outputS", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
	Vector CHUNK;
	CHUNK.elements = (float*)malloc(SIM_SIZE*sizeof(float));
	CHUNK.length = SIM_SIZE;
	memcpy(CHUNK.elements, Next_dayS, SIM_SIZE*sizeof(float));
	for(int i = 0; i < DAYS; i++){
		sprintf(Buffer, "outputS/output%d.txt", i);
		WriteFile(CHUNK, Buffer);
		memcpy(CHUNK.elements, Next_dayS+(i*SIM_SIZE), SIM_SIZE*sizeof(float));
	}
	free(CHUNK.elements); // fix: was leaked
	free(PercentChangeS.elements);
	PercentChangeS.elements = NULL;
	free(rand_num);
	free(input);
	free(Next_dayS); // also releases `today` (same buffer)
	rand_num = NULL;
	input = NULL;
	Next_dayS = NULL;
	today = NULL;
	free(StockData.elements);
	StockData.elements = NULL;
	free(Today.elements);     // fix: was leaked
	free(RANDSTORE.elements); // fix: was leaked
	return 0;
}
// One GPU simulation step: for each of the today.length Monte-Carlo paths,
// computes the next-day price from today's price, one pre-generated uniform
// random draw, the daily volatility (std_deviation) and the drift term.
// Returns a newly allocated host array of today.length floats; caller frees.
float* RandomValueAndNextDayPrice(Vector today, float* randVector, float std_deviation,float drift){
  //---------------------------input----------------------------------
  // Device copy of today's prices.
  Vector today_dev = AllocateDeviceVector(today);
  CopyToDeviceVector(today_dev, today);
  // Device copy of the uniform random draws (one per path).
  float* randVector_dev;
  cudaMalloc((void**) &randVector_dev, today.length*sizeof(float));
  cudaMemcpy(randVector_dev, randVector, today.length * sizeof(float), cudaMemcpyHostToDevice);
  //---------------------------output---------------------------------
  float* next_day_dev;
  float* next_day_host;
  // calloc keeps the buffer defined even if the copy-back fails.
  next_day_host = (float *)calloc(today.length, sizeof(float));
  cudaMalloc((void**) &next_day_dev, today.length*sizeof(float));
  //--------------------------kernel----------------------------------
  dim3 dim_grid, dim_block;
  // NOTE(review): this rounds up but launches one extra block whenever
  // today.length is an exact multiple of BLOCK_SIZE; assumes the kernel
  // bounds-checks its global index -- confirm in the kernel source.
  dim_grid.x = (today.length + BLOCK_SIZE ) / (BLOCK_SIZE);
  dim_grid.y = 1;
  dim_grid.z = 1;
  dim_block.x = BLOCK_SIZE;
  dim_block.y = 1;
  dim_block.z = 1;
  VectorRandomValueAndNextDayPriceKernel<<<dim_grid,dim_block>>>(today_dev,randVector_dev,std_deviation,drift,next_day_dev);
  //------------------------------------------------------------------
  // Blocking copy: also synchronizes with the kernel before reading results.
  cudaMemcpy(next_day_host, next_day_dev, today.length * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(today_dev.elements);
  cudaFree(next_day_dev);
  cudaFree(randVector_dev);
  return next_day_host;
}
// Computes the element-wise percent (log-return) change of price series A on
// the GPU and stores the result in C (same length as A).
void VectorPercentChange(const Vector A, Vector C){
  // Device working copies: input is uploaded, output is write-only on device.
  Vector dev_in = AllocateDeviceVector(A);
  Vector dev_out = AllocateDeviceVector(C);
  CopyToDeviceVector(dev_in, A);
  // One thread per element; grid rounded up to cover A.length.
  dim3 grid, block;
  block.x = BLOCK_SIZE;
  block.y = 1;
  block.z = 1;
  grid.x = A.length / block.x;
  if (A.length % block.x != 0) grid.x++;
  grid.y = 1;
  grid.z = 1;
  VectorPercentChangeKernel<<<grid, block>>>(dev_in, dev_out);
  // Bring the result back to the host, then release device storage.
  CopyFromDeviceVector(C, dev_out);
  cudaFree(dev_in.elements);
  cudaFree(dev_out.elements);
}
// Generates `size` uniform floats on the GPU with the cuRAND generator named
// by `pickRNG` ("mt1","mt2","lfsr","mrg","phi","sobol") and returns them in a
// newly allocated host buffer (caller frees).
// Fixes: (1) an unrecognised pickRNG string previously left `gen`
// uninitialised (undefined behaviour when used); it now falls back to XORWOW.
// (2) curandSetPseudoRandomGeneratorSeed is only valid for pseudo-random
// generators per the cuRAND API, so it is skipped for the quasi-random SOBOL
// generator (the call previously just failed with its status ignored).
float* UniformNumberGenerator(int size,char * pickRNG){
  curandGenerator_t gen;
  int quasi = 0; // non-zero for quasi-random generators (no pseudo seed)
  float *devData, *host;
  host = (float *)calloc(size, sizeof(float));
  cudaMalloc((void**) &devData, size*sizeof(float));
  if(strcmp(pickRNG,"mt1") == 0){
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32); //Mersenne Twister family number 1
  }else if(strcmp(pickRNG,"mt2")== 0){
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MT19937); //Mersenne Twister family number 2
  }else if(strcmp(pickRNG,"lfsr")== 0){
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_XORWOW); //linear feedback shift register
  }else if(strcmp(pickRNG,"mrg")== 0){
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MRG32K3A); //multiple recursive generator
  }else if(strcmp(pickRNG,"phi")== 0){
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_PHILOX4_32_10); //Philox
  }else if(strcmp(pickRNG,"sobol")== 0){
    curandCreateGenerator(&gen, CURAND_RNG_QUASI_SOBOL32); //SOBOL
    quasi = 1;
  }else{
    // Unknown selector: default to XORWOW instead of using `gen` uninitialised.
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_XORWOW);
  }
  if(!quasi){
    long randvalue = (long)rand();
    //printf("%ld\n", randvalue);
    curandSetPseudoRandomGeneratorSeed(gen, randvalue);
  }
  curandGenerateUniform(gen, devData, size);
  cudaMemcpy(host, devData, size * sizeof(float), cudaMemcpyDeviceToHost);
  curandDestroyGenerator(gen);
  cudaFree(devData);
  return host;
}
// Computes the (population) variance of vector A around `mean`: squared
// deviations are computed per element on the GPU, summed on the host with
// thrust::reduce, and divided by the element count.
// Fixes: d_A.elements was never cudaFree'd (device memory leak), and the grid
// was sized from the global VSIZE instead of the actual input length.
float VectorVariance(const Vector A,float mean){
  Vector d_A;
  Vector d_D;
  // Host buffer that receives the per-element squared deviations.
  Vector D = AllocateVector(A.length, 0, 0.0f);
  d_A = AllocateDeviceVector(A);
  d_D = AllocateDeviceVector(D);
  CopyToDeviceVector(d_A, A);
  // One thread per element; grid rounded up to cover A.length.
  dim3 dimGrid, dimBlock;
  dimBlock.x = BLOCK_SIZE;
  dimBlock.y = 1;
  dimBlock.z = 1;
  dimGrid.x = (A.length) / (BLOCK_SIZE);
  if(A.length % BLOCK_SIZE != 0) {dimGrid.x++;}
  dimGrid.y = 1;
  dimGrid.z = 1;
  VectorVarianceKernel <<< dimGrid, dimBlock >>>(d_A, d_D,mean);
  CopyFromDeviceVector(D,d_D);
  // Release device buffers (d_A was previously leaked).
  cudaFree(d_A.elements);
  cudaFree(d_D.elements);
  // Host-side sum of squared deviations, then normalise.
  float result = thrust::reduce(thrust::host, D.elements, D.elements + D.length,0.0f,thrust::plus<float>());
  float VAR = result/D.length;
  free(D.elements);
  return VAR;
}
// Creates a host vector of `length` floats.
//   init == 0 : every element set to `initvalue`
//   init == 1 : every element set to a uniform random value in [0, 1]
// Any other `init` leaves the elements uninitialised (as malloc'd).
Vector AllocateVector(int length, int init, float initvalue)
{
  Vector vec;
  vec.length = length;
  vec.elements = NULL;
  vec.elements = (float*) malloc(length*sizeof(float));
  for(unsigned int idx = 0; idx < vec.length; idx++)
  {
    if(init == 1){
      vec.elements[idx] = (rand()/(float)RAND_MAX);
    }else if(init == 0){
      vec.elements[idx] = initvalue;
    }
  }
  return vec;
}
// Allocates device storage for a vector with the same length as V.
// Element data is NOT copied; use CopyToDeviceVector for that.
// Aborts the process if the device allocation fails.
Vector AllocateDeviceVector(const Vector V)
{
  Vector devVec = V;
  int nbytes = V.length * sizeof(float);
  if (cudaMalloc((void**)&devVec.elements, nbytes) != cudaSuccess) {
    printf("Unable to allocate device memory");
    exit(0);
  }
  return devVec;
}
// Copies Vhost's elements to the device vector Vdevice (host -> device).
// Both vectors must have been allocated with Vhost.length elements.
// Fix: Vdevice is passed by value, so the former `Vdevice.length =
// Vhost.length` assignment never reached the caller; the dead store
// has been removed.
void CopyToDeviceVector(Vector Vdevice, const Vector Vhost)
{
  int size = Vhost.length * sizeof(float);
  cudaMemcpy(Vdevice.elements, Vhost.elements, size,
             cudaMemcpyHostToDevice);
}
// Copies Vdevice's elements back to the host vector Vhost (device -> host).
// The blocking cudaMemcpy also synchronises with any preceding kernel.
void CopyFromDeviceVector(Vector Vhost, const Vector Vdevice)
{
  const int nbytes = Vdevice.length * sizeof(float);
  cudaMemcpy(Vhost.elements, Vdevice.elements, nbytes, cudaMemcpyDeviceToHost);
}
// Returns the num-th (1-based) comma-separated field of `line`, or NULL if
// there are fewer fields. Destructive: strtok writes NULs into `line`, and
// the returned pointer aliases `line`'s buffer. An empty field ends the scan.
const char* getfield(char* line, int num)
{
  const char* token = strtok(line, ",");
  while (token && *token)
  {
    if (--num == 0)
      return token;
    token = strtok(NULL, ",\n");
  }
  return NULL;
}
// Reads `nums` prices from CSV file `file_name` into stock_data: skips the
// header line, then takes field 2 (the price column) of each subsequent row.
// Fixes: the FILE handle was never fclosed (resource leak); a missing file
// made fgets(NULL,...) crash; a short/malformed row passed NULL to atof.
// On error a message is printed and reading stops early.
void ReadFile(float* stock_data, char* file_name, int nums)
{
  FILE* stream = fopen(file_name, "r");
  if (stream == NULL) {
    printf("Unable to open file %s\n", file_name);
    return;
  }
  char line[1024];
  // Discard the header row.
  if (fgets(line, 1024, stream) == NULL) {
    fclose(stream);
    return;
  }
  for(int i = 0; i < nums; i++) {
    if (fgets(line, 1024, stream) == NULL) break; // fewer rows than requested
    char* tmp = strdup(line);
    const char* field = getfield(tmp, 2);
    stock_data[i] = field ? atof(field) : 0.0f;
    free(tmp);
  }
  fclose(stream);
}
// Writes vector V to `file_name`, one "%f" value per line.
// Fix: fopen failure (e.g. missing output directory) previously made
// fprintf(NULL, ...) crash; it is now reported and the write is skipped.
void WriteFile(Vector V, char* file_name)
{
  FILE* output = fopen(file_name, "w");
  if (output == NULL) {
    printf("Unable to open file %s for writing\n", file_name);
    return;
  }
  for (unsigned i = 0; i < V.length; i++) {
    fprintf(output, "%f\n", V.elements[i]);
  }
  fclose(output);
}
// Prints `size` floats from `data` on a single line, " %9.6f " formatted.
void PrintData(float* data, int size){
  for(int idx = 0; idx < size ; idx++){
    printf(" %9.6f ", data[idx]);
  }
  printf("\n");
}
// Prints every element of M separated by '|', preceded by a ruler line.
void PrintVector(Vector M){
  printf("---------------------------------\n");
  int idx;
  for (idx = 0; idx < M.length; idx++){
    printf("%f|", M.elements[idx]);
  }
  printf("\n");
}
//------------------------------sequential code-------------------------
// CPU reference: fills C with the daily log returns of price series A
// (C[0] = 0, C[i] = ln(A[i]/A[i-1]) for i in [1, N)) and returns their mean
// over the N-1 trading days.
float computeGold_percentChange(float* C, const float* A, unsigned int N){
  C[0] = 0;
  float sum = 0.0f;
  for (unsigned int k = 1; k < N; k++){
    C[k] = logf((A[k])/(A[k-1]));
    sum += C[k]; // accumulate in the same order as the original two-pass loop
  }
  return sum/(N-1);
}
// CPU reference: standard deviation of the N-1 daily log returns in
// C[1..N-1] around average_daily (variance normalised by N-1).
float computeGold_stdDev(float* C, float average_daily, unsigned int N){
  float sumsq = 0.0f;
  for (int k = 1; k < N; k++)
  {
    sumsq = sumsq + pow((C[k] - average_daily), 2);
  }
  float variance = sumsq / (float)(N-1);
  return sqrt(variance);
}
// CPU reference: converts the uniform draws in `input` into normal draws
// scaled by std_dev, via the inverse standard-normal CDF (CUDA's
// normcdfinvf, which nvcc also exposes to host code).
// NOTE(review): `size` is used only for the allocation while the loops walk
// DAYS*SIM_SIZE entries, so callers must pass size == DAYS*SIM_SIZE -- confirm.
// Caller owns the returned buffer and must free it.
float* computeGold_randVal(float std_dev, int size, float* input){
  float* rand_num;
  rand_num = (float*)malloc(size * sizeof(float));
  for(int i = 0; i < DAYS; i++){
    for(int j = 0; j < SIM_SIZE; j++){
      //rand_num[j* DAYS + i] = std_dev * normcdfinvf(input[j* DAYS + i]);
      // Row-major layout: day i, simulation path j.
      rand_num[i* SIM_SIZE + j] = std_dev * normcdfinvf(input[i* SIM_SIZE + j]);
    }
  }
  return rand_num;
}
// CPU reference: propagates prices day by day with geometric Brownian motion.
// Day 0 starts from the prices pre-loaded in today[0..SIM_SIZE-1]; every later
// day i multiplies day i-1's (already updated) price by exp(drift + rand_val).
// Updates `today` in place (DAYS x SIM_SIZE, row-major) and returns it.
float* computeGold_NextVal(float* today, float drift, float* rand_val){
  for(int day = 0; day < DAYS; day++){
    // Day 0 grows from its own seed row; later days grow from the previous row.
    int prev = (day == 0) ? day : day - 1;
    for(int sim = 0; sim < SIM_SIZE; sim++){
      today[day* SIM_SIZE + sim] = today[prev* SIM_SIZE + sim]* exp( drift +rand_val[day* SIM_SIZE + sim]);
    }
  }
  return today;
}
|
a904a9ee191fc0ae542f71ed0c13de0edde65ded.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
// Adds the scalar kernel arguments a and b on the device and stores the sum
// in device memory *c. Launched with a single thread.
__global__ void add(int a, int b, int *c) {
  *c = a+b;
}
// Host driver: allocates one int on the device, launches the add kernel for
// 2+7 with a single thread, copies the result back and prints it.
int main(void){
  int c;
  int *dev_c;
  // Device storage for the kernel's single-int result.
  hipMalloc((void**)&dev_c, sizeof(int));
  hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2,7,dev_c);
  // Blocking copy also synchronises with the kernel before c is read.
  hipMemcpy(&c, dev_c, sizeof(int),hipMemcpyDeviceToHost);
  printf("2+7=%d\n",c);
  hipFree(dev_c);
} | a904a9ee191fc0ae542f71ed0c13de0edde65ded.cu | #include <stdlib.h>
#include <stdio.h>
// Adds the scalar kernel arguments a and b on the device and stores the sum
// in device memory *c. Launched with a single thread.
__global__ void add(int a, int b, int *c) {
  *c = a+b;
}
// Host driver: allocates one int on the device, launches the add kernel for
// 2+7 with a single thread, copies the result back and prints it.
int main(void){
  int c;
  int *dev_c;
  // Device storage for the kernel's single-int result.
  cudaMalloc((void**)&dev_c, sizeof(int));
  add<<<1,1>>>(2,7,dev_c);
  // Blocking copy also synchronises with the kernel before c is read.
  cudaMemcpy(&c, dev_c, sizeof(int),cudaMemcpyDeviceToHost);
  printf("2+7=%d\n",c);
  cudaFree(dev_c);
}
42a2993247d79ee28b864b7333449afce0be0302.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zswapdblk.cu, normal z -> c, Mon Jun 25 18:24:13 2018
*/
#include "magma_internal.h"
/******************************************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
// Swaps one nb x nb diagonal block between dA and dB per thread block.
// Launch: gridDim.x = number of diagonal blocks, blockDim.x = nb (each
// thread handles one row of its block and iterates across its nb columns).
__global__ void
cswapdblk_kernel( int nb,
                  magmaFloatComplex *dA, int ldda, int inca,
                  magmaFloatComplex *dB, int lddb, int incb )
{
    const int tx = threadIdx.x; // row within the nb x nb diagonal block
    const int bx = blockIdx.x;  // which diagonal block this CTA swaps

    // Step both pointers to row tx of the bx-th diagonal block.
    dA += tx + bx * nb * (ldda + inca);
    dB += tx + bx * nb * (lddb + incb);

    magmaFloatComplex tmp;

    // Swap this row's nb elements, one per column.
    #pragma unroll
    for( int i = 0; i < nb; i++ ) {
        tmp = dA[i*ldda];
        dA[i*ldda] = dB[i*lddb];
        dB[i*lddb] = tmp;
    }
}
/***************************************************************************//**
Purpose
-------
cswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB COMPLEX array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
            The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swapdblk
*******************************************************************************/
extern "C" void
magmablas_cswapdblk(
    magma_int_t n, magma_int_t nb,
    magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t inca,
    magmaFloatComplex_ptr dB, magma_int_t lddb, magma_int_t incb,
    magma_queue_t queue )
{
    // Number of complete nb x nb diagonal blocks in n columns; a trailing
    // partial block is not swapped.
    magma_int_t nblocks = n / nb;

    // Argument validation: info = -(index of the first bad argument).
    magma_int_t info = 0;
    if (n < 0) {
        info = -1;
    } else if (nb < 1 || nb > 1024) { // nb doubles as the thread-block size
        info = -2;
    } else if (ldda < (nblocks-1)*nb*inca + nb) {
        info = -4;
    } else if (inca < 0) {
        info = -5;
    } else if (lddb < (nblocks-1)*nb*incb + nb) {
        info = -7;
    } else if (incb < 0) {
        info = -8;
    }

    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return; //info;
    }

    // One thread block of nb threads per diagonal block, on queue's stream.
    if ( nblocks > 0 ) {
        hipLaunchKernelGGL(( cswapdblk_kernel), dim3(nblocks), dim3(nb), 0, queue->cuda_stream() ,
            nb, dA, ldda, inca,
            dB, lddb, incb );
    }
}
| 42a2993247d79ee28b864b7333449afce0be0302.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zswapdblk.cu, normal z -> c, Mon Jun 25 18:24:13 2018
*/
#include "magma_internal.h"
/******************************************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
// Swaps one nb x nb diagonal block between dA and dB per thread block.
// Launch: gridDim.x = number of diagonal blocks, blockDim.x = nb (each
// thread handles one row of its block and iterates across its nb columns).
__global__ void
cswapdblk_kernel( int nb,
                  magmaFloatComplex *dA, int ldda, int inca,
                  magmaFloatComplex *dB, int lddb, int incb )
{
    const int tx = threadIdx.x; // row within the nb x nb diagonal block
    const int bx = blockIdx.x;  // which diagonal block this CTA swaps

    // Step both pointers to row tx of the bx-th diagonal block.
    dA += tx + bx * nb * (ldda + inca);
    dB += tx + bx * nb * (lddb + incb);

    magmaFloatComplex tmp;

    // Swap this row's nb elements, one per column.
    #pragma unroll
    for( int i = 0; i < nb; i++ ) {
        tmp = dA[i*ldda];
        dA[i*ldda] = dB[i*lddb];
        dB[i*lddb] = tmp;
    }
}
/***************************************************************************//**
Purpose
-------
cswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = n/nb blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA COMPLEX array, dimension (LDDA,N)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA.
LDDA >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB COMPLEX array, dimension (LDDB,N)
The matrix dB.
@param[in]
lddb INTEGER
            The leading dimension of the array dB.
LDDB >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swapdblk
*******************************************************************************/
extern "C" void
magmablas_cswapdblk(
    magma_int_t n, magma_int_t nb,
    magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t inca,
    magmaFloatComplex_ptr dB, magma_int_t lddb, magma_int_t incb,
    magma_queue_t queue )
{
    // Number of complete nb x nb diagonal blocks in n columns; a trailing
    // partial block is not swapped.
    magma_int_t nblocks = n / nb;

    // Argument validation: info = -(index of the first bad argument).
    magma_int_t info = 0;
    if (n < 0) {
        info = -1;
    } else if (nb < 1 || nb > 1024) { // nb doubles as the thread-block size
        info = -2;
    } else if (ldda < (nblocks-1)*nb*inca + nb) {
        info = -4;
    } else if (inca < 0) {
        info = -5;
    } else if (lddb < (nblocks-1)*nb*incb + nb) {
        info = -7;
    } else if (incb < 0) {
        info = -8;
    }

    if (info != 0) {
        magma_xerbla( __func__, -(info) );
        return; //info;
    }

    // One thread block of nb threads per diagonal block, on queue's stream.
    if ( nblocks > 0 ) {
        cswapdblk_kernel<<< nblocks, nb, 0, queue->cuda_stream() >>>
            ( nb, dA, ldda, inca,
              dB, lddb, incb );
    }
}
|
e8792cd27d1bfd56a54b155acf58e2efa2796294.hip | // !!! This is a file automatically generated by hipify!!!
/*------------------------------------------------------------------------
CUDA C extension for Python
Provides functionality for forward and back projection in PET image
reconstruction.
author: Pawel Markiewicz
Copyrights: 2019
------------------------------------------------------------------------*/
#define PY_SSIZE_T_CLEAN
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION // NPY_API_VERSION
#include "def.h"
#include "pycuvec.cuh"
#include <Python.h>
#include <numpy/arrayobject.h>
#include <stdlib.h>
#include "prjb.h"
#include "prjf.h"
#include "tprj.h"
#include "recon.h"
#include "scanner_0.h"
//===================== START PYTHON INIT ==============================
//--- Available functions
static PyObject *trnx_prj(PyObject *self, PyObject *args);
static PyObject *frwd_prj(PyObject *self, PyObject *args, PyObject *kwargs);
static PyObject *back_prj(PyObject *self, PyObject *args, PyObject *kwargs);
static PyObject *osem_rec(PyObject *self, PyObject *args);
//---
//> Module Method Table: maps the Python-visible names to the C entry points.
static PyMethodDef petprj_methods[] = {
    {"tprj", trnx_prj, METH_VARARGS, "Transaxial projector."},
    {"fprj", (PyCFunction)frwd_prj, METH_VARARGS | METH_KEYWORDS, "PET forward projector."},
    {"bprj", (PyCFunction)back_prj, METH_VARARGS | METH_KEYWORDS, "PET back projector."},
    {"osem", osem_rec, METH_VARARGS, "OSEM reconstruction of PET data."},
    {NULL, NULL, 0, NULL} // Sentinel
};

//> Module Definition Structure
static struct PyModuleDef petprj_module = {
    PyModuleDef_HEAD_INIT,
    "petprj", //> name of module
    //> module documentation, may be NULL
    "This module provides an interface for GPU routines of PET forward and back projection.",
    -1, //> the module keeps state in global variables.
    petprj_methods};

//> Initialization function, invoked on `import petprj`.
PyMODINIT_FUNC PyInit_petprj(void) {
  Py_Initialize();
  //> load NumPy functionality; required before any PyArray_* call below.
  import_array();
  return PyModule_Create(&petprj_module);
}
//====================== END PYTHON INIT ===============================
//==============================================================================
// T R A N S A X I A L P R O J E C T O R
//------------------------------------------------------------------------------
// petprj.tprj: runs only the transaxial (2D) part of the Siddon projector and
// writes, through the NumPy in/out arrays o_tv/o_tt, the voxel-sampling
// pattern and ray parameters for every active sinogram bin (AW bins).
// o_prjout and o_im are accepted as in/out arrays but not modified here.
// Returns None on success, NULL (with exception state) on failure.
// Fix: the four device buffers (d_crs, d_s2c, d_tt, d_tv) were never freed,
// leaking GPU memory on every call; they are now released, matching frwd_prj.
static PyObject *trnx_prj(PyObject *self, PyObject *args) {
  // Structure of constants
  Cnst Cnt;
  // Dictionary of scanner constants
  PyObject *o_mmrcnst;
  // transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
  PyObject *o_txLUT;
  // input/output image
  PyObject *o_im;
  // input/output projection sinogram
  PyObject *o_prjout;
  // output transaxial sampling parameters
  PyObject *o_tv;
  PyObject *o_tt;

  /* Parse the input tuple */
  if (!PyArg_ParseTuple(args, "OOOOOO", &o_prjout, &o_im, &o_tv, &o_tt, &o_txLUT, &o_mmrcnst))
    return NULL;

  PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
  Cnt.LOG = (char)PyLong_AsLong(pd_log);
  PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
  Cnt.DEVID = (char)PyLong_AsLong(pd_devid);

  // transaxial sino LUTs:
  PyObject *pd_crs = PyDict_GetItemString(o_txLUT, "crs");
  PyObject *pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");

  // sino to crystal, crystals
  PyArrayObject *p_s2c = NULL, *p_crs = NULL;
  p_s2c = (PyArrayObject *)PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_crs = (PyArrayObject *)PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);

  // image object
  PyArrayObject *p_im = NULL;
  p_im = (PyArrayObject *)PyArray_FROM_OTF(o_im, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);

  // output sino object
  PyArrayObject *p_prjout = NULL;
  p_prjout = (PyArrayObject *)PyArray_FROM_OTF(o_prjout, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);

  // transaxial voxel sampling (ray-driven)
  PyArrayObject *p_tv = NULL;
  p_tv = (PyArrayObject *)PyArray_FROM_OTF(o_tv, NPY_UINT8, NPY_ARRAY_INOUT_ARRAY2);

  // transaxial parameters for voxel sampling (ray-driven)
  PyArrayObject *p_tt = NULL;
  p_tt = (PyArrayObject *)PyArray_FROM_OTF(o_tt, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);

  //--
  /* If that didn't work, throw an exception. */
  if (p_s2c == NULL || p_im == NULL || p_crs == NULL || p_prjout == NULL || p_tv == NULL ||
      p_tt == NULL) {
    // sino 2 crystals
    Py_XDECREF(p_s2c);
    Py_XDECREF(p_crs);
    // image object
    PyArray_DiscardWritebackIfCopy(p_im);
    Py_XDECREF(p_im);
    // output sino object
    PyArray_DiscardWritebackIfCopy(p_prjout);
    Py_XDECREF(p_prjout);
    // transaxial outputs
    PyArray_DiscardWritebackIfCopy(p_tv);
    Py_XDECREF(p_tv);
    PyArray_DiscardWritebackIfCopy(p_tt);
    Py_XDECREF(p_tt);
    return NULL;
  }

  short *s2c = (short *)PyArray_DATA(p_s2c);
  float *crs = (float *)PyArray_DATA(p_crs);
  int N0crs = PyArray_DIM(p_crs, 0);
  int N1crs = PyArray_DIM(p_crs, 1);
  if (Cnt.LOG <= LOGDEBUG) printf("\ni> N0crs=%d, N1crs=%d\n", N0crs, N1crs);

  float *im = (float *)PyArray_DATA(p_im);
  if (Cnt.LOG <= LOGDEBUG)
    printf("i> forward-projection image dimensions: %ld, %ld\n", PyArray_DIM(p_im, 0),
           PyArray_DIM(p_im, 1));

  // input/output projection sinogram
  float *prjout = (float *)PyArray_DATA(p_prjout);

  // output sampling
  unsigned char *tv = (unsigned char *)PyArray_DATA(p_tv);
  float *tt = (float *)PyArray_DATA(p_tt);

  // CUDA --------------------------------------------------------------------
  // sets the device on which to calculate
  HANDLE_ERROR(hipSetDevice(Cnt.DEVID));
  int dev_id;
  hipGetDevice(&dev_id);
  if (Cnt.LOG <= LOGDEBUG) printf("i> using CUDA device #%d\n", dev_id);

  //--- TRANSAXIAL COMPONENTS
  // NOTE(review): copies N0crs float4s out of an (N0crs x N1crs) float
  // array, i.e. assumes N1crs == 4 -- confirm against the caller's `crs` LUT.
  float4 *d_crs;
  HANDLE_ERROR(hipMalloc(&d_crs, N0crs * sizeof(float4)));
  HANDLE_ERROR(hipMemcpy(d_crs, crs, N0crs * sizeof(float4), hipMemcpyHostToDevice));

  short2 *d_s2c;
  HANDLE_ERROR(hipMalloc(&d_s2c, AW * sizeof(short2)));
  HANDLE_ERROR(hipMemcpy(d_s2c, s2c, AW * sizeof(short2), hipMemcpyHostToDevice));

  float *d_tt;
  HANDLE_ERROR(hipMalloc(&d_tt, N_TT * AW * sizeof(float)));

  unsigned char *d_tv;
  HANDLE_ERROR(hipMalloc(&d_tv, N_TV * AW * sizeof(unsigned char)));
  HANDLE_ERROR(hipMemset(d_tv, 0, N_TV * AW * sizeof(unsigned char)));

  //------------DO TRANSAXIAL CALCULATIONS------------------------------------
  gpu_siddon_tx(d_crs, d_s2c, d_tt, d_tv);
  //--------------------------------------------------------------------------

  HANDLE_ERROR(hipMemcpy(tt, d_tt, N_TT * AW * sizeof(float), hipMemcpyDeviceToHost));
  HANDLE_ERROR(hipMemcpy(tv, d_tv, N_TV * AW * sizeof(unsigned char), hipMemcpyDeviceToHost));

  // Release device buffers (previously leaked on every call).
  HANDLE_ERROR(hipFree(d_tv));
  HANDLE_ERROR(hipFree(d_tt));
  HANDLE_ERROR(hipFree(d_s2c));
  HANDLE_ERROR(hipFree(d_crs));
  // CUDA END-----------------------------------------------------------------

  // Clean up
  Py_DECREF(p_s2c);
  Py_DECREF(p_crs);
  PyArray_ResolveWritebackIfCopy(p_im);
  Py_DECREF(p_im);
  PyArray_ResolveWritebackIfCopy(p_tv);
  Py_DECREF(p_tv);
  PyArray_ResolveWritebackIfCopy(p_tt);
  Py_DECREF(p_tt);
  PyArray_ResolveWritebackIfCopy(p_prjout);
  Py_DECREF(p_prjout);

  Py_INCREF(Py_None);
  return Py_None;
}
//------------------------------------------------------------------------------
//==============================================================================
// F O R W A R D P R O J E C T O R
//------------------------------------------------------------------------------
// petprj.fprj: PET forward projection. Projects image `im` into sinogram
// `sino` (both PyCuVec<float>), optionally restricted to the projection-bin
// subset `subs` (OSEM) and optionally producing attenuation factors (att!=0).
// Returns None on success, NULL (with exception state) on failure.
// Fix: `li2sn` was read uninitialised when Cnt.SPN was neither 11 nor 1;
// an unsupported span now raises ValueError instead of invoking UB.
static PyObject *frwd_prj(PyObject *self, PyObject *args, PyObject *kwargs) {
  // Structure of constants
  Cnst Cnt;
  // Dictionary of scanner constants
  PyObject *o_mmrcnst;
  // axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
  PyObject *o_axLUT;
  // transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
  PyObject *o_txLUT;
  // input image to be forward projected (reshaped for GPU execution)
  PyCuVec<float> *o_im = NULL;
  // subsets for OSEM, first the default
  PyObject *o_subs;
  // output projection sino
  PyCuVec<float> *o_prjout = NULL;
  // flag for attenuation factors to be found based on mu-map; if 0 normal
  // emission projection is used
  int att;
  bool SYNC = true; // whether to ensure deviceToHost copy on return

  /* Parse the input tuple */
  static const char *kwds[] = {"sino", "im", "txLUT", "axLUT", "subs",
                               "cnst", "att", "sync", NULL};
  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&OOOOi|b", (char **)kwds, &asPyCuVec_f,
                                   &o_prjout, &asPyCuVec_f, &o_im, &o_txLUT, &o_axLUT, &o_subs,
                                   &o_mmrcnst, &att, &SYNC))
    return NULL;

  // Scanner constants pulled out of the Python dictionary.
  PyObject *pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
  Cnt.SPN = (char)PyLong_AsLong(pd_span);
  PyObject *pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT");
  Cnt.RNG_STRT = (char)PyLong_AsLong(pd_rngstrt);
  PyObject *pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END");
  Cnt.RNG_END = (char)PyLong_AsLong(pd_rngend);
  PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
  Cnt.LOG = (char)PyLong_AsLong(pd_log);
  PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
  Cnt.DEVID = (char)PyLong_AsLong(pd_devid);

  /* Interpret the input objects as numpy arrays. */
  // axial LUTs:
  PyObject *pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
  PyObject *pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
  PyObject *pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
  PyObject *pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
  PyObject *pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");

  //-- get the arrays from the dictionaries
  // axLUTs
  PyArrayObject *p_li2rno = NULL, *p_li2sn1 = NULL, *p_li2sn = NULL;
  PyArrayObject *p_li2nos = NULL, *p_li2rng = NULL;
  p_li2rno = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_ARRAY_IN_ARRAY);
  p_li2sn1 = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_li2sn = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_li2nos = (PyArrayObject *)PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_ARRAY_IN_ARRAY);
  p_li2rng = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);

  // transaxial sino LUTs:
  PyObject *pd_crs = PyDict_GetItemString(o_txLUT, "crs");
  PyObject *pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
  PyObject *pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");

  // sino to crystal, crystals
  PyArrayObject *p_s2c = NULL, *p_crs = NULL, *p_aw2ali = NULL;
  p_s2c = (PyArrayObject *)PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_crs = (PyArrayObject *)PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY);

  // subsets if using e.g., OSEM
  PyArrayObject *p_subs = NULL;
  p_subs = (PyArrayObject *)PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_ARRAY_IN_ARRAY);

  //--
  /* If that didn't work, throw an exception. */
  if (p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL ||
      p_aw2ali == NULL || p_s2c == NULL || p_crs == NULL || p_subs == NULL || p_li2rng == NULL) {
    // axLUTs
    Py_XDECREF(p_li2rno);
    Py_XDECREF(p_li2sn);
    Py_XDECREF(p_li2sn1);
    Py_XDECREF(p_li2nos);
    Py_XDECREF(p_li2rng);
    // 2D sino LUT
    Py_XDECREF(p_aw2ali);
    // sino 2 crystals
    Py_XDECREF(p_s2c);
    Py_XDECREF(p_crs);
    // subset definition object
    Py_XDECREF(p_subs);
    return NULL;
  }

  int *subs_ = (int *)PyArray_DATA(p_subs);
  short *s2c = (short *)PyArray_DATA(p_s2c);
  int *aw2ali = (int *)PyArray_DATA(p_aw2ali);

  // Pick the axial LUT matching the requested span.
  // FIX: previously li2sn was left uninitialised for any other SPN value.
  short *li2sn = NULL;
  if (Cnt.SPN == 11) {
    li2sn = (short *)PyArray_DATA(p_li2sn);
  } else if (Cnt.SPN == 1) {
    li2sn = (short *)PyArray_DATA(p_li2sn1);
  } else {
    PyErr_SetString(PyExc_ValueError, "unsupported span (SPN): must be 1 or 11");
    Py_DECREF(p_li2rno);
    Py_DECREF(p_li2rng);
    Py_DECREF(p_li2sn);
    Py_DECREF(p_li2sn1);
    Py_DECREF(p_li2nos);
    Py_DECREF(p_aw2ali);
    Py_DECREF(p_s2c);
    Py_DECREF(p_crs);
    Py_DECREF(p_subs);
    return NULL;
  }
  char *li2nos = (char *)PyArray_DATA(p_li2nos);
  float *li2rng = (float *)PyArray_DATA(p_li2rng);
  float *crs = (float *)PyArray_DATA(p_crs);

  if (Cnt.LOG <= LOGDEBUG)
    printf("i> forward-projection image dimensions: %ld, %ld, %ld\n", o_im->shape[0],
           o_im->shape[1], o_im->shape[2]);

  int Nprj = PyArray_DIM(p_subs, 0);
  int N0crs = PyArray_DIM(p_crs, 0);
  int N1crs = PyArray_DIM(p_crs, 1);
  int Naw = PyArray_DIM(p_aw2ali, 0);

  if (Cnt.LOG <= LOGDEBUG)
    printf("\ni> N0crs=%d, N1crs=%d, Naw=%d, Nprj=%d\n", N0crs, N1crs, Naw, Nprj);

  // subs[0] == -1 is the sentinel for "no subsets": use all AW bins.
  int *subs;
  if (subs_[0] == -1) {
    Nprj = AW;
    if (Cnt.LOG <= LOGDEBUG)
      printf("i> no subsets defined. number of projection bins in 2D: %d\n", Nprj);
    // all projections in
    subs = (int *)malloc(Nprj * sizeof(int));
    for (int i = 0; i < Nprj; i++) { subs[i] = i; }
  } else {
    if (Cnt.LOG <= LOGDEBUG)
      printf("i> subsets defined. number of subset projection bins in 2D: %d\n", Nprj);
    subs = subs_;
  }

  // sets the device on which to calculate
  HANDLE_ERROR(hipSetDevice(Cnt.DEVID));

  //--- TRANSAXIAL COMPONENT
  // NOTE(review): copies N0crs float4s out of an (N0crs x N1crs) float
  // array, i.e. assumes N1crs == 4 -- confirm against the caller's `crs` LUT.
  float4 *d_crs;
  HANDLE_ERROR(hipMalloc(&d_crs, N0crs * sizeof(float4)));
  HANDLE_ERROR(hipMemcpy(d_crs, crs, N0crs * sizeof(float4), hipMemcpyHostToDevice));

  short2 *d_s2c;
  HANDLE_ERROR(hipMalloc(&d_s2c, AW * sizeof(short2)));
  HANDLE_ERROR(hipMemcpy(d_s2c, s2c, AW * sizeof(short2), hipMemcpyHostToDevice));

  float *d_tt;
  HANDLE_ERROR(hipMalloc(&d_tt, N_TT * AW * sizeof(float)));

  unsigned char *d_tv;
  HANDLE_ERROR(hipMalloc(&d_tv, N_TV * AW * sizeof(unsigned char)));
  HANDLE_ERROR(hipMemset(d_tv, 0, N_TV * AW * sizeof(unsigned char)));

  // array of subset projection bins
  int *d_subs;
  HANDLE_ERROR(hipMalloc(&d_subs, Nprj * sizeof(int)));
  HANDLE_ERROR(hipMemcpy(d_subs, subs, Nprj * sizeof(int), hipMemcpyHostToDevice));

  // Run the forward projection on the GPU.
  gpu_fprj(o_prjout->vec.data(), o_im->vec.data(), li2rng, li2sn, li2nos, d_s2c, aw2ali, d_crs,
           d_subs, d_tt, d_tv, Nprj, Naw, Cnt, att, SYNC);

  HANDLE_ERROR(hipFree(d_subs));
  HANDLE_ERROR(hipFree(d_tv));
  HANDLE_ERROR(hipFree(d_tt));
  HANDLE_ERROR(hipFree(d_s2c));
  HANDLE_ERROR(hipFree(d_crs));

  // Clean up
  Py_DECREF(p_li2rno);
  Py_DECREF(p_li2rng);
  Py_DECREF(p_li2sn);
  Py_DECREF(p_li2sn1);
  Py_DECREF(p_li2nos);
  Py_DECREF(p_aw2ali);
  Py_DECREF(p_s2c);
  Py_DECREF(p_crs);
  Py_DECREF(p_subs);

  if (subs_[0] == -1) free(subs);

  Py_INCREF(Py_None);
  return Py_None;
}
//==============================================================================
// B A C K P R O J E C T O R
//------------------------------------------------------------------------------
/// Back-projects a PET sinogram into image space on the GPU (HIP).
/// Python signature: bprj(bimg, sino, txLUT, axLUT, subs, cnst, sync=True)
///   bimg  -- output back-projected image (device-backed PyCuVec<float>)
///   sino  -- input sinogram to back-project (PyCuVec<float>)
///   txLUT -- transaxial LUT dict (uses "crs", "s2c")
///   axLUT -- axial LUT dict (uses "li2rno", "li2sn", "li2sn1", "li2nos", "li2rng")
///   subs  -- int32 array of subset projection bins; subs[0]==-1 selects all bins
///   cnst  -- scanner-constants dict (SPN, RNG_STRT, RNG_END, LOG, DEVID)
///   sync  -- ensure device-to-host copy before returning
/// Returns None; raises ValueError for an unsupported span.
static PyObject *back_prj(PyObject *self, PyObject *args, PyObject *kwargs) {
  // Structure of constants
  Cnst Cnt;
  // Dictionary of scanner constants
  PyObject *o_mmrcnst;
  // axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
  PyObject *o_axLUT;
  // transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
  PyObject *o_txLUT;
  // sino to be back projected to image (both reshaped for GPU execution)
  PyCuVec<float> *o_sino = NULL;
  // subsets for OSEM, first the default
  PyObject *o_subs;
  // output backprojected image
  PyCuVec<float> *o_bimg = NULL;
  bool SYNC = true; // whether to ensure deviceToHost copy on return
  /* Parse the input tuple */
  static const char *kwds[] = {"bimg", "sino", "txLUT", "axLUT", "subs", "cnst", "sync", NULL};
  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&OOOO|b", (char **)kwds, &asPyCuVec_f,
                                   &o_bimg, &asPyCuVec_f, &o_sino, &o_txLUT, &o_axLUT, &o_subs,
                                   &o_mmrcnst, &SYNC))
    return NULL;
  // scanner constants used by the GPU projector
  PyObject *pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
  Cnt.SPN = (char)PyLong_AsLong(pd_span);
  PyObject *pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT");
  Cnt.RNG_STRT = (char)PyLong_AsLong(pd_rngstrt);
  PyObject *pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END");
  Cnt.RNG_END = (char)PyLong_AsLong(pd_rngend);
  PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
  Cnt.LOG = (char)PyLong_AsLong(pd_log);
  PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
  Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
  /* Interpret the input objects as numpy arrays. */
  // axial LUTs:
  PyObject *pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
  PyObject *pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
  PyObject *pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
  PyObject *pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
  PyObject *pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
  // transaxial sino LUTs:
  PyObject *pd_crs = PyDict_GetItemString(o_txLUT, "crs");
  PyObject *pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
  //-- get the arrays from the dictionaries (each PyArray_FROM_OTF yields a new reference)
  // axLUTs
  PyArrayObject *p_li2rno = NULL, *p_li2sn1 = NULL, *p_li2sn = NULL;
  PyArrayObject *p_li2nos = NULL, *p_li2rng = NULL;
  p_li2rno = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_ARRAY_IN_ARRAY);
  p_li2sn1 = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_li2sn = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_li2nos = (PyArrayObject *)PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_ARRAY_IN_ARRAY);
  p_li2rng = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  // sino to crystal, crystals
  PyArrayObject *p_s2c = NULL, *p_crs = NULL;
  p_s2c = (PyArrayObject *)PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_crs = (PyArrayObject *)PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  // subsets if using e.g., OSEM
  PyArrayObject *p_subs = NULL;
  p_subs = (PyArrayObject *)PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_ARRAY_IN_ARRAY);
  //--
  /* If that didn't work, throw an exception. */
  if (p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL ||
      p_s2c == NULL || p_crs == NULL || p_subs == NULL || p_li2rng == NULL) {
    // axLUTs
    Py_XDECREF(p_li2rno);
    Py_XDECREF(p_li2sn);
    Py_XDECREF(p_li2sn1);
    Py_XDECREF(p_li2nos);
    Py_XDECREF(p_li2rng);
    // sino 2 crystals
    Py_XDECREF(p_s2c);
    Py_XDECREF(p_crs);
    // subset definition object
    Py_XDECREF(p_subs);
    return NULL;
  }
  int *subs_ = (int *)PyArray_DATA(p_subs);
  short *s2c = (short *)PyArray_DATA(p_s2c);
  // span-dependent sinogram LUT; stays NULL when the span is unsupported
  short *li2sn = NULL;
  if (Cnt.SPN == 11) {
    li2sn = (short *)PyArray_DATA(p_li2sn);
  } else if (Cnt.SPN == 1) {
    li2sn = (short *)PyArray_DATA(p_li2sn1);
  }
  if (li2sn == NULL) {
    // BUGFIX: li2sn was previously left uninitialised for any other span value
    PyErr_SetString(PyExc_ValueError, "back_prj: unsupported span (SPN must be 1 or 11)");
    Py_DECREF(p_li2rno);
    Py_DECREF(p_li2rng);
    Py_DECREF(p_li2sn);
    Py_DECREF(p_li2sn1);
    Py_DECREF(p_li2nos);
    Py_DECREF(p_s2c);
    Py_DECREF(p_crs);
    Py_DECREF(p_subs);
    return NULL;
  }
  char *li2nos = (char *)PyArray_DATA(p_li2nos);
  float *li2rng = (float *)PyArray_DATA(p_li2rng);
  float *crs = (float *)PyArray_DATA(p_crs);
  int Nprj = PyArray_DIM(p_subs, 0);
  int N0crs = PyArray_DIM(p_crs, 0);
  int N1crs = PyArray_DIM(p_crs, 1); // crystal record width (expected 4 -> float4)
  int *subs;
  if (subs_[0] == -1) {
    Nprj = AW;
    if (Cnt.LOG <= LOGDEBUG)
      printf("\ni> no subsets defined. number of projection bins in 2D: %d\n", Nprj);
    // all projections in
    subs = (int *)malloc(Nprj * sizeof(int));
    for (int i = 0; i < Nprj; i++) { subs[i] = i; }
  } else {
    if (Cnt.LOG <= LOGDEBUG)
      printf("\ni> subsets defined. number of subset projection bins in 2D: %d\n", Nprj);
    subs = subs_;
  }
  if (Cnt.LOG <= LOGDEBUG)
    printf("i> back-projection image dimensions: %ld, %ld, %ld\n", o_bimg->shape[0],
           o_bimg->shape[1], o_bimg->shape[2]);
  // sets the device on which to calculate
  HANDLE_ERROR(hipSetDevice(Cnt.DEVID));
  //--- device copies of the transaxial LUTs plus scratch buffers
  // NOTE(review): the float4 copy assumes each row of `crs` holds 4 floats
  // (N1crs == 4) -- confirm against the LUT generator.
  float4 *d_crs;
  HANDLE_ERROR(hipMalloc(&d_crs, N0crs * sizeof(float4)));
  HANDLE_ERROR(hipMemcpy(d_crs, crs, N0crs * sizeof(float4), hipMemcpyHostToDevice));
  short2 *d_s2c;
  HANDLE_ERROR(hipMalloc(&d_s2c, AW * sizeof(short2)));
  HANDLE_ERROR(hipMemcpy(d_s2c, s2c, AW * sizeof(short2), hipMemcpyHostToDevice));
  float *d_tt;
  HANDLE_ERROR(hipMalloc(&d_tt, N_TT * AW * sizeof(float)));
  unsigned char *d_tv;
  HANDLE_ERROR(hipMalloc(&d_tv, N_TV * AW * sizeof(unsigned char)));
  HANDLE_ERROR(hipMemset(d_tv, 0, N_TV * AW * sizeof(unsigned char)));
  // array of subset projection bins
  int *d_subs;
  HANDLE_ERROR(hipMalloc(&d_subs, Nprj * sizeof(int)));
  HANDLE_ERROR(hipMemcpy(d_subs, subs, Nprj * sizeof(int), hipMemcpyHostToDevice));
  gpu_bprj(o_bimg->vec.data(), o_sino->vec.data(), li2rng, li2sn, li2nos, d_s2c, d_crs, d_subs,
           d_tt, d_tv, Nprj, Cnt, SYNC);
  HANDLE_ERROR(hipFree(d_subs));
  HANDLE_ERROR(hipFree(d_tv));
  HANDLE_ERROR(hipFree(d_tt));
  HANDLE_ERROR(hipFree(d_s2c));
  HANDLE_ERROR(hipFree(d_crs));
  // Clean up (release all numpy references; free subs only if allocated here)
  Py_DECREF(p_li2rno);
  Py_DECREF(p_li2rng);
  Py_DECREF(p_li2sn);
  Py_DECREF(p_li2sn1);
  Py_DECREF(p_li2nos);
  Py_DECREF(p_s2c);
  Py_DECREF(p_crs);
  Py_DECREF(p_subs);
  if (subs_[0] == -1) free(subs);
  Py_INCREF(Py_None);
  return Py_None;
}
//==============================================================================
// O S E M R E C O N S T R U C T I O N
//------------------------------------------------------------------------------
/// One pass of OSEM reconstruction of PET data on the GPU (HIP).
/// Python signature:
///   osem(imgout, psng, rsng, ssng, nsng, asng, subs, imgsens, rcnmsk, krnl,
///        txLUT, axLUT, cnst)
///   imgout  -- reconstructed image, updated in place (float32, writeback copy)
///   psng    -- prompts (uint16); rsng/ssng/nsng/asng -- randoms/scatter/norm/
///              attenuation sinograms (float32)
///   subs    -- (Nsub, Nprj) int32 subset definition; imgsens -- sensitivity
///              image(s); rcnmsk -- bool reconstruction mask
///   krnl    -- separable PSF kernel; a wrong kernel length disables PSF
///              (signalled to the reconstructor via krnl[0] = -1)
/// Returns None; raises ValueError for an unsupported span.
static PyObject *osem_rec(PyObject *self, PyObject *args) {
  // Structure of constants
  Cnst Cnt;
  // output image
  PyObject *o_imgout;
  // output image mask
  PyObject *o_rcnmsk;
  // Dictionary of scanner constants
  PyObject *o_mmrcnst;
  // axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
  PyObject *o_axLUT;
  // transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
  PyObject *o_txLUT;
  // subsets for OSEM, first the default
  PyObject *o_subs;
  // separable kernel matrix, for x, y, and z dimensions
  PyObject *o_krnl;
  // sinos using in reconstruction (reshaped for GPU execution)
  PyObject *o_psng; // prompts (measured)
  PyObject *o_rsng; // randoms
  PyObject *o_ssng; // scatter
  PyObject *o_nsng; // norm
  PyObject *o_asng; // attenuation
  // sensitivity image
  PyObject *o_imgsens;
  /* Parse the input tuple */
  if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOO", &o_imgout, &o_psng, &o_rsng, &o_ssng, &o_nsng,
                        &o_asng, &o_subs, &o_imgsens, &o_rcnmsk, &o_krnl, &o_txLUT, &o_axLUT,
                        &o_mmrcnst))
    return NULL;
  // scanner constants used by the reconstructor
  PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
  Cnt.LOG = (char)PyLong_AsLong(pd_log);
  PyObject *pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
  Cnt.SPN = (char)PyLong_AsLong(pd_span);
  PyObject *pd_sigma_rm = PyDict_GetItemString(o_mmrcnst, "SIGMA_RM");
  Cnt.SIGMA_RM = (float)PyFloat_AsDouble(pd_sigma_rm);
  PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
  Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
  /* Interpret the input objects as numpy arrays. */
  // axial LUTs:
  PyObject *pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
  PyObject *pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
  PyObject *pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
  PyObject *pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
  PyObject *pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
  // transaxial sino LUTs:
  PyObject *pd_crs = PyDict_GetItemString(o_txLUT, "crs");
  PyObject *pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
  //-- get the arrays from the dictionaries (each PyArray_FROM_OTF yields a new reference)
  // output back-projection image (writeback copy: must be resolved/discarded later)
  PyArrayObject *p_imgout = NULL;
  p_imgout = (PyArrayObject *)PyArray_FROM_OTF(o_imgout, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
  // image mask
  PyArrayObject *p_rcnmsk = NULL;
  p_rcnmsk = (PyArrayObject *)PyArray_FROM_OTF(o_rcnmsk, NPY_BOOL, NPY_ARRAY_IN_ARRAY);
  // sensitivity image
  PyArrayObject *p_imgsens = NULL;
  p_imgsens = (PyArrayObject *)PyArray_FROM_OTF(o_imgsens, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  //> PSF kernel
  PyArrayObject *p_krnl = NULL;
  p_krnl = (PyArrayObject *)PyArray_FROM_OTF(o_krnl, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  //> sinogram objects
  PyArrayObject *p_psng = NULL, *p_rsng = NULL, *p_ssng = NULL, *p_nsng = NULL, *p_asng = NULL;
  p_psng = (PyArrayObject *)PyArray_FROM_OTF(o_psng, NPY_UINT16, NPY_ARRAY_IN_ARRAY);
  p_rsng = (PyArrayObject *)PyArray_FROM_OTF(o_rsng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  p_ssng = (PyArrayObject *)PyArray_FROM_OTF(o_ssng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  p_nsng = (PyArrayObject *)PyArray_FROM_OTF(o_nsng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  p_asng = (PyArrayObject *)PyArray_FROM_OTF(o_asng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  // subset definition
  PyArrayObject *p_subs = NULL;
  p_subs = (PyArrayObject *)PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_ARRAY_IN_ARRAY);
  // axLUTs
  PyArrayObject *p_li2rno = NULL, *p_li2sn1 = NULL, *p_li2sn = NULL;
  PyArrayObject *p_li2nos = NULL, *p_li2rng = NULL;
  p_li2rno = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_ARRAY_IN_ARRAY);
  p_li2sn = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_li2sn1 = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_li2nos = (PyArrayObject *)PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_ARRAY_IN_ARRAY);
  p_li2rng = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  // sino to crystal, crystals
  PyArrayObject *p_s2c = NULL, *p_crs = NULL;
  p_s2c = (PyArrayObject *)PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_crs = (PyArrayObject *)PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  //--
  /* If that didn't work, throw an exception. */
  // BUGFIX: p_li2rng is now checked (it was dereferenced below without a NULL check)
  if (p_imgout == NULL || p_rcnmsk == NULL || p_subs == NULL || p_psng == NULL || p_rsng == NULL ||
      p_ssng == NULL || p_nsng == NULL || p_asng == NULL || p_imgsens == NULL ||
      p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL ||
      p_li2rng == NULL || p_s2c == NULL || p_crs == NULL || p_krnl == NULL) {
    //> output image
    PyArray_DiscardWritebackIfCopy(p_imgout);
    Py_XDECREF(p_imgout);
    Py_XDECREF(p_rcnmsk);
    //> objects in the sinogram space
    Py_XDECREF(p_psng);
    Py_XDECREF(p_rsng);
    Py_XDECREF(p_ssng);
    Py_XDECREF(p_nsng);
    Py_XDECREF(p_asng);
    //> subsets
    Py_XDECREF(p_subs);
    //> objects in the image space
    Py_XDECREF(p_imgsens);
    Py_XDECREF(p_krnl);
    //> axLUTs (BUGFIX: p_li2rng was previously leaked on this path)
    Py_XDECREF(p_li2rno);
    Py_XDECREF(p_li2sn);
    Py_XDECREF(p_li2sn1);
    Py_XDECREF(p_li2nos);
    Py_XDECREF(p_li2rng);
    //> sinogram to crystal LUTs
    Py_XDECREF(p_s2c);
    Py_XDECREF(p_crs);
    return NULL;
  }
  float *imgout = (float *)PyArray_DATA(p_imgout);
  bool *rcnmsk = (bool *)PyArray_DATA(p_rcnmsk);
  unsigned short *psng = (unsigned short *)PyArray_DATA(p_psng);
  float *rsng = (float *)PyArray_DATA(p_rsng);
  float *ssng = (float *)PyArray_DATA(p_ssng);
  float *nsng = (float *)PyArray_DATA(p_nsng);
  float *asng = (float *)PyArray_DATA(p_asng);
  //> sensitivity image
  float *imgsens = (float *)PyArray_DATA(p_imgsens);
  //>--- PSF KERNEL ---
  float *krnl;
  int SZ_KRNL = (int)PyArray_DIM(p_krnl, 1);
  if (Cnt.LOG <= LOGDEBUG) printf("d> kernel size [voxels]: %d\n", SZ_KRNL);
  if (SZ_KRNL != KERNEL_LENGTH) {
    if (Cnt.LOG <= LOGWARNING) printf("w> wrong kernel size.\n");
    // krnl[0] = -1 signals "no PSF" to the reconstructor; freed after osem()
    krnl = (float *)malloc(KERNEL_LENGTH * sizeof(float));
    krnl[0] = -1;
  } else {
    krnl = (float *)PyArray_DATA(p_krnl);
  }
  //>-------------------
  // span-dependent sinogram LUT; stays NULL when the span is unsupported
  short *li2sn = NULL;
  if (Cnt.SPN == 11) {
    li2sn = (short *)PyArray_DATA(p_li2sn);
  } else if (Cnt.SPN == 1) {
    li2sn = (short *)PyArray_DATA(p_li2sn1);
  }
  if (li2sn == NULL) {
    // BUGFIX: li2sn was previously left uninitialised for any other span value
    PyErr_SetString(PyExc_ValueError, "osem: unsupported span (SPN must be 1 or 11)");
    if (SZ_KRNL != KERNEL_LENGTH) free(krnl); // krnl was malloc'd above in this case
    PyArray_DiscardWritebackIfCopy(p_imgout);
    Py_DECREF(p_imgout);
    Py_DECREF(p_rcnmsk);
    Py_DECREF(p_psng);
    Py_DECREF(p_rsng);
    Py_DECREF(p_ssng);
    Py_DECREF(p_nsng);
    Py_DECREF(p_asng);
    Py_DECREF(p_subs);
    Py_DECREF(p_imgsens);
    Py_DECREF(p_krnl);
    Py_DECREF(p_li2rno);
    Py_DECREF(p_li2rng);
    Py_DECREF(p_li2sn);
    Py_DECREF(p_li2sn1);
    Py_DECREF(p_li2nos);
    Py_DECREF(p_s2c);
    Py_DECREF(p_crs);
    return NULL;
  }
  char *li2nos = (char *)PyArray_DATA(p_li2nos);
  float *li2rng = (float *)PyArray_DATA(p_li2rng);
  float *crs = (float *)PyArray_DATA(p_crs);
  short *s2c = (short *)PyArray_DATA(p_s2c);
  int N0crs = PyArray_DIM(p_crs, 0);
  int N1crs = PyArray_DIM(p_crs, 1); // crystal record width (unused beyond bookkeeping)
  // number of subsets
  int Nsub = PyArray_DIM(p_subs, 0);
  // number of elements used to store max. number of subsets projection - 1
  int Nprj = PyArray_DIM(p_subs, 1);
  if (Cnt.LOG <= LOGDEBUG)
    printf("i> number of subsets = %d, and max. number of projections/subset = %d\n", Nsub,
           Nprj - 1);
  int *subs = (int *)PyArray_DATA(p_subs);
  // sets the device on which to calculate
  HANDLE_ERROR(hipSetDevice(Cnt.DEVID));
  // run the OSEM update (GPU)
  osem(imgout, rcnmsk, psng, rsng, ssng, nsng, asng, subs, imgsens, krnl, li2rng, li2sn, li2nos,
       s2c, crs, Nsub, Nprj, N0crs, Cnt);
  // BUGFIX: release the fallback kernel buffer (it was previously leaked)
  if (SZ_KRNL != KERNEL_LENGTH) free(krnl);
  // Clean up: push the writeback copy into the caller's array, then drop references
  PyArray_ResolveWritebackIfCopy(p_imgout);
  Py_DECREF(p_imgout);
  Py_DECREF(p_rcnmsk);
  Py_DECREF(p_psng);
  Py_DECREF(p_rsng);
  Py_DECREF(p_ssng);
  Py_DECREF(p_nsng);
  Py_DECREF(p_asng);
  Py_DECREF(p_subs);
  Py_DECREF(p_imgsens);
  Py_DECREF(p_krnl);
  Py_DECREF(p_li2rno);
  Py_DECREF(p_li2rng);
  Py_DECREF(p_li2sn);
  Py_DECREF(p_li2sn1);
  Py_DECREF(p_li2nos);
  Py_DECREF(p_s2c);
  Py_DECREF(p_crs);
  Py_INCREF(Py_None);
  return Py_None;
}
| e8792cd27d1bfd56a54b155acf58e2efa2796294.cu | /*------------------------------------------------------------------------
CUDA C extension for Python
Provides functionality for forward and back projection in PET image
reconstruction.
author: Pawel Markiewicz
Copyrights: 2019
------------------------------------------------------------------------*/
#define PY_SSIZE_T_CLEAN
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION // NPY_API_VERSION
#include "def.h"
#include "pycuvec.cuh"
#include <Python.h>
#include <numpy/arrayobject.h>
#include <stdlib.h>
#include "prjb.h"
#include "prjf.h"
#include "tprj.h"
#include "recon.h"
#include "scanner_0.h"
//===================== START PYTHON INIT ==============================
//--- Available functions
//> Forward declarations of the module's exposed functions (defined below).
static PyObject *trnx_prj(PyObject *self, PyObject *args);
static PyObject *frwd_prj(PyObject *self, PyObject *args, PyObject *kwargs);
static PyObject *back_prj(PyObject *self, PyObject *args, PyObject *kwargs);
static PyObject *osem_rec(PyObject *self, PyObject *args);
//---
//> Module Method Table: maps Python-level names to the C implementations.
//> fprj/bprj accept keyword arguments; tprj/osem are positional-only.
static PyMethodDef petprj_methods[] = {
    {"tprj", trnx_prj, METH_VARARGS, "Transaxial projector."},
    {"fprj", (PyCFunction)frwd_prj, METH_VARARGS | METH_KEYWORDS, "PET forward projector."},
    {"bprj", (PyCFunction)back_prj, METH_VARARGS | METH_KEYWORDS, "PET back projector."},
    {"osem", osem_rec, METH_VARARGS, "OSEM reconstruction of PET data."},
    {NULL, NULL, 0, NULL} // Sentinel
};
//> Module Definition Structure
static struct PyModuleDef petprj_module = {
    PyModuleDef_HEAD_INIT,
    "petprj", //> name of module
    //> module documentation, may be NULL
    "This module provides an interface for GPU routines of PET forward and back projection.",
    -1, //> the module keeps state in global variables.
    petprj_methods};
//> Initialization function, called on `import petprj`.
//> NOTE(review): Py_Initialize() is normally redundant inside a module init
//> (the interpreter is already running) -- presumably kept for safety.
PyMODINIT_FUNC PyInit_petprj(void) {
  Py_Initialize();
  //> load NumPy C-API functionality (import_array returns NULL on failure)
  import_array();
  return PyModule_Create(&petprj_module);
}
//====================== END PYTHON INIT ===============================
//==============================================================================
// T R A N S A X I A L P R O J E C T O R
//------------------------------------------------------------------------------
/// Transaxial projector: computes ray-driven transaxial voxel sampling
/// (Siddon) for every projection bin and returns it via the `tv`/`tt` arrays.
/// Python signature: tprj(prjout, im, tv, tt, txLUT, cnst)
///   tv -- per-bin voxel sampling pattern (uint8, written in place)
///   tt -- per-bin sampling parameters (float32, written in place)
/// Returns None.
static PyObject *trnx_prj(PyObject *self, PyObject *args) {
  // Structure of constants
  Cnst Cnt;
  // Dictionary of scanner constants
  PyObject *o_mmrcnst;
  // transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
  PyObject *o_txLUT;
  // input/output image
  PyObject *o_im;
  // input/output projection sinogram
  PyObject *o_prjout;
  // output transaxial sampling parameters
  PyObject *o_tv;
  PyObject *o_tt;
  //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  /* Parse the input tuple */
  if (!PyArg_ParseTuple(args, "OOOOOO", &o_prjout, &o_im, &o_tv, &o_tt, &o_txLUT, &o_mmrcnst))
    return NULL;
  //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  // only LOG and DEVID are needed from the constants dictionary here
  PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
  Cnt.LOG = (char)PyLong_AsLong(pd_log);
  PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
  Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
  // transaxial sino LUTs:
  PyObject *pd_crs = PyDict_GetItemString(o_txLUT, "crs");
  PyObject *pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
  // sino to crystal, crystals (new references from PyArray_FROM_OTF)
  PyArrayObject *p_s2c = NULL, *p_crs = NULL;
  p_s2c = (PyArrayObject *)PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_crs = (PyArrayObject *)PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  // image object
  PyArrayObject *p_im = NULL;
  p_im = (PyArrayObject *)PyArray_FROM_OTF(o_im, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
  // output sino object (writeback copy; must be resolved/discarded before return)
  PyArrayObject *p_prjout = NULL;
  p_prjout = (PyArrayObject *)PyArray_FROM_OTF(o_prjout, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
  // transaxial voxel sampling (ray-driven)
  PyArrayObject *p_tv = NULL;
  p_tv = (PyArrayObject *)PyArray_FROM_OTF(o_tv, NPY_UINT8, NPY_ARRAY_INOUT_ARRAY2);
  // transaxial parameters for voxel sampling (ray-driven)
  PyArrayObject *p_tt = NULL;
  p_tt = (PyArrayObject *)PyArray_FROM_OTF(o_tt, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
  //--
  /* If that didn't work, throw an exception. */
  if (p_s2c == NULL || p_im == NULL || p_crs == NULL || p_prjout == NULL || p_tv == NULL ||
      p_tt == NULL) {
    // sino 2 crystals
    Py_XDECREF(p_s2c);
    Py_XDECREF(p_crs);
    // image object
    PyArray_DiscardWritebackIfCopy(p_im);
    Py_XDECREF(p_im);
    // output sino object
    PyArray_DiscardWritebackIfCopy(p_prjout);
    Py_XDECREF(p_prjout);
    // transaxial outputs
    PyArray_DiscardWritebackIfCopy(p_tv);
    Py_XDECREF(p_tv);
    PyArray_DiscardWritebackIfCopy(p_tt);
    Py_XDECREF(p_tt);
    return NULL;
  }
  short *s2c = (short *)PyArray_DATA(p_s2c);
  float *crs = (float *)PyArray_DATA(p_crs);
  int N0crs = PyArray_DIM(p_crs, 0);
  int N1crs = PyArray_DIM(p_crs, 1);
  if (Cnt.LOG <= LOGDEBUG) printf("\ni> N0crs=%d, N1crs=%d\n", N0crs, N1crs);
  // NOTE(review): `im` and `prjout` are only logged/available here -- neither is
  // passed to gpu_siddon_tx below; presumably kept for interface symmetry with
  // the other projectors. Confirm before removing.
  float *im = (float *)PyArray_DATA(p_im);
  if (Cnt.LOG <= LOGDEBUG)
    printf("i> forward-projection image dimensions: %ld, %ld\n", PyArray_DIM(p_im, 0),
           PyArray_DIM(p_im, 1));
  // input/output projection sinogram
  float *prjout = (float *)PyArray_DATA(p_prjout);
  // output sampling
  unsigned char *tv = (unsigned char *)PyArray_DATA(p_tv);
  float *tt = (float *)PyArray_DATA(p_tt);
  // CUDA --------------------------------------------------------------------
  // sets the device on which to calculate
  HANDLE_ERROR(cudaSetDevice(Cnt.DEVID));
  int dev_id;
  cudaGetDevice(&dev_id);
  if (Cnt.LOG <= LOGDEBUG) printf("i> using CUDA device #%d\n", dev_id);
  //--- TRANSAXIAL COMPONENTS
  // NOTE(review): the float4 copy assumes each row of `crs` holds 4 floats
  // (N1crs == 4) -- only logged above, not enforced. Confirm with the LUT generator.
  float4 *d_crs;
  HANDLE_ERROR(cudaMalloc(&d_crs, N0crs * sizeof(float4)));
  HANDLE_ERROR(cudaMemcpy(d_crs, crs, N0crs * sizeof(float4), cudaMemcpyHostToDevice));
  short2 *d_s2c;
  HANDLE_ERROR(cudaMalloc(&d_s2c, AW * sizeof(short2)));
  HANDLE_ERROR(cudaMemcpy(d_s2c, s2c, AW * sizeof(short2), cudaMemcpyHostToDevice));
  float *d_tt;
  HANDLE_ERROR(cudaMalloc(&d_tt, N_TT * AW * sizeof(float)));
  unsigned char *d_tv;
  HANDLE_ERROR(cudaMalloc(&d_tv, N_TV * AW * sizeof(unsigned char)));
  HANDLE_ERROR(cudaMemset(d_tv, 0, N_TV * AW * sizeof(unsigned char)));
  //------------DO TRANSAXIAL CALCULATIONS------------------------------------
  gpu_siddon_tx(d_crs, d_s2c, d_tt, d_tv);
  //--------------------------------------------------------------------------
  // copy the sampling results back to the caller-provided arrays
  HANDLE_ERROR(cudaMemcpy(tt, d_tt, N_TT * AW * sizeof(float), cudaMemcpyDeviceToHost));
  HANDLE_ERROR(cudaMemcpy(tv, d_tv, N_TV * AW * sizeof(unsigned char), cudaMemcpyDeviceToHost));
  // CUDA END-----------------------------------------------------------------
  // Clean up: resolve writeback copies so changes reach the caller's arrays
  Py_DECREF(p_s2c);
  Py_DECREF(p_crs);
  PyArray_ResolveWritebackIfCopy(p_im);
  Py_DECREF(p_im);
  PyArray_ResolveWritebackIfCopy(p_tv);
  Py_DECREF(p_tv);
  PyArray_ResolveWritebackIfCopy(p_tt);
  Py_DECREF(p_tt);
  PyArray_ResolveWritebackIfCopy(p_prjout);
  Py_DECREF(p_prjout);
  Py_INCREF(Py_None);
  return Py_None;
}
//------------------------------------------------------------------------------
//==============================================================================
// F O R W A R D P R O J E C T O R
//------------------------------------------------------------------------------
/// Forward-projects an image into sinogram space on the GPU (CUDA).
/// Python signature: fprj(sino, im, txLUT, axLUT, subs, cnst, att, sync=True)
///   sino  -- output projection sinogram (device-backed PyCuVec<float>)
///   im    -- input image to be forward-projected (PyCuVec<float>)
///   subs  -- int32 array of subset projection bins; subs[0]==-1 selects all bins
///   att   -- non-zero => treat `im` as a mu-map and output attenuation factors
///   sync  -- ensure device-to-host copy before returning
/// Returns None; raises ValueError for an unsupported span.
static PyObject *frwd_prj(PyObject *self, PyObject *args, PyObject *kwargs) {
  // Structure of constants
  Cnst Cnt;
  // Dictionary of scanner constants
  PyObject *o_mmrcnst;
  // axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
  PyObject *o_axLUT;
  // transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
  PyObject *o_txLUT;
  // input image to be forward projected (reshaped for GPU execution)
  PyCuVec<float> *o_im = NULL;
  // subsets for OSEM, first the default
  PyObject *o_subs;
  // output projection sino
  PyCuVec<float> *o_prjout = NULL;
  // flag for attenuation factors to be found based on mu-map; if 0 normal emission projection is
  // used
  int att;
  bool SYNC = true; // whether to ensure deviceToHost copy on return
  /* Parse the input tuple */
  static const char *kwds[] = {"sino", "im", "txLUT", "axLUT", "subs",
                               "cnst", "att", "sync", NULL};
  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&OOOOi|b", (char **)kwds, &asPyCuVec_f,
                                   &o_prjout, &asPyCuVec_f, &o_im, &o_txLUT, &o_axLUT, &o_subs,
                                   &o_mmrcnst, &att, &SYNC))
    return NULL;
  // scanner constants used by the GPU projector
  PyObject *pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
  Cnt.SPN = (char)PyLong_AsLong(pd_span);
  PyObject *pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT");
  Cnt.RNG_STRT = (char)PyLong_AsLong(pd_rngstrt);
  PyObject *pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END");
  Cnt.RNG_END = (char)PyLong_AsLong(pd_rngend);
  PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
  Cnt.LOG = (char)PyLong_AsLong(pd_log);
  PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
  Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
  /* Interpret the input objects as numpy arrays. */
  // axial LUTs:
  PyObject *pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
  PyObject *pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
  PyObject *pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
  PyObject *pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
  PyObject *pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
  //-- get the arrays from the dictionaries (each PyArray_FROM_OTF yields a new reference)
  // axLUTs
  PyArrayObject *p_li2rno = NULL, *p_li2sn1 = NULL, *p_li2sn = NULL;
  PyArrayObject *p_li2nos = NULL, *p_li2rng = NULL;
  p_li2rno = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_ARRAY_IN_ARRAY);
  p_li2sn1 = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_li2sn = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_li2nos = (PyArrayObject *)PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_ARRAY_IN_ARRAY);
  p_li2rng = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  // transaxial sino LUTs:
  PyObject *pd_crs = PyDict_GetItemString(o_txLUT, "crs");
  PyObject *pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
  PyObject *pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
  // sino to crystal, crystals
  PyArrayObject *p_s2c = NULL, *p_crs = NULL, *p_aw2ali = NULL;
  p_s2c = (PyArrayObject *)PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_crs = (PyArrayObject *)PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY);
  // subsets if using e.g., OSEM
  PyArrayObject *p_subs = NULL;
  p_subs = (PyArrayObject *)PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_ARRAY_IN_ARRAY);
  //--
  /* If that didn't work, throw an exception. */
  if (p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL ||
      p_aw2ali == NULL || p_s2c == NULL || p_crs == NULL || p_subs == NULL || p_li2rng == NULL) {
    // axLUTs
    Py_XDECREF(p_li2rno);
    Py_XDECREF(p_li2sn);
    Py_XDECREF(p_li2sn1);
    Py_XDECREF(p_li2nos);
    Py_XDECREF(p_li2rng);
    // 2D sino LUT
    Py_XDECREF(p_aw2ali);
    // sino 2 crystals
    Py_XDECREF(p_s2c);
    Py_XDECREF(p_crs);
    // subset definition object
    Py_XDECREF(p_subs);
    return NULL;
  }
  int *subs_ = (int *)PyArray_DATA(p_subs);
  short *s2c = (short *)PyArray_DATA(p_s2c);
  int *aw2ali = (int *)PyArray_DATA(p_aw2ali);
  // span-dependent sinogram LUT; stays NULL when the span is unsupported
  short *li2sn = NULL;
  if (Cnt.SPN == 11) {
    li2sn = (short *)PyArray_DATA(p_li2sn);
  } else if (Cnt.SPN == 1) {
    li2sn = (short *)PyArray_DATA(p_li2sn1);
  }
  if (li2sn == NULL) {
    // BUGFIX: li2sn was previously left uninitialised for any other span value
    PyErr_SetString(PyExc_ValueError, "frwd_prj: unsupported span (SPN must be 1 or 11)");
    Py_DECREF(p_li2rno);
    Py_DECREF(p_li2rng);
    Py_DECREF(p_li2sn);
    Py_DECREF(p_li2sn1);
    Py_DECREF(p_li2nos);
    Py_DECREF(p_aw2ali);
    Py_DECREF(p_s2c);
    Py_DECREF(p_crs);
    Py_DECREF(p_subs);
    return NULL;
  }
  char *li2nos = (char *)PyArray_DATA(p_li2nos);
  float *li2rng = (float *)PyArray_DATA(p_li2rng);
  float *crs = (float *)PyArray_DATA(p_crs);
  if (Cnt.LOG <= LOGDEBUG)
    printf("i> forward-projection image dimensions: %ld, %ld, %ld\n", o_im->shape[0],
           o_im->shape[1], o_im->shape[2]);
  int Nprj = PyArray_DIM(p_subs, 0);
  int N0crs = PyArray_DIM(p_crs, 0);
  int N1crs = PyArray_DIM(p_crs, 1); // crystal record width (expected 4 -> float4)
  int Naw = PyArray_DIM(p_aw2ali, 0);
  if (Cnt.LOG <= LOGDEBUG)
    printf("\ni> N0crs=%d, N1crs=%d, Naw=%d, Nprj=%d\n", N0crs, N1crs, Naw, Nprj);
  int *subs;
  if (subs_[0] == -1) {
    Nprj = AW;
    if (Cnt.LOG <= LOGDEBUG)
      printf("i> no subsets defined. number of projection bins in 2D: %d\n", Nprj);
    // all projections in
    subs = (int *)malloc(Nprj * sizeof(int));
    for (int i = 0; i < Nprj; i++) { subs[i] = i; }
  } else {
    if (Cnt.LOG <= LOGDEBUG)
      printf("i> subsets defined. number of subset projection bins in 2D: %d\n", Nprj);
    subs = subs_;
  }
  // sets the device on which to calculate
  HANDLE_ERROR(cudaSetDevice(Cnt.DEVID));
  //--- TRANSAXIAL COMPONENT: device copies of the LUTs plus scratch buffers
  float4 *d_crs;
  HANDLE_ERROR(cudaMalloc(&d_crs, N0crs * sizeof(float4)));
  HANDLE_ERROR(cudaMemcpy(d_crs, crs, N0crs * sizeof(float4), cudaMemcpyHostToDevice));
  short2 *d_s2c;
  HANDLE_ERROR(cudaMalloc(&d_s2c, AW * sizeof(short2)));
  HANDLE_ERROR(cudaMemcpy(d_s2c, s2c, AW * sizeof(short2), cudaMemcpyHostToDevice));
  float *d_tt;
  HANDLE_ERROR(cudaMalloc(&d_tt, N_TT * AW * sizeof(float)));
  unsigned char *d_tv;
  HANDLE_ERROR(cudaMalloc(&d_tv, N_TV * AW * sizeof(unsigned char)));
  HANDLE_ERROR(cudaMemset(d_tv, 0, N_TV * AW * sizeof(unsigned char)));
  // array of subset projection bins
  int *d_subs;
  HANDLE_ERROR(cudaMalloc(&d_subs, Nprj * sizeof(int)));
  HANDLE_ERROR(cudaMemcpy(d_subs, subs, Nprj * sizeof(int), cudaMemcpyHostToDevice));
  gpu_fprj(o_prjout->vec.data(), o_im->vec.data(), li2rng, li2sn, li2nos, d_s2c, aw2ali, d_crs,
           d_subs, d_tt, d_tv, Nprj, Naw, Cnt, att, SYNC);
  HANDLE_ERROR(cudaFree(d_subs));
  HANDLE_ERROR(cudaFree(d_tv));
  HANDLE_ERROR(cudaFree(d_tt));
  HANDLE_ERROR(cudaFree(d_s2c));
  HANDLE_ERROR(cudaFree(d_crs));
  // Clean up (release all numpy references; free subs only if allocated here)
  Py_DECREF(p_li2rno);
  Py_DECREF(p_li2rng);
  Py_DECREF(p_li2sn);
  Py_DECREF(p_li2sn1);
  Py_DECREF(p_li2nos);
  Py_DECREF(p_aw2ali);
  Py_DECREF(p_s2c);
  Py_DECREF(p_crs);
  Py_DECREF(p_subs);
  if (subs_[0] == -1) free(subs);
  Py_INCREF(Py_None);
  return Py_None;
}
//==============================================================================
// B A C K P R O J E C T O R
//------------------------------------------------------------------------------
/// Back-projects a PET sinogram into image space on the GPU (CUDA).
/// Python signature: bprj(bimg, sino, txLUT, axLUT, subs, cnst, sync=True)
///   bimg  -- output back-projected image (device-backed PyCuVec<float>)
///   sino  -- input sinogram to back-project (PyCuVec<float>)
///   subs  -- int32 array of subset projection bins; subs[0]==-1 selects all bins
///   cnst  -- scanner-constants dict (SPN, RNG_STRT, RNG_END, LOG, DEVID)
///   sync  -- ensure device-to-host copy before returning
/// Returns None; raises ValueError for an unsupported span.
static PyObject *back_prj(PyObject *self, PyObject *args, PyObject *kwargs) {
  // Structure of constants
  Cnst Cnt;
  // Dictionary of scanner constants
  PyObject *o_mmrcnst;
  // axial LUT dictionary. contains such LUTs: li2rno, li2sn, li2nos.
  PyObject *o_axLUT;
  // transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
  PyObject *o_txLUT;
  // sino to be back projected to image (both reshaped for GPU execution)
  PyCuVec<float> *o_sino = NULL;
  // subsets for OSEM, first the default
  PyObject *o_subs;
  // output backprojected image
  PyCuVec<float> *o_bimg = NULL;
  bool SYNC = true; // whether to ensure deviceToHost copy on return
  /* Parse the input tuple */
  static const char *kwds[] = {"bimg", "sino", "txLUT", "axLUT", "subs", "cnst", "sync", NULL};
  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&OOOO|b", (char **)kwds, &asPyCuVec_f,
                                   &o_bimg, &asPyCuVec_f, &o_sino, &o_txLUT, &o_axLUT, &o_subs,
                                   &o_mmrcnst, &SYNC))
    return NULL;
  // scanner constants used by the GPU projector
  PyObject *pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
  Cnt.SPN = (char)PyLong_AsLong(pd_span);
  PyObject *pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT");
  Cnt.RNG_STRT = (char)PyLong_AsLong(pd_rngstrt);
  PyObject *pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END");
  Cnt.RNG_END = (char)PyLong_AsLong(pd_rngend);
  PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
  Cnt.LOG = (char)PyLong_AsLong(pd_log);
  PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
  Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
  /* Interpret the input objects as numpy arrays. */
  // axial LUTs:
  PyObject *pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
  PyObject *pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
  PyObject *pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
  PyObject *pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
  PyObject *pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
  // transaxial sino LUTs:
  PyObject *pd_crs = PyDict_GetItemString(o_txLUT, "crs");
  PyObject *pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
  //-- get the arrays from the dictionaries (each PyArray_FROM_OTF yields a new reference)
  // axLUTs
  PyArrayObject *p_li2rno = NULL, *p_li2sn1 = NULL, *p_li2sn = NULL;
  PyArrayObject *p_li2nos = NULL, *p_li2rng = NULL;
  p_li2rno = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_ARRAY_IN_ARRAY);
  p_li2sn1 = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_li2sn = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_li2nos = (PyArrayObject *)PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_ARRAY_IN_ARRAY);
  p_li2rng = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  // sino to crystal, crystals
  PyArrayObject *p_s2c = NULL, *p_crs = NULL;
  p_s2c = (PyArrayObject *)PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_ARRAY_IN_ARRAY);
  p_crs = (PyArrayObject *)PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
  // subsets if using e.g., OSEM
  PyArrayObject *p_subs = NULL;
  p_subs = (PyArrayObject *)PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_ARRAY_IN_ARRAY);
  //--
  /* If that didn't work, throw an exception. */
  if (p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL ||
      p_s2c == NULL || p_crs == NULL || p_subs == NULL || p_li2rng == NULL) {
    // axLUTs
    Py_XDECREF(p_li2rno);
    Py_XDECREF(p_li2sn);
    Py_XDECREF(p_li2sn1);
    Py_XDECREF(p_li2nos);
    Py_XDECREF(p_li2rng);
    // sino 2 crystals
    Py_XDECREF(p_s2c);
    Py_XDECREF(p_crs);
    // subset definition object
    Py_XDECREF(p_subs);
    return NULL;
  }
  int *subs_ = (int *)PyArray_DATA(p_subs);
  short *s2c = (short *)PyArray_DATA(p_s2c);
  // span-dependent sinogram LUT; stays NULL when the span is unsupported
  short *li2sn = NULL;
  if (Cnt.SPN == 11) {
    li2sn = (short *)PyArray_DATA(p_li2sn);
  } else if (Cnt.SPN == 1) {
    li2sn = (short *)PyArray_DATA(p_li2sn1);
  }
  if (li2sn == NULL) {
    // BUGFIX: li2sn was previously left uninitialised for any other span value
    PyErr_SetString(PyExc_ValueError, "back_prj: unsupported span (SPN must be 1 or 11)");
    Py_DECREF(p_li2rno);
    Py_DECREF(p_li2rng);
    Py_DECREF(p_li2sn);
    Py_DECREF(p_li2sn1);
    Py_DECREF(p_li2nos);
    Py_DECREF(p_s2c);
    Py_DECREF(p_crs);
    Py_DECREF(p_subs);
    return NULL;
  }
  char *li2nos = (char *)PyArray_DATA(p_li2nos);
  float *li2rng = (float *)PyArray_DATA(p_li2rng);
  float *crs = (float *)PyArray_DATA(p_crs);
  int Nprj = PyArray_DIM(p_subs, 0);
  int N0crs = PyArray_DIM(p_crs, 0);
  int N1crs = PyArray_DIM(p_crs, 1); // crystal record width (expected 4 -> float4)
  int *subs;
  if (subs_[0] == -1) {
    Nprj = AW;
    if (Cnt.LOG <= LOGDEBUG)
      printf("\ni> no subsets defined. number of projection bins in 2D: %d\n", Nprj);
    // all projections in
    subs = (int *)malloc(Nprj * sizeof(int));
    for (int i = 0; i < Nprj; i++) { subs[i] = i; }
  } else {
    if (Cnt.LOG <= LOGDEBUG)
      printf("\ni> subsets defined. number of subset projection bins in 2D: %d\n", Nprj);
    subs = subs_;
  }
  if (Cnt.LOG <= LOGDEBUG)
    printf("i> back-projection image dimensions: %ld, %ld, %ld\n", o_bimg->shape[0],
           o_bimg->shape[1], o_bimg->shape[2]);
  // sets the device on which to calculate
  HANDLE_ERROR(cudaSetDevice(Cnt.DEVID));
  //--- device copies of the transaxial LUTs plus scratch buffers
  // NOTE(review): the float4 copy assumes each row of `crs` holds 4 floats
  // (N1crs == 4) -- confirm against the LUT generator.
  float4 *d_crs;
  HANDLE_ERROR(cudaMalloc(&d_crs, N0crs * sizeof(float4)));
  HANDLE_ERROR(cudaMemcpy(d_crs, crs, N0crs * sizeof(float4), cudaMemcpyHostToDevice));
  short2 *d_s2c;
  HANDLE_ERROR(cudaMalloc(&d_s2c, AW * sizeof(short2)));
  HANDLE_ERROR(cudaMemcpy(d_s2c, s2c, AW * sizeof(short2), cudaMemcpyHostToDevice));
  float *d_tt;
  HANDLE_ERROR(cudaMalloc(&d_tt, N_TT * AW * sizeof(float)));
  unsigned char *d_tv;
  HANDLE_ERROR(cudaMalloc(&d_tv, N_TV * AW * sizeof(unsigned char)));
  HANDLE_ERROR(cudaMemset(d_tv, 0, N_TV * AW * sizeof(unsigned char)));
  // array of subset projection bins
  int *d_subs;
  HANDLE_ERROR(cudaMalloc(&d_subs, Nprj * sizeof(int)));
  HANDLE_ERROR(cudaMemcpy(d_subs, subs, Nprj * sizeof(int), cudaMemcpyHostToDevice));
  gpu_bprj(o_bimg->vec.data(), o_sino->vec.data(), li2rng, li2sn, li2nos, d_s2c, d_crs, d_subs,
           d_tt, d_tv, Nprj, Cnt, SYNC);
  HANDLE_ERROR(cudaFree(d_subs));
  HANDLE_ERROR(cudaFree(d_tv));
  HANDLE_ERROR(cudaFree(d_tt));
  HANDLE_ERROR(cudaFree(d_s2c));
  HANDLE_ERROR(cudaFree(d_crs));
  // Clean up (release all numpy references; free subs only if allocated here)
  Py_DECREF(p_li2rno);
  Py_DECREF(p_li2rng);
  Py_DECREF(p_li2sn);
  Py_DECREF(p_li2sn1);
  Py_DECREF(p_li2nos);
  Py_DECREF(p_s2c);
  Py_DECREF(p_crs);
  Py_DECREF(p_subs);
  if (subs_[0] == -1) free(subs);
  Py_INCREF(Py_None);
  return Py_None;
}
//==============================================================================
// O S E M R E C O N S T R U C T I O N
//------------------------------------------------------------------------------
static PyObject *osem_rec(PyObject *self, PyObject *args) {
// Structure of constants
Cnst Cnt;
// output image
PyObject *o_imgout;
// output image mask
PyObject *o_rcnmsk;
// Dictionary of scanner constants
PyObject *o_mmrcnst;
// axial LUT dicionary. contains such LUTs: li2rno, li2sn, li2nos.
PyObject *o_axLUT;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject *o_txLUT;
// subsets for OSEM, first the default
PyObject *o_subs;
// separable kernel matrix, for x, y, and z dimensions
PyObject *o_krnl;
// sinos using in reconstruction (reshaped for GPU execution)
PyObject *o_psng; // prompts (measured)
PyObject *o_rsng; // randoms
PyObject *o_ssng; // scatter
PyObject *o_nsng; // norm
PyObject *o_asng; // attenuation
// sensitivity image
PyObject *o_imgsens;
/* ^^^^^^^^^^^^^^^^^^^^^^^ Parse the input tuple ^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOO", &o_imgout, &o_psng, &o_rsng, &o_ssng, &o_nsng,
&o_asng, &o_subs, &o_imgsens, &o_rcnmsk, &o_krnl, &o_txLUT, &o_axLUT,
&o_mmrcnst))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
Cnt.LOG = (char)PyLong_AsLong(pd_log);
PyObject *pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (char)PyLong_AsLong(pd_span);
PyObject *pd_sigma_rm = PyDict_GetItemString(o_mmrcnst, "SIGMA_RM");
Cnt.SIGMA_RM = (float)PyFloat_AsDouble(pd_sigma_rm);
PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
/* Interpret the input objects as numpy arrays. */
// axial LUTs:
PyObject *pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
PyObject *pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
PyObject *pd_li2sn1 = PyDict_GetItemString(o_axLUT, "li2sn1");
PyObject *pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
PyObject *pd_li2rng = PyDict_GetItemString(o_axLUT, "li2rng");
// transaxial sino LUTs:
PyObject *pd_crs = PyDict_GetItemString(o_txLUT, "crs");
PyObject *pd_s2c = PyDict_GetItemString(o_txLUT, "s2c");
//-- get the arrays from the dictionaries
// output back-projection image
PyArrayObject *p_imgout = NULL;
p_imgout = (PyArrayObject *)PyArray_FROM_OTF(o_imgout, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
// image mask
PyArrayObject *p_rcnmsk = NULL;
p_rcnmsk = (PyArrayObject *)PyArray_FROM_OTF(o_rcnmsk, NPY_BOOL, NPY_ARRAY_IN_ARRAY);
// sensitivity image
PyArrayObject *p_imgsens = NULL;
p_imgsens = (PyArrayObject *)PyArray_FROM_OTF(o_imgsens, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
//> PSF kernel
PyArrayObject *p_krnl = NULL;
p_krnl = (PyArrayObject *)PyArray_FROM_OTF(o_krnl, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
//> sinogram objects
PyArrayObject *p_psng = NULL, *p_rsng = NULL, *p_ssng = NULL, *p_nsng = NULL, *p_asng = NULL;
p_psng = (PyArrayObject *)PyArray_FROM_OTF(o_psng, NPY_UINT16, NPY_ARRAY_IN_ARRAY);
p_rsng = (PyArrayObject *)PyArray_FROM_OTF(o_rsng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
p_ssng = (PyArrayObject *)PyArray_FROM_OTF(o_ssng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
p_nsng = (PyArrayObject *)PyArray_FROM_OTF(o_nsng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
p_asng = (PyArrayObject *)PyArray_FROM_OTF(o_asng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
// subset definition
PyArrayObject *p_subs = NULL;
p_subs = (PyArrayObject *)PyArray_FROM_OTF(o_subs, NPY_INT32, NPY_ARRAY_IN_ARRAY);
// axLUTs
PyArrayObject *p_li2rno = NULL, *p_li2sn1 = NULL, *p_li2sn = NULL;
PyArrayObject *p_li2nos = NULL, *p_li2rng = NULL;
p_li2rno = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rno, NPY_INT8, NPY_ARRAY_IN_ARRAY);
p_li2sn = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn, NPY_INT16, NPY_ARRAY_IN_ARRAY);
p_li2sn1 = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn1, NPY_INT16, NPY_ARRAY_IN_ARRAY);
p_li2nos = (PyArrayObject *)PyArray_FROM_OTF(pd_li2nos, NPY_INT8, NPY_ARRAY_IN_ARRAY);
p_li2rng = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
// sino to crystal, crystals
PyArrayObject *p_s2c = NULL, *p_crs = NULL;
p_s2c = (PyArrayObject *)PyArray_FROM_OTF(pd_s2c, NPY_INT16, NPY_ARRAY_IN_ARRAY);
p_crs = (PyArrayObject *)PyArray_FROM_OTF(pd_crs, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
//--
/* If that didn't work, throw an exception. */
if (p_imgout == NULL || p_rcnmsk == NULL || p_subs == NULL || p_psng == NULL || p_rsng == NULL ||
p_ssng == NULL || p_nsng == NULL || p_asng == NULL || p_imgsens == NULL ||
p_li2rno == NULL || p_li2sn == NULL || p_li2sn1 == NULL || p_li2nos == NULL ||
p_s2c == NULL || p_crs == NULL || p_krnl == NULL) {
//> output image
PyArray_DiscardWritebackIfCopy(p_imgout);
Py_XDECREF(p_imgout);
Py_XDECREF(p_rcnmsk);
//> objects in the sinogram space
Py_XDECREF(p_psng);
Py_XDECREF(p_rsng);
Py_XDECREF(p_ssng);
Py_XDECREF(p_nsng);
Py_XDECREF(p_asng);
//> subsets
Py_XDECREF(p_subs);
//> objects in the image space
Py_XDECREF(p_imgsens);
Py_XDECREF(p_krnl);
//> axLUTs
Py_XDECREF(p_li2rno);
Py_XDECREF(p_li2sn);
Py_XDECREF(p_li2sn1);
Py_XDECREF(p_li2nos);
//> sinogram to crystal LUTs
Py_XDECREF(p_s2c);
Py_XDECREF(p_crs);
return NULL;
}
float *imgout = (float *)PyArray_DATA(p_imgout);
bool *rcnmsk = (bool *)PyArray_DATA(p_rcnmsk);
unsigned short *psng = (unsigned short *)PyArray_DATA(p_psng);
float *rsng = (float *)PyArray_DATA(p_rsng);
float *ssng = (float *)PyArray_DATA(p_ssng);
float *nsng = (float *)PyArray_DATA(p_nsng);
float *asng = (float *)PyArray_DATA(p_asng);
//> sensitivity image
float *imgsens = (float *)PyArray_DATA(p_imgsens);
//>--- PSF KERNEL ---
float *krnl;
int SZ_KRNL = (int)PyArray_DIM(p_krnl, 1);
if (Cnt.LOG <= LOGDEBUG) printf("d> kernel size [voxels]: %d\n", SZ_KRNL);
if (SZ_KRNL != KERNEL_LENGTH) {
if (Cnt.LOG <= LOGWARNING) printf("w> wrong kernel size.\n");
krnl = (float *)malloc(KERNEL_LENGTH * sizeof(float));
krnl[0] = -1;
} else {
krnl = (float *)PyArray_DATA(p_krnl);
}
//>-------------------
short *li2sn;
if (Cnt.SPN == 11) {
li2sn = (short *)PyArray_DATA(p_li2sn);
} else if (Cnt.SPN == 1) {
li2sn = (short *)PyArray_DATA(p_li2sn1);
}
char *li2nos = (char *)PyArray_DATA(p_li2nos);
float *li2rng = (float *)PyArray_DATA(p_li2rng);
float *crs = (float *)PyArray_DATA(p_crs);
short *s2c = (short *)PyArray_DATA(p_s2c);
int N0crs = PyArray_DIM(p_crs, 0);
int N1crs = PyArray_DIM(p_crs, 1);
// number of subsets
int Nsub = PyArray_DIM(p_subs, 0);
// number of elements used to store max. number of subsets projection - 1
int Nprj = PyArray_DIM(p_subs, 1);
if (Cnt.LOG <= LOGDEBUG)
printf("i> number of subsets = %d, and max. number of projections/subset = %d\n", Nsub,
Nprj - 1);
int *subs = (int *)PyArray_DATA(p_subs);
// sets the device on which to calculate
HANDLE_ERROR(cudaSetDevice(Cnt.DEVID));
//<><><<><><><><<><><><><><><><><><><>
osem(imgout, rcnmsk, psng, rsng, ssng, nsng, asng, subs, imgsens, krnl, li2rng, li2sn, li2nos,
s2c, crs, Nsub, Nprj, N0crs, Cnt);
//<><><><><><><><<><><><>><><><><><><>
// Clean up
PyArray_ResolveWritebackIfCopy(p_imgout);
Py_DECREF(p_imgout);
Py_DECREF(p_rcnmsk);
Py_DECREF(p_psng);
Py_DECREF(p_rsng);
Py_DECREF(p_ssng);
Py_DECREF(p_nsng);
Py_DECREF(p_asng);
Py_DECREF(p_subs);
Py_DECREF(p_imgsens);
Py_DECREF(p_krnl);
Py_DECREF(p_li2rno);
Py_DECREF(p_li2rng);
Py_DECREF(p_li2sn);
Py_DECREF(p_li2sn1);
Py_DECREF(p_li2nos);
Py_DECREF(p_s2c);
Py_DECREF(p_crs);
Py_INCREF(Py_None);
return Py_None;
}
|
a603d72a6cc49bac17d47cc7ecb5f942d2d32840.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#ifndef _FILTER_KERNEL_H_
#define _FILTER_KERNEL_H_
#include <shrUtils.h>
#include <cutil_inline.h> // includes cuda.h and hip/hip_runtime_api.h
#include <cutil_math.h>
#include <generalCuda.cuh>
texture<uchar4, 2, hipReadModeNormalizedFloat> rgbaTex;
texture<float, 1, hipReadModeElementType> gaussianTex;
hipArray* d_array, *d_tempArray, *d_gaussianArray;
/*
Perform a simple bilateral filter.
Bilateral filter is a nonlinear filter that is a mixture of range
filter and domain filter, the previous one preserves crisp edges and
the latter one filters noise. The intensity value at each pixel in
an image is replaced by a weighted average of intensity values from
nearby pixels.
The weight factor is calculated by the product of domain filter
component(using the gaussian distribution as a spatial distance) as
well as range filter component(Euclidean distance between center pixel
and the current neighbor pixel). Because this process is nonlinear,
the sample just uses a simple pixel by pixel step.
Texture fetches automatically clamp to edge of image. 1D gaussian array
is mapped to a 1D texture instead of using shared memory, which may
cause severe bank conflict.
Threads are y-pass(column-pass), because the output is coalesced.
Parameters
od - pointer to output data in global memory
d_f - pointer to the 1D gaussian array
e_d - euclidean delta
w - image width
h - image height
r - filter radius
*/
//column pass using coalesced global memory reads
__global__ void
d_bilateral_filter(uint *od,
float e_d, int w, int h, int r)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x < w && y < h) {
float sum = 0.0f;
float factor;
float4 t = make_float4(0.0f);
float4 center = tex2D(rgbaTex, x, y);
for(int i = -r; i <= r; i++)
{
for(int j = -r; j <= r; j++)
{
float4 curPix = tex2D(rgbaTex, x + j, y + i);
factor = (tex1D(gaussianTex, i + r) * tex1D(gaussianTex, j + r)) * //domain factor
euclideanLen(curPix, center, e_d); //range factor
t += factor * curPix;
sum += factor;
}
}
od[y * w + x] = rgbaFloatToInt( t / sum);
}
}
__global__ void debugGaussianKernel(float *readValue)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
readValue[x] = tex1D(gaussianTex, x);
}
extern "C"
void debugGaussianTex(int radius)
{
float* d_readValue;
int size = 3 * radius + 1;
cutilSafeCall(hipMalloc( (void**)&d_readValue, size * sizeof(float)));
hipLaunchKernelGGL(( debugGaussianKernel), dim3(1), dim3(size) , 0, 0, d_readValue);
float* h_readValue = (float*)calloc(size, sizeof(float));
cutilSafeCall(hipMemcpy( h_readValue, d_readValue, size * sizeof(float), hipMemcpyDeviceToHost ));
for (int s = 0; s < size; s++) {
printf("gaussianTex[%d] = %f\n", s, h_readValue[s]);
}
free(h_readValue);
cutilSafeCall(hipFree(d_readValue));
}
extern "C"
void initTexture(int width, int height, void *pImage)
{
int size = width * height * sizeof(unsigned int);
// copy image data to array
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned);
cutilSafeCall( hipMallocArray ( &d_array, &channelDesc, width, height ));
cutilSafeCall( hipMallocArray ( &d_tempArray, &channelDesc, width, height ));
cutilSafeCall( hipMemcpyToArray( d_array, 0, 0, pImage, size, hipMemcpyHostToDevice));
}
extern "C"
void freeTextures()
{
cutilSafeCall(hipFreeArray(d_array));
cutilSafeCall(hipFreeArray(d_tempArray));
cutilSafeCall(hipFreeArray(d_gaussianArray));
}
extern "C"
void updateGaussian(float delta, int radius)
{
int size = 2 * radius + 1;
float* d_gaussian;
cutilSafeCall(hipMalloc( (void**) &d_gaussian,
(2 * radius + 1)* sizeof(float)));
//generate gaussian array
hipLaunchKernelGGL(( d_generate_gaussian), dim3(1), dim3(size), 0, 0, d_gaussian, delta, radius);
//create cuda array
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
cutilSafeCall( hipMallocArray( &d_gaussianArray, &channelDesc, size, 1 ));
cutilSafeCall( hipMemcpyToArray( d_gaussianArray, 0, 0, d_gaussian, size * sizeof (float), hipMemcpyDeviceToDevice));
// Bind the array to the texture
cutilSafeCall( hipBindTextureToArray( gaussianTex, d_gaussianArray, channelDesc));
cutilSafeCall( hipFree(d_gaussian) );
// debugGaussianTex(radius); // print out the content of gaussianTex for debugging
}
/*
Perform 2D bilateral filter on image using CUDA
Parameters:
d_dest - pointer to destination image in device memory
width - image width
height - image height
e_d - euclidean delta
radius - filter radius
iterations - number of iterations
*/
// RGBA version
extern "C"
double bilateralFilterRGBA(uint *d_dest,
int width, int height,
float e_d, int radius, int iterations,
int nthreads)
{
// var for kernel computation timing
double dKernelTime;
// Bind the array to the texture
cutilSafeCall( hipBindTextureToArray(rgbaTex, d_array) );
for(int i=0; i<iterations; i++)
{
// sync host and start kernel computation timer
dKernelTime = 0.0;
cutilSafeCall(cutilDeviceSynchronize());
shrDeltaT(0);
dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16);
dim3 blockSize(16, 16);
hipLaunchKernelGGL(( d_bilateral_filter), dim3(gridSize), dim3(blockSize), 0, 0,
d_dest, e_d, width, height, radius);
// sync host and stop computation timer
cutilSafeCall( cutilDeviceSynchronize() );
dKernelTime += shrDeltaT(0);
if (iterations > 1) {
// copy result back from global memory to array
cutilSafeCall( hipMemcpyToArray( d_tempArray, 0, 0, d_dest, width * height * sizeof(float),
hipMemcpyDeviceToDevice));
cutilSafeCall( hipBindTextureToArray(rgbaTex, d_tempArray) );
}
}
return (dKernelTime/(double)iterations);
}
#endif
| a603d72a6cc49bac17d47cc7ecb5f942d2d32840.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#ifndef _FILTER_KERNEL_H_
#define _FILTER_KERNEL_H_
#include <shrUtils.h>
#include <cutil_inline.h> // includes cuda.h and cuda_runtime_api.h
#include <cutil_math.h>
#include <generalCuda.cuh>
texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaTex;
texture<float, 1, cudaReadModeElementType> gaussianTex;
cudaArray* d_array, *d_tempArray, *d_gaussianArray;
/*
Perform a simple bilateral filter.
Bilateral filter is a nonlinear filter that is a mixture of range
filter and domain filter, the previous one preserves crisp edges and
the latter one filters noise. The intensity value at each pixel in
an image is replaced by a weighted average of intensity values from
nearby pixels.
The weight factor is calculated by the product of domain filter
component(using the gaussian distribution as a spatial distance) as
well as range filter component(Euclidean distance between center pixel
and the current neighbor pixel). Because this process is nonlinear,
the sample just uses a simple pixel by pixel step.
Texture fetches automatically clamp to edge of image. 1D gaussian array
is mapped to a 1D texture instead of using shared memory, which may
cause severe bank conflict.
Threads are y-pass(column-pass), because the output is coalesced.
Parameters
od - pointer to output data in global memory
d_f - pointer to the 1D gaussian array
e_d - euclidean delta
w - image width
h - image height
r - filter radius
*/
//column pass using coalesced global memory reads
__global__ void
d_bilateral_filter(uint *od,
float e_d, int w, int h, int r)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x < w && y < h) {
float sum = 0.0f;
float factor;
float4 t = make_float4(0.0f);
float4 center = tex2D(rgbaTex, x, y);
for(int i = -r; i <= r; i++)
{
for(int j = -r; j <= r; j++)
{
float4 curPix = tex2D(rgbaTex, x + j, y + i);
factor = (tex1D(gaussianTex, i + r) * tex1D(gaussianTex, j + r)) * //domain factor
euclideanLen(curPix, center, e_d); //range factor
t += factor * curPix;
sum += factor;
}
}
od[y * w + x] = rgbaFloatToInt( t / sum);
}
}
__global__ void debugGaussianKernel(float *readValue)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
readValue[x] = tex1D(gaussianTex, x);
}
extern "C"
void debugGaussianTex(int radius)
{
float* d_readValue;
int size = 3 * radius + 1;
cutilSafeCall(cudaMalloc( (void**)&d_readValue, size * sizeof(float)));
debugGaussianKernel<<< 1, size >>>(d_readValue);
float* h_readValue = (float*)calloc(size, sizeof(float));
cutilSafeCall(cudaMemcpy( h_readValue, d_readValue, size * sizeof(float), cudaMemcpyDeviceToHost ));
for (int s = 0; s < size; s++) {
printf("gaussianTex[%d] = %f\n", s, h_readValue[s]);
}
free(h_readValue);
cutilSafeCall(cudaFree(d_readValue));
}
extern "C"
void initTexture(int width, int height, void *pImage)
{
int size = width * height * sizeof(unsigned int);
// copy image data to array
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);
cutilSafeCall( cudaMallocArray ( &d_array, &channelDesc, width, height ));
cutilSafeCall( cudaMallocArray ( &d_tempArray, &channelDesc, width, height ));
cutilSafeCall( cudaMemcpyToArray( d_array, 0, 0, pImage, size, cudaMemcpyHostToDevice));
}
extern "C"
void freeTextures()
{
cutilSafeCall(cudaFreeArray(d_array));
cutilSafeCall(cudaFreeArray(d_tempArray));
cutilSafeCall(cudaFreeArray(d_gaussianArray));
}
extern "C"
void updateGaussian(float delta, int radius)
{
int size = 2 * radius + 1;
float* d_gaussian;
cutilSafeCall(cudaMalloc( (void**) &d_gaussian,
(2 * radius + 1)* sizeof(float)));
//generate gaussian array
d_generate_gaussian<<< 1, size>>>(d_gaussian, delta, radius);
//create cuda array
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cutilSafeCall( cudaMallocArray( &d_gaussianArray, &channelDesc, size, 1 ));
cutilSafeCall( cudaMemcpyToArray( d_gaussianArray, 0, 0, d_gaussian, size * sizeof (float), cudaMemcpyDeviceToDevice));
// Bind the array to the texture
cutilSafeCall( cudaBindTextureToArray( gaussianTex, d_gaussianArray, channelDesc));
cutilSafeCall( cudaFree(d_gaussian) );
// debugGaussianTex(radius); // print out the content of gaussianTex for debugging
}
/*
Perform 2D bilateral filter on image using CUDA
Parameters:
d_dest - pointer to destination image in device memory
width - image width
height - image height
e_d - euclidean delta
radius - filter radius
iterations - number of iterations
*/
// RGBA version
extern "C"
double bilateralFilterRGBA(uint *d_dest,
int width, int height,
float e_d, int radius, int iterations,
int nthreads)
{
// var for kernel computation timing
double dKernelTime;
// Bind the array to the texture
cutilSafeCall( cudaBindTextureToArray(rgbaTex, d_array) );
for(int i=0; i<iterations; i++)
{
// sync host and start kernel computation timer
dKernelTime = 0.0;
cutilSafeCall(cutilDeviceSynchronize());
shrDeltaT(0);
dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16);
dim3 blockSize(16, 16);
d_bilateral_filter<<< gridSize, blockSize>>>(
d_dest, e_d, width, height, radius);
// sync host and stop computation timer
cutilSafeCall( cutilDeviceSynchronize() );
dKernelTime += shrDeltaT(0);
if (iterations > 1) {
// copy result back from global memory to array
cutilSafeCall( cudaMemcpyToArray( d_tempArray, 0, 0, d_dest, width * height * sizeof(float),
cudaMemcpyDeviceToDevice));
cutilSafeCall( cudaBindTextureToArray(rgbaTex, d_tempArray) );
}
}
return (dKernelTime/(double)iterations);
}
#endif
|
e4d8f8982c01bdd2fe81b61df586d58396867f51.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __MEDIANFILTER_CU_
#define __MEDIANFILTER_CU_
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#define datasize 100
extern "C" void ArithMedianFilter_host(int *pixel, int Width, int Height);
inline void checkCudaErrors(hipError_t err) //cuda error handle function
{
if (hipSuccess != err)
{
fprintf(stderr, "CUDA Runtime API error:%s.\n", hipGetErrorString(err));
return;
}
}
__global__ void ArithMedianFilter(int *In, int *Out, int Width, int Height)
{
int window[9];
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (x <= Width && x >= 0 && y <= Height && y >= 0)
{
// if(x == 0 || y == 0 || x == Width -1 || y == Height - 1)
// {
// Out[y* Width + x] = In[y* Width + x];
// return;
// }
window[0] = (y == 0 || x == 0) ? 125 : In[(y - 1)* Width + x - 1];
window[1] = (y == 0) ? 125 : In[(y - 1)* Width + x];
window[2] = (y == 0 || x == Width - 1) ? 125 : In[(y - 1)* Width + x + 1];
window[3] = (x == 0) ? 125 : In[y* Width + x - 1];
window[4] = In[y* Width + x];
window[5] = (x == Width - 1) ? 125 : In[y* Width + x + 1];
window[6] = (y == Height - 1 || x == 0) ? 125 : In[(y + 1)* Width + x - 1];
window[7] = (y == Height - 1) ? 125 : In[(y + 1)* Width + x];
window[8] = (y == Height - 1 || x == Width - 1) ? 125 : In[(y + 1)* Width + x + 1];
int pixel = 0;
for (int i = 0; i < 9; i++)
pixel += window[i];
pixel /= 9;
//for (unsigned int j = 0; j < 5; j++)
//{
// int min = j;
// for (unsigned int l = j + 1; l < 9; l++)
// if (window[l] < window[min])
// min = l;
// const float temp = window[j];
// window[j] = window[min];
// window[min] = temp;
//}
Out[y* Width + x] = pixel;
}
}
extern "C" void ArithMedianFilter_host(int *pixel, int Width, int Height)
{
int *pixelIn, *pixelOut;
dim3 dimBlock(32, 32);
dim3 dimGrid((Width + dimBlock.x - 1) / dimBlock.x, (Height + dimBlock.y -
1) / dimBlock.y);
checkCudaErrors(hipMalloc((void**)&pixelIn, sizeof(int) * Width * Height));
checkCudaErrors(hipMalloc((void**)&pixelOut, sizeof(int) * Width * Height));
checkCudaErrors(hipMemcpy(pixelIn, pixel, sizeof(int) * Width * Height, hipMemcpyHostToDevice));
ArithMedianFilter << <dimGrid, dimBlock >> > (pixelIn, pixelOut, Width, Height);
checkCudaErrors(hipMemcpy(pixel, pixelOut, sizeof(int) * Width * Height, hipMemcpyDeviceToHost));
hipFree(pixelIn);
hipFree(pixelOut);
}
#endif // ! __MEDIANFILTER_KERNEL_CU_
| e4d8f8982c01bdd2fe81b61df586d58396867f51.cu | #ifndef __MEDIANFILTER_CU_
#define __MEDIANFILTER_CU_
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#define datasize 100
extern "C" void ArithMedianFilter_host(int *pixel, int Width, int Height);
inline void checkCudaErrors(cudaError err) //cuda error handle function
{
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA Runtime API error:%s.\n", cudaGetErrorString(err));
return;
}
}
__global__ void ArithMedianFilter(int *In, int *Out, int Width, int Height)
{
int window[9];
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
if (x <= Width && x >= 0 && y <= Height && y >= 0)
{
// if(x == 0 || y == 0 || x == Width -1 || y == Height - 1)
// {
// Out[y* Width + x] = In[y* Width + x];
// return;
// }
window[0] = (y == 0 || x == 0) ? 125 : In[(y - 1)* Width + x - 1];
window[1] = (y == 0) ? 125 : In[(y - 1)* Width + x];
window[2] = (y == 0 || x == Width - 1) ? 125 : In[(y - 1)* Width + x + 1];
window[3] = (x == 0) ? 125 : In[y* Width + x - 1];
window[4] = In[y* Width + x];
window[5] = (x == Width - 1) ? 125 : In[y* Width + x + 1];
window[6] = (y == Height - 1 || x == 0) ? 125 : In[(y + 1)* Width + x - 1];
window[7] = (y == Height - 1) ? 125 : In[(y + 1)* Width + x];
window[8] = (y == Height - 1 || x == Width - 1) ? 125 : In[(y + 1)* Width + x + 1];
int pixel = 0;
for (int i = 0; i < 9; i++)
pixel += window[i];
pixel /= 9;
//for (unsigned int j = 0; j < 5; j++)
//{
// int min = j;
// for (unsigned int l = j + 1; l < 9; l++)
// if (window[l] < window[min])
// min = l;
// const float temp = window[j];
// window[j] = window[min];
// window[min] = temp;
//}
Out[y* Width + x] = pixel;
}
}
extern "C" void ArithMedianFilter_host(int *pixel, int Width, int Height)
{
int *pixelIn, *pixelOut;
dim3 dimBlock(32, 32);
dim3 dimGrid((Width + dimBlock.x - 1) / dimBlock.x, (Height + dimBlock.y -
1) / dimBlock.y);
checkCudaErrors(cudaMalloc((void**)&pixelIn, sizeof(int) * Width * Height));
checkCudaErrors(cudaMalloc((void**)&pixelOut, sizeof(int) * Width * Height));
checkCudaErrors(cudaMemcpy(pixelIn, pixel, sizeof(int) * Width * Height, cudaMemcpyHostToDevice));
ArithMedianFilter << <dimGrid, dimBlock >> > (pixelIn, pixelOut, Width, Height);
checkCudaErrors(cudaMemcpy(pixel, pixelOut, sizeof(int) * Width * Height, cudaMemcpyDeviceToHost));
cudaFree(pixelIn);
cudaFree(pixelOut);
}
#endif // ! __MEDIANFILTER_KERNEL_CU_
|
c1710ddab41e290b8f0c75823c8b36b3812f9ec8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cu_getRange.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
float *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
const int xstart = 1;
const int xend = 1;
const int ystart = 1;
const int yend = 1;
const int colssrc = 1;
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cu_getRange), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,xstart,xend,ystart,yend,colssrc,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cu_getRange), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,xstart,xend,ystart,yend,colssrc,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cu_getRange), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,xstart,xend,ystart,yend,colssrc,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c1710ddab41e290b8f0c75823c8b36b3812f9ec8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cu_getRange.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
float *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
const int xstart = 1;
const int xend = 1;
const int ystart = 1;
const int yend = 1;
const int colssrc = 1;
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cu_getRange<<<gridBlock,threadBlock>>>(src,dst,xstart,xend,ystart,yend,colssrc,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cu_getRange<<<gridBlock,threadBlock>>>(src,dst,xstart,xend,ystart,yend,colssrc,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cu_getRange<<<gridBlock,threadBlock>>>(src,dst,xstart,xend,ystart,yend,colssrc,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.